org.apache.commons.lang3.mutable.MutableDouble Java Examples
The following examples show how to use org.apache.commons.lang3.mutable.MutableDouble. The examples are drawn from open-source projects; the source file, originating project, and license are noted above each example.
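Before turning to the project code, here is a minimal, self-contained sketch of the MutableDouble operations that recur throughout these examples (construction, in-place add/subtract, setValue, and reading the value back); the class name and values are illustrative only.

import org.apache.commons.lang3.mutable.MutableDouble;

public class MutableDoubleBasics {
    public static void main(String[] args) {
        MutableDouble total = new MutableDouble(0);   // starts at 0.0
        total.add(2.5);                               // in-place addition
        total.subtract(0.5);                          // in-place subtraction
        total.setValue(total.doubleValue() * 2);      // read as a primitive, then overwrite
        System.out.println(total.getValue());         // prints 4.0; getValue() returns a Double
    }
}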
Example #1
Source File: DoubleBufferedSumAggregator.java From spliceengine with GNU Affero General Public License v3.0 | 6 votes |
@Override
public void merge(ExecAggregator addend) throws StandardException {
    if (addend == null) return; //treat null entries as zero
    //In Splice, we should never see a different type of an ExecAggregator
    DoubleBufferedSumAggregator other = (DoubleBufferedSumAggregator) addend;
    MutableDouble item;
    if (!other.sumTree.isEmpty())
        isNull = false;
    for (Map.Entry<Integer, MutableDouble> s : other.sumTree.entrySet()) {
        if ((item = sumTree.get(s.getKey())) != null)
            item.setValue(s.getValue().doubleValue() + item.doubleValue());
        else
            sumTree.put(s.getKey(), new MutableDouble(s.getValue()));
    }
    for (int i = 0; i < other.position; i++) {
        buffer[position] = other.buffer[i];
        incrementPosition();
    }
}
Example #2
Source File: SoftmaxMbrDepParseTest.java From pacaya with Apache License 2.0 | 6 votes |
private void helpGradByFiniteDiffs(Algebra tmpS) {
    Tensor t1 = new Tensor(s, 4, 4);
    Identity<Tensor> id1 = new Identity<Tensor>(t1);
    Identity<Tensor> temp = new Identity<Tensor>(Tensor.getScalarTensor(s, 2));
    SoftmaxMbrDepParse ea = new SoftmaxMbrDepParse(id1, temp, tmpS);

    int numParams = ModuleFn.getOutputSize(ea.getInputs());
    IntDoubleDenseVector x = ModuleTestUtils.getAbsZeroOneGaussian(numParams);
    final MutableDouble sum = new MutableDouble(0);
    x.iterate(new FnIntDoubleToVoid() {
        public void call(int idx, double val) {
            sum.add(val);
        }
    });
    x.scale(-1.0 / sum.doubleValue());
    ModuleTestUtils.assertGradientCorrectByFd(ea, x, 1e-8, 1e-5);
}
Example #3
Source File: SumTest.java From attic-apex-malhar with Apache License 2.0 | 6 votes |
@Test
public void SumTest() {
    SumInt si = new SumInt();
    SumLong sl = new SumLong();
    SumFloat sf = new SumFloat();
    SumDouble sd = new SumDouble();

    Assert.assertEquals(new MutableInt(10), si.accumulate(si.defaultAccumulatedValue(), 10));
    Assert.assertEquals(new MutableInt(11), si.accumulate(new MutableInt(1), 10));
    Assert.assertEquals(new MutableInt(22), si.merge(new MutableInt(1), new MutableInt(21)));

    Assert.assertEquals(new MutableLong(10L), sl.accumulate(sl.defaultAccumulatedValue(), 10L));
    Assert.assertEquals(new MutableLong(22L), sl.accumulate(new MutableLong(2L), 20L));
    Assert.assertEquals(new MutableLong(41L), sl.merge(new MutableLong(32L), new MutableLong(9L)));

    Assert.assertEquals(new MutableFloat(9.0F), sf.accumulate(sf.defaultAccumulatedValue(), 9.0F));
    Assert.assertEquals(new MutableFloat(22.5F), sf.accumulate(new MutableFloat(2.5F), 20F));
    Assert.assertEquals(new MutableFloat(41.0F), sf.merge(new MutableFloat(33.1F), new MutableFloat(7.9F)));

    Assert.assertEquals(new MutableDouble(9.0), sd.accumulate(sd.defaultAccumulatedValue(), 9.0));
    Assert.assertEquals(new MutableDouble(22.5), sd.accumulate(new MutableDouble(2.5), 20.0));
    Assert.assertEquals(new MutableDouble(41.0), sd.merge(new MutableDouble(33.1), new MutableDouble(7.9)));
}
Example #4
Source File: GermlineFilter.java From gatk with BSD 3-Clause "New" or "Revised" License | 6 votes |
private double computeMinorAlleleFraction(final VariantContext vc, final Mutect2FilteringEngine filteringEngine, final int[] alleleCounts) {
    final MutableDouble weightedSumOfMafs = new MutableDouble(0);
    vc.getGenotypes().stream().filter(filteringEngine::isTumor).forEach(tumorGenotype -> {
        final String sample = tumorGenotype.getSampleName();
        final List<MinorAlleleFractionRecord> segments = tumorSegments.containsKey(sample)
                ? tumorSegments.get(sample).getOverlaps(vc).stream().collect(Collectors.toList())
                : Collections.emptyList();

        // minor allele fraction -- we abbreviate the name to make the formulas below less cumbersome
        final double maf = segments.isEmpty() ? 0.5 : segments.get(0).getMinorAlleleFraction();

        weightedSumOfMafs.add(maf * MathUtils.sum(tumorGenotype.getAD()));
    });

    // weighted average of sample minor allele fractions. This is the expected allele fraction of a germline het in the aggregated read counts
    return weightedSumOfMafs.getValue() / MathUtils.sum(alleleCounts);
}
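Several of the GATK examples (this one and Example #11 below, among others) follow the same idiom: a lambda cannot reassign a captured local double, so an effectively final MutableDouble is mutated inside the stream instead. A stripped-down sketch of that idiom, with a hypothetical weightedMean helper and no GATK types, might look like this:

import java.util.List;
import java.util.stream.IntStream;
import org.apache.commons.lang3.mutable.MutableDouble;

public class WeightedAverage {
    // Weighted mean of values, where weights.get(i) belongs to values.get(i).
    static double weightedMean(List<Double> values, List<Double> weights) {
        // Plain double locals could not be reassigned inside the lambda below;
        // the MutableDouble references stay effectively final while their contents change.
        final MutableDouble weightedSum = new MutableDouble(0);
        final MutableDouble totalWeight = new MutableDouble(0);
        IntStream.range(0, values.size()).forEach(i -> {
            weightedSum.add(values.get(i) * weights.get(i));
            totalWeight.add(weights.get(i));
        });
        return weightedSum.doubleValue() / totalWeight.doubleValue();
    }
}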
Example #5
Source File: Link.java From ij-ridgedetection with GNU General Public License v2.0 | 5 votes |
/**
 * Interpolate gradient.
 *
 * @param gradx the gradx
 * @param grady the grady
 * @param px the px
 * @param py the py
 * @param width the width
 * @param gx the gx
 * @param gy the gy
 */
/*
 * Interpolate the gradient of the gradient images gradx and grady with width
 * width at the point (px,py) using linear interpolation, and return the result
 * in (gx,gy).
 */
private void interpolate_gradient(float[] gradx, float[] grady, double px, double py, int width,
        MutableDouble gx, MutableDouble gy) {
    int gix, giy, gpos;
    double gfx, gfy, gx1, gy1, gx2, gy2, gx3, gy3, gx4, gy4;

    gix = (int) Math.floor(px);
    giy = (int) Math.floor(py);

    gfx = px % 1.0;
    gfy = py % 1.0;

    gpos = LinesUtil.LINCOOR(gix, giy, width);
    gx1 = gradx[gpos];
    gy1 = grady[gpos];
    gpos = LinesUtil.LINCOOR(gix + 1, giy, width);
    gx2 = gradx[gpos];
    gy2 = grady[gpos];
    gpos = LinesUtil.LINCOOR(gix, giy + 1, width);
    gx3 = gradx[gpos];
    gy3 = grady[gpos];
    gpos = LinesUtil.LINCOOR(gix + 1, giy + 1, width);
    gx4 = gradx[gpos];
    gy4 = grady[gpos];
    gx.setValue((1 - gfy) * ((1 - gfx) * gx1 + gfx * gx2) + gfy * ((1 - gfx) * gx3 + gfx * gx4));
    gy.setValue((1 - gfy) * ((1 - gfx) * gy1 + gfx * gy2) + gfy * ((1 - gfx) * gy3 + gfx * gy4));
}
Example #6
Source File: DoubleBufferedSumAggregator.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
private void sum(int bufferLength) throws StandardException {
    for (int i = 0; i < bufferLength; i++) {
        // Want to combine the big numbers first, so make
        // the key the negative of the exponent, as the
        // iterator will traverse the nodes in ascending
        // order of the keys.
        int ix = -java.lang.Math.getExponent(buffer[i]);
        MutableDouble entry = sumTree.get(ix);
        if (entry == null)
            sumTree.put(ix, new MutableDouble(buffer[i]));
        else
            entry.setValue(entry.doubleValue() + buffer[i]);
    }
    position = 0;
}
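The trick in this example, keying the TreeMap on the negated binary exponent so that large-magnitude values are combined first when the tree is traversed, can be seen in isolation in the following sketch; the class name and the plain double[] input are illustrative, not part of spliceengine.

import java.util.TreeMap;
import org.apache.commons.lang3.mutable.MutableDouble;

public class BucketedSum {
    // Groups values by binary exponent before summing, so terms of similar
    // magnitude are combined first and floating-point error is reduced.
    static double sum(double[] values) {
        TreeMap<Integer, MutableDouble> buckets = new TreeMap<>();
        for (double v : values) {
            int key = -Math.getExponent(v);   // negated so big numbers come first in key order
            MutableDouble bucket = buckets.get(key);
            if (bucket == null)
                buckets.put(key, new MutableDouble(v));
            else
                bucket.add(v);
        }
        double total = 0.0;
        for (MutableDouble bucket : buckets.values()) {   // ascending key = descending magnitude
            total += bucket.doubleValue();
        }
        return total;
    }
}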
Example #7
Source File: DoubleBufferedSumAggregator.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
private double collectFinalSum() throws StandardException {
    double tempSum = 0.0d;
    for (Map.Entry<Integer, MutableDouble> s : sumTree.entrySet()) {
        tempSum += s.getValue().doubleValue();
    }
    tempSum = NumberDataType.normalizeDOUBLE(tempSum);
    return tempSum;
}
Example #8
Source File: DoubleBufferedSumAggregator.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
public DoubleBufferedSumAggregator(int bufferSize, TreeMap<Integer, MutableDouble> tree) {
    if (tree == null)
        sumTree = new TreeMap<>();
    else
        sumTree = tree;
    int s = 1;
    while (s < bufferSize) {
        s <<= 1;
    }
    buffer = new double[s];
    this.length = s - 1;
    position = 0;
}
Example #9
Source File: PortfolioValueTest.java From Chronicle-Map with Apache License 2.0 | 5 votes |
protected static double computeTotalUsingIterator(final ChronicleMap<LongValue, PortfolioAssetInterface> cache,
        int start, int end) {
    if (end > start) {
        final PortfolioAssetInterface asset = Values.newHeapInstance(PortfolioAssetInterface.class);
        PortfolioValueAccumulator accumulator = new PortfolioValueAccumulator(new MutableDouble(), asset);
        for (int s = start; s < end; s++) {
            try (MapSegmentContext<LongValue, PortfolioAssetInterface, ?> context = cache.segmentContext(s)) {
                context.forEachSegmentEntry(accumulator);
            }
        }
        return accumulator.total.doubleValue();
    }
    return 0;
}
Example #10
Source File: ThresholdCalculator.java From gatk with BSD 3-Clause "New" or "Revised" License | 5 votes |
/**
 * Compute the filtering threshold that maximizes the F_beta score
 *
 * @param posteriors A list of posterior probabilities, which gets sorted
 * @param beta relative weight of recall to precision
 */
@VisibleForTesting
static double calculateThresholdBasedOnOptimalFScore(final List<Double> posteriors, final double beta) {
    ParamUtils.isPositiveOrZero(beta, "requested F-score beta must be non-negative");
    Collections.sort(posteriors);

    final double expectedTruePositives = posteriors.stream()
            .mapToDouble(prob -> 1 - prob).sum();

    // starting from filtering everything (threshold = 0) increase the threshold to maximize the F score
    final MutableDouble truePositives = new MutableDouble(0);
    final MutableDouble falsePositives = new MutableDouble(0);
    final MutableDouble falseNegatives = new MutableDouble(expectedTruePositives);
    int optimalIndexInclusive = -1; // include all indices up to and including this. -1 means filter all.
    double optimalFScore = 0;       // if you exclude everything, recall is zero

    final int N = posteriors.size();
    for (int n = 0; n < N; n++) {
        truePositives.add(1 - posteriors.get(n));
        falsePositives.add(posteriors.get(n));
        falseNegatives.subtract(1 - posteriors.get(n));
        final double F = (1 + beta * beta) * truePositives.getValue()
                / ((1 + beta * beta) * truePositives.getValue()
                        + beta * beta * falseNegatives.getValue() + falsePositives.getValue());
        if (F >= optimalFScore) {
            optimalIndexInclusive = n;
            optimalFScore = F;
        }
    }

    return optimalIndexInclusive == -1 ? 0
            : (optimalIndexInclusive == N - 1 ? 1 : posteriors.get(optimalIndexInclusive));
}
Example #11
Source File: Mutect2FilteringEngine.java From gatk with BSD 3-Clause "New" or "Revised" License | 5 votes |
public double[] weightedAverageOfTumorAFs(final VariantContext vc) {
    final MutableDouble totalWeight = new MutableDouble(0);
    final double[] AFs = new double[vc.getNAlleles() - 1];
    vc.getGenotypes().stream().filter(this::isTumor).forEach(g -> {
        final double weight = MathUtils.sum(g.getAD());
        totalWeight.add(weight);
        final double[] sampleAFs = VariantContextGetters.getAttributeAsDoubleArray(g, VCFConstants.ALLELE_FREQUENCY_KEY,
                () -> new double[] {0.0}, 0.0);
        MathArrays.scaleInPlace(weight, sampleAFs);
        MathUtils.addToArrayInPlace(AFs, sampleAFs);
    });
    MathArrays.scaleInPlace(1 / totalWeight.getValue(), AFs);
    return AFs;
}
Example #12
Source File: RectangleAndArcCollider.java From notreal2d with MIT License | 5 votes |
private static void updateFarthestPoint(
        @Nonnull Body body, @Nonnull Point2D point,
        @Nonnull Mutable<Point2D> farthestPoint, @Nonnull MutableDouble distanceToFarthestPoint,
        double startAngle, double finishAngle) {
    double distanceToPoint = body.getDistanceTo(point);

    if (GeometryUtil.isAngleBetween(new Vector2D(body.getPosition(), point).getAngle(), startAngle, finishAngle)
            && (farthestPoint.get() == null || distanceToPoint > distanceToFarthestPoint.doubleValue())) {
        farthestPoint.set(point);
        distanceToFarthestPoint.setValue(distanceToPoint);
    }
}
Example #13
Source File: RectangleAndArcCollider.java From notreal2d with MIT License | 5 votes |
private void updateNearestPoint(
        @Nonnull Body body, @Nonnull Point2D point,
        @Nonnull Mutable<Point2D> nearestPoint, @Nonnull MutableDouble distanceToNearestPoint) {
    double distanceToPoint = body.getDistanceTo(point);

    if (distanceToPoint >= epsilon
            && (nearestPoint.get() == null || distanceToPoint < distanceToNearestPoint.doubleValue())) {
        nearestPoint.set(point);
        distanceToNearestPoint.setValue(distanceToPoint);
    }
}
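Examples #12 and #13 use MutableDouble as a running best-distance holder that a helper updates in place across many calls. A reduced sketch of that pattern, with the geometry stripped out and hypothetical names, is shown below.

import org.apache.commons.lang3.mutable.MutableDouble;

public class NearestTracker {
    // Overwrites bestDistance when the candidate is closer; returns true if it was updated.
    // The caller keeps a single MutableDouble alive across calls instead of juggling return values.
    static boolean updateNearest(double candidateDistance, MutableDouble bestDistance) {
        if (candidateDistance < bestDistance.doubleValue()) {
            bestDistance.setValue(candidateDistance);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        MutableDouble best = new MutableDouble(Double.POSITIVE_INFINITY);
        for (double d : new double[] {3.2, 1.7, 2.9}) {
            updateNearest(d, best);
        }
        System.out.println(best.doubleValue());   // 1.7
    }
}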
Example #14
Source File: Position.java From ij-ridgedetection with GNU General Public License v2.0 | 4 votes |
/**
 * Compute line points.
 *
 * @param ku the ku
 * @param ismax the ismax
 * @param ev the ev
 * @param nx the nx
 * @param ny the ny
 * @param px the px
 * @param py the py
 * @param width the width
 * @param height the height
 * @param low the low
 * @param high the high
 * @param mode the mode
 */
/*
 * For each point in the image determine whether there is a local maximum of the
 * second directional derivative in the direction (nx[l],ny[l]) within the
 * pixel's boundaries. If so, set ismax[l] to 2 if the eigenvalue ev[l] is
 * larger than high, to 1 if ev[l] is larger than low, and to 0 otherwise.
 * Furthermore, put the sub-pixel position of the maximum into (px[l],py[l]).
 * The parameter mode determines whether maxima (dark line points) or minima
 * (bright line points) should be selected. The partial derivatives of the image
 * are input as ku[].
 */
private void compute_line_points(float[][] ku, byte[] ismax, float[] ev, float[] nx, float[] ny, float[] px,
        float[] py, int width, int height, double low, double high, int mode) {
    int r, c, l;
    double[] k = new double[5];
    double[] eigval = new double[2];
    double[][] eigvec = new double[2][2];
    double a, b;
    MutableDouble t = new MutableDouble();
    MutableInt num = new MutableInt();
    double n1, n2;
    double p1, p2;
    double val;

    for (r = 0; r < height; r++) {
        for (c = 0; c < width; c++) {
            l = LinesUtil.LINCOOR(r, c, width);
            k[0] = ku[0][l];
            k[1] = ku[1][l];
            k[2] = ku[2][l];
            k[3] = ku[3][l];
            k[4] = ku[4][l];
            ev[l] = (float) 0.0;
            nx[l] = (float) 0.0;
            ny[l] = (float) 0.0;

            compute_eigenvals(k[2], k[3], k[4], eigval, eigvec);
            if (mode == LinesUtil.MODE_LIGHT)
                val = -eigval[0];
            else
                val = eigval[0];
            if (val > 0.0) {
                ev[l] = (float) val;
                n1 = eigvec[0][0];
                n2 = eigvec[0][1];
                a = k[2] * n1 * n1 + 2.0 * k[3] * n1 * n2 + k[4] * n2 * n2;
                b = k[0] * n1 + k[1] * n2;
                solve_linear(a, b, t, num);
                if (num.intValue() != 0) {
                    p1 = t.doubleValue() * n1;
                    p2 = t.doubleValue() * n2;
                    if (Math.abs(p1) <= PIXEL_BOUNDARY && Math.abs(p2) <= PIXEL_BOUNDARY) {
                        if (val >= low) {
                            if (val >= high)
                                ismax[l] = 2;
                            else
                                ismax[l] = 1;
                        }
                        nx[l] = (float) n1;
                        ny[l] = (float) n2;
                        px[l] = (float) (r + p1);
                        py[l] = (float) (c + p2);
                    }
                }
            }
        }
    }
}
Example #15
Source File: Link.java From ij-ridgedetection with GNU General Public License v2.0 | 4 votes |
/**
 * Closest point.
 *
 * @param lx the lx
 * @param ly the ly
 * @param dx the dx
 * @param dy the dy
 * @param px the px
 * @param py the py
 * @param cx the cx
 * @param cy the cy
 * @param t the t
 */
/*
 * Calculate the closest point to (px,py) on the line (lx,ly) + t*(dx,dy) and
 * return the result in (cx,cy), plus the parameter in t.
 */
private void closest_point(double lx, double ly, double dx, double dy, double px, double py,
        MutableDouble cx, MutableDouble cy, MutableDouble t) {
    double mx, my, den, nom, tt;

    mx = px - lx;
    my = py - ly;
    den = dx * dx + dy * dy;
    nom = mx * dx + my * dy;
    if (den != 0)
        tt = nom / den;
    else
        tt = 0;
    cx.setValue(lx + tt * dx);
    cy.setValue(ly + tt * dy);
    t.setValue(tt);
}
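This example, like #5, #14, #19 and #27, uses MutableDouble as an "out parameter" so that a single method can hand back several doubles at once. A minimal sketch of that calling convention, reusing the closest-point math from Example #15 in a hypothetical standalone class, follows.

import org.apache.commons.lang3.mutable.MutableDouble;

public class OutParams {
    // Projects (px, py) onto the line (lx, ly) + t * (dx, dy), writing the closest
    // point into cx/cy and the line parameter into t, as in Example #15 above.
    static void closestPoint(double lx, double ly, double dx, double dy, double px, double py,
            MutableDouble cx, MutableDouble cy, MutableDouble t) {
        double den = dx * dx + dy * dy;
        double tt = (den != 0) ? ((px - lx) * dx + (py - ly) * dy) / den : 0;
        cx.setValue(lx + tt * dx);
        cy.setValue(ly + tt * dy);
        t.setValue(tt);
    }

    public static void main(String[] args) {
        MutableDouble cx = new MutableDouble();
        MutableDouble cy = new MutableDouble();
        MutableDouble t = new MutableDouble();
        closestPoint(0, 0, 1, 0, 3, 4, cx, cy, t);      // project (3, 4) onto the x-axis
        System.out.println(cx + ", " + cy + ", " + t);  // 3.0, 0.0, 3.0
    }
}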
Example #16
Source File: OverloadShedder.java From pulsar with Apache License 2.0 | 4 votes |
/**
 * Attempt to shed some bundles off every broker which is overloaded.
 *
 * @param loadData The load data used to make the unloading decision.
 * @param conf The service configuration.
 * @return A map from bundles to unload to the brokers on which they are loaded.
 */
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();

    // Check every broker and select
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        final double currentUsage = localData.getMaxResourceUsage();
        if (currentUsage < overloadThreshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Broker is not overloaded, ignoring at this point ({})", broker,
                        localData.printResourceUsage());
            }
            return;
        }

        // We want to offload enough traffic such that this broker will go below the overload threshold
        // Also, add a small margin so that this broker won't be very close to the threshold edge.
        double percentOfTrafficToOffload = currentUsage - overloadThreshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();

        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;

        log.info(
                "Attempting to shed load on {}, which has resource usage {}% above threshold {}% -- Offloading at least {} MByte/s of traffic ({})",
                broker, 100 * currentUsage, 100 * overloadThreshold, minimumThroughputToOffload / 1024 / 1024,
                localData.printResourceUsage());

        MutableDouble trafficMarkedToOffload = new MutableDouble(0);
        MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);

        if (localData.getBundles().size() > 1) {
            // Sort bundles by throughput, then pick the biggest N which combined
            // make up for at least the minimum throughput to offload
            loadData.getBundleData().entrySet().stream()
                    .filter(e -> localData.getBundles().contains(e.getKey()))
                    .map((e) -> {
                        // Map to throughput value
                        // Consider short-term byte rate to address system resource burden
                        String bundle = e.getKey();
                        BundleData bundleData = e.getValue();
                        TimeAverageMessageData shortTermData = bundleData.getShortTermData();
                        double throughput = shortTermData.getMsgThroughputIn() + shortTermData.getMsgThroughputOut();
                        return Pair.of(bundle, throughput);
                    }).filter(e -> {
                        // Only consider bundles that were not already unloaded recently
                        return !recentlyUnloadedBundles.containsKey(e.getLeft());
                    }).filter(e ->
                        localData.getBundles().contains(e.getLeft())
                    ).sorted((e1, e2) -> {
                        // Sort by throughput in reverse order
                        return Double.compare(e2.getRight(), e1.getRight());
                    }).forEach(e -> {
                        if (trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload
                                || atLeastOneBundleSelected.isFalse()) {
                            selectedBundlesCache.put(broker, e.getLeft());
                            trafficMarkedToOffload.add(e.getRight());
                            atLeastOneBundleSelected.setTrue();
                        }
                    });
        } else if (localData.getBundles().size() == 1) {
            log.warn(
                    "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                            + "No Load Shedding will be done on this broker",
                    localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });

    return selectedBundlesCache;
}
Example #17
Source File: SomaticClusteringModel.java From gatk with BSD 3-Clause "New" or "Revised" License | 4 votes |
private void performEMIteration(final boolean updateSomaticPriors) {
    final Map<Integer, MutableDouble> variantCountsByIndelLength =
            IntStream.range(-MAX_INDEL_SIZE_IN_PRIOR_MAP, MAX_INDEL_SIZE_IN_PRIOR_MAP + 1).boxed()
                    .collect(Collectors.toMap(l -> l, l -> new MutableDouble(0)));
    final List<double[]> responsibilities = new ArrayList<>(data.size());
    final double[] totalClusterResponsibilities = new double[clusters.size()];

    for (final Datum datum : data) {
        final double somaticProb = probabilityOfSomaticVariant(datum);
        final int indelLength = datum.getIndelLength();
        variantCountsByIndelLength.putIfAbsent(indelLength, new MutableDouble(0));
        variantCountsByIndelLength.get(indelLength).add(somaticProb);

        final double[] clusterLogLikelihoods = new IndexRange(0, clusters.size())
                .mapToDouble(c -> logClusterWeights[c] + clusters.get(c).logLikelihood(datum.getTotalCount(), datum.getAltCount()));
        final double[] clusterResponsibilitiesIfSomatic = NaturalLogUtils.normalizeFromLogToLinearSpace(clusterLogLikelihoods);
        final double[] clusterResponsibilities = MathArrays.scale(somaticProb, clusterResponsibilitiesIfSomatic);

        MathUtils.addToArrayInPlace(totalClusterResponsibilities, clusterResponsibilities);
        responsibilities.add(clusterResponsibilities);
    }

    MathUtils.applyToArrayInPlace(totalClusterResponsibilities, x -> x + REGULARIZING_PSEUDOCOUNT);
    logClusterWeights = MathUtils.applyToArrayInPlace(MathUtils.normalizeSumToOne(totalClusterResponsibilities), Math::log);

    final double technicalArtifactCount = obviousArtifactCount.getValue() + data.stream().mapToDouble(Datum::getArtifactProb).sum();
    final double variantCount = variantCountsByIndelLength.values().stream().mapToDouble(MutableDouble::doubleValue).sum();

    if (updateSomaticPriors) {
        logVariantVsArtifactPrior = Math.log((variantCount + REGULARIZING_PSEUDOCOUNT)
                / (variantCount + technicalArtifactCount + REGULARIZING_PSEUDOCOUNT * 2));

        if (callableSites.isPresent()) {
            IntStream.range(-MAX_INDEL_SIZE_IN_PRIOR_MAP, MAX_INDEL_SIZE_IN_PRIOR_MAP + 1).forEach(n -> {
                final double empiricalRatio = variantCountsByIndelLength.getOrDefault(n, new MutableDouble(0)).doubleValue()
                        / callableSites.getAsDouble();
                logVariantPriors.put(n, Math.log(Math.max(empiricalRatio, n == 0 ? 1.0e-8 : 1.0e-9)));
            });
        }
    }

    new IndexRange(0, clusters.size()).forEach(n -> {
        final double[] responsibilitiesForThisCluster = responsibilities.stream().mapToDouble(array -> array[n]).toArray();
        clusters.get(n).learn(data, responsibilitiesForThisCluster);
    });
}
Example #18
Source File: FilteringOutputStats.java From gatk with BSD 3-Clause "New" or "Revised" License | 4 votes |
private Map<Mutect2Filter, MutableDouble> makeEmptyFilterCounts() {
    return filters.stream().collect(Collectors.toMap(f -> f, f -> new MutableDouble(0)));
}
Example #19
Source File: Correct.java From ij-ridgedetection with GNU General Public License v2.0 | 4 votes |
/**
 * Line corrections.
 *
 * @param sigma the sigma
 * @param w_est the w est
 * @param r_est the r est
 * @param w the w
 * @param h the h
 * @param correct the correct
 * @param w_strong the w strong
 * @param w_weak the w weak
 * @return true, if successful
 */
/*
 * Return the correct line width w and asymmetry h, and a line position
 * correction correct for a line with extracted width w_est and extracted
 * gradient ratio r_est for a given sigma. Furthermore, return the line width on
 * the weak and strong side of the line. These values are obtained by bilinear
 * interpolation from the table ctable.
 */
static boolean line_corrections(double sigma, double w_est, double r_est, MutableDouble w, MutableDouble h,
        MutableDouble correct, MutableDouble w_strong, MutableDouble w_weak) {
    int i_we, i_re;
    boolean is_valid;
    double a, b;

    w_est = w_est / sigma;
    if (w_est < 2 || w_est > 6 || r_est < 0 || r_est > 1) {
        w.setValue(0);
        h.setValue(0);
        correct.setValue(0);
        w_strong.setValue(0);
        w_weak.setValue(0);
        return true;
    }
    i_we = (int) Math.floor((w_est - 2) * 10);
    i_re = (int) Math.floor(r_est * 20);
    if (i_we == 40)
        i_we = 39;
    if (i_re == 20)
        i_re = 19;
    is_valid = getCTable(i_re, i_we).is_valid && getCTable(i_re, (i_we + 1)).is_valid
            && getCTable((i_re + 1), i_we).is_valid && getCTable((i_re + 1), (i_we + 1)).is_valid;
    a = (w_est - 2) * 10 - i_we;
    b = r_est * 20 - i_re;
    w.setValue(BILINEAR(a, b, i_re, i_we, 0) * sigma);
    h.setValue(BILINEAR(a, b, i_re, i_we, 1));
    correct.setValue(BILINEAR(a, b, i_re, i_we, 2) * sigma);
    w_strong.setValue(BILINEAR(a, b, i_re, i_we, 3) * sigma);
    w_weak.setValue(BILINEAR(a, b, i_re, i_we, 4) * sigma);
    return !is_valid;
}
Example #20
Source File: SumDouble.java From attic-apex-malhar with Apache License 2.0 | 4 votes |
@Override
public Double getOutput(MutableDouble accumulatedValue) {
    return accumulatedValue.doubleValue();
}
Example #21
Source File: PortfolioValueAccumulator.java From Chronicle-Map with Apache License 2.0 | 4 votes |
public PortfolioValueAccumulator(MutableDouble total, PortfolioAssetInterface asset) {
    this.total = total;
    this.asset = asset;
}
Example #22
Source File: SumDouble.java From attic-apex-malhar with Apache License 2.0 | 4 votes |
@Override
public MutableDouble merge(MutableDouble accumulatedValue1, MutableDouble accumulatedValue2) {
    accumulatedValue1.add(accumulatedValue2);
    return accumulatedValue1;
}
Example #23
Source File: SumDouble.java From attic-apex-malhar with Apache License 2.0 | 4 votes |
@Override
public MutableDouble accumulate(MutableDouble accumulatedValue, Double input) {
    accumulatedValue.add(input);
    return accumulatedValue;
}
Example #24
Source File: SumDouble.java From attic-apex-malhar with Apache License 2.0 | 4 votes |
@Override
public MutableDouble defaultAccumulatedValue() {
    return new MutableDouble(0.0);
}
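Examples #20, #22, #23 and #24 are the four pieces of apex-malhar's SumDouble accumulation. Put together, a caller would drive them roughly as follows; the stand-in class below simply mirrors those four methods so the sketch stays self-contained, and the input values are invented.

import org.apache.commons.lang3.mutable.MutableDouble;

// Local stand-in mirroring the four SumDouble methods shown in Examples #20, #22, #23 and #24.
class SimpleSumDouble {
    public MutableDouble defaultAccumulatedValue() { return new MutableDouble(0.0); }
    public MutableDouble accumulate(MutableDouble acc, Double input) { acc.add(input); return acc; }
    public MutableDouble merge(MutableDouble acc1, MutableDouble acc2) { acc1.add(acc2); return acc1; }
    public Double getOutput(MutableDouble acc) { return acc.doubleValue(); }
}

public class SumDoubleUsage {
    public static void main(String[] args) {
        SimpleSumDouble sum = new SimpleSumDouble();
        MutableDouble partitionA = sum.accumulate(sum.defaultAccumulatedValue(), 2.5);
        sum.accumulate(partitionA, 20.0);                           // partitionA now holds 22.5
        MutableDouble partitionB = sum.accumulate(sum.defaultAccumulatedValue(), 18.5);
        MutableDouble merged = sum.merge(partitionA, partitionB);
        System.out.println(sum.getOutput(merged));                  // 41.0
    }
}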
Example #25
Source File: DoubleBufferedSumAggregator.java From spliceengine with GNU Affero General Public License v3.0 | 4 votes |
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    this.eliminatedNulls = in.readBoolean();
    this.isNull = in.readBoolean();
    this.sumTree = (TreeMap<Integer, MutableDouble>) in.readObject();
}
Example #26
Source File: ThresholdShedder.java From pulsar with Apache License 2.0 | 4 votes |
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;

    final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);

    if (avgUsage == 0) {
        log.warn("average max resource usage is 0");
        return selectedBundlesCache;
    }

    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);

        if (currentUsage < avgUsage + threshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is not overloaded, ignoring at this point", broker);
            }
            return;
        }

        double percentOfTrafficToOffload = currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;

        if (minimumThroughputToOffload < minThroughputThreshold) {
            if (log.isDebugEnabled()) {
                log.info("[{}] broker is planning to shed throughput {} MByte/s less than "
                                + "minimumThroughputThreshold {} MByte/s, skipping bundle unload.",
                        broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB);
            }
            return;
        }

        log.info(
                "Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
                        + " > {}% + {}% -- Offloading at least {} MByte/s of traffic, left throughput {} MByte/s",
                broker, currentUsage, avgUsage, threshold, minimumThroughputToOffload / MB,
                (brokerCurrentThroughput - minimumThroughputToOffload) / MB);

        MutableDouble trafficMarkedToOffload = new MutableDouble(0);
        MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);

        if (localData.getBundles().size() > 1) {
            loadData.getBundleData().entrySet().stream().map((e) -> {
                String bundle = e.getKey();
                BundleData bundleData = e.getValue();
                TimeAverageMessageData shortTermData = bundleData.getShortTermData();
                double throughput = shortTermData.getMsgThroughputIn() + shortTermData.getMsgThroughputOut();
                return Pair.of(bundle, throughput);
            }).filter(e ->
                !recentlyUnloadedBundles.containsKey(e.getLeft())
            ).filter(e ->
                localData.getBundles().contains(e.getLeft())
            ).sorted((e1, e2) ->
                Double.compare(e2.getRight(), e1.getRight())
            ).forEach(e -> {
                if (trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload
                        || atLeastOneBundleSelected.isFalse()) {
                    selectedBundlesCache.put(broker, e.getLeft());
                    trafficMarkedToOffload.add(e.getRight());
                    atLeastOneBundleSelected.setTrue();
                }
            });
        } else if (localData.getBundles().size() == 1) {
            log.warn(
                    "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                            + "No Load Shedding will be done on this broker",
                    localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });

    return selectedBundlesCache;
}
Example #27
Source File: Position.java From ij-ridgedetection with GNU General Public License v2.0 | 3 votes |
/**
 * Solve the linear equation a*x+b=0 and return the result in t and the number
 * of solutions in num.
 *
 * @param a the a
 * @param b the b
 * @param t the t
 * @param num the num
 */
public void solve_linear(double a, double b, MutableDouble t, MutableInt num) {
    if (a == 0.0) {
        // num.setValue(0);
        return;
    } else {
        num.setValue(1);
        t.setValue(-b / a);
        return;
    }
}