Java Code Examples for org.apache.spark.broadcast.Broadcast#destroy()
The following examples, drawn from open-source projects, show how to use
org.apache.spark.broadcast.Broadcast#destroy().
The source file, project, and license are noted above each example.
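Before the examples, a quick recap of the semantics: destroy() permanently removes all data and metadata of a broadcast variable on both the driver and the executors, and the variable cannot be used again afterwards; unpersist() only deletes the cached copies on the executors, and the value is re-broadcast if it is used again. The sketch below shows the typical lifecycle; it is a minimal, hypothetical setup (local master, placeholder names), not taken from any of the projects listed here.

import java.util.Arrays;
import java.util.List;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;

public class BroadcastLifecycle {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().master("local").appName("BroadcastDemo").getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        Broadcast<List<Integer>> lookup = jsc.broadcast(Arrays.asList(1, 2, 3));
        long hits = jsc.parallelize(Arrays.asList(1, 2, 3, 4, 5))
                .filter(x -> lookup.value().contains(x)) // executors read the broadcast value
                .count();                                // materialize before cleaning up
        System.out.println(hits + " elements matched");

        lookup.unpersist(); // drops executor copies only; the variable could still be reused
        lookup.destroy();   // drops all state, including on the driver; unusable afterwards
        spark.stop();
    }
}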
Example 1
Source File: BroadcastVariable.java From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
    // The commented-out lines show the equivalent setup through JavaSparkContext:
    // SparkConf conf = new SparkConf().setMaster("local").setAppName("BroadCasting");
    // JavaSparkContext jsc = new JavaSparkContext(conf);
    // Broadcast<String> broadcastVar = jsc.broadcast("Hello Spark");

    SparkSession sparkSession = SparkSession.builder().master("local").appName("My App")
            .config("spark.sql.warehouse.dir", "file:////C:/Users/sgulati/spark-warehouse")
            .getOrCreate();

    // Broadcasting through the Scala SparkContext requires an explicit ClassTag.
    Broadcast<String> broadcastVar = sparkSession.sparkContext()
            .broadcast("Hello Spark", scala.reflect.ClassTag$.MODULE$.apply(String.class));

    System.out.println(broadcastVar.getValue());

    broadcastVar.unpersist();        // remove cached copies from the executors
    // broadcastVar.unpersist(true); // blocking variant
    broadcastVar.destroy();          // release all state; the variable is unusable afterwards
}
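Two things are worth noting in this example. First, the broadcast is created through the underlying Scala SparkContext, so a ClassTag has to be passed explicitly; the JavaSparkContext.broadcast(...) overload shown in the commented-out lines supplies it automatically. Second, the cleanup calls illustrate the difference this page is about: unpersist() merely evicts the cached copies from the executors, while the final destroy() permanently releases the variable on the driver as well.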
Example 2
Source File: SparkExecutionContext.java From systemds with Apache License 2.0
/**
 * This call destroys a broadcast variable at all executors and the driver.
 * Hence, it is intended to be used on rmvar only. Depending on the
 * ASYNCHRONOUS_VAR_DESTROY configuration, this is asynchronous or not.
 *
 * @param bvar broadcast variable
 */
public static void cleanupBroadcastVariable(Broadcast<?> bvar) {
    // In comparison to 'unpersist' (which would only delete the broadcast
    // from the executors), this call also deletes related data from the driver.
    if (bvar.isValid()) {
        bvar.destroy(!ASYNCHRONOUS_VAR_DESTROY);
    }
}
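Two details here deserve a note. The no-argument Broadcast#destroy() blocks until destruction has completed; the one-argument overload used above lets the caller choose an asynchronous destroy instead (SystemDS passes !ASYNCHRONOUS_VAR_DESTROY as the blocking flag). The isValid() guard prevents destroying a broadcast twice, which would raise a SparkException. Both destroy(boolean) and isValid() are declared private[spark] in Spark's Scala sources, but that modifier compiles to public bytecode, which is why this Java code can call them.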
Example 3
Source File: SparkUtils.java From gatk with BSD 3-Clause "New" or "Revised" License
/** Sometimes Spark has trouble destroying a broadcast variable, but we'd like the app to continue anyway. */
public static <T> void destroyBroadcast(final Broadcast<T> broadcast, final String whatBroadcast) {
    try {
        broadcast.destroy();
    } catch (final Exception e) {
        logger.warn("Failed to destroy broadcast for " + whatBroadcast, e);
    }
}
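A call site for this helper is straightforward; the broadcast variable and label below are hypothetical:

final Broadcast<Map<String, Integer>> tableBc = ctx.broadcast(table);
// ... run Spark jobs that read tableBc.value() ...
SparkUtils.destroyBroadcast(tableBc, "lookup table"); // failures are logged, never rethrown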
Example 4
Source File: CoverageModelWLinearOperatorSpark.java From gatk-protected with BSD 3-Clause "New" or "Revised" License
@Override
public INDArray operate(@Nonnull final INDArray W_tl) throws DimensionMismatchException {
    if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents)
        throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);

    /* Z F W */
    final long startTimeZFW = System.nanoTime();
    final INDArray Z_F_W_tl = Nd4j.create(numTargets, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
        Z_F_W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
            F_tt.operate(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
    Z_F_W_tl.assign(Nd4j.gemm(Z_F_W_tl, Z_ll, false, false));
    final long endTimeZFW = System.nanoTime();

    /* perform a broadcast hash join */
    final long startTimeQW = System.nanoTime();
    final Map<LinearlySpacedIndexBlock, INDArray> W_tl_map =
        CoverageModelSparkUtils.partitionINDArrayToMap(targetSpaceBlocks, W_tl);
    final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_tl_bc = ctx.broadcast(W_tl_map);
    final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(
        computeRDD.mapValues(cb -> {
            final INDArray W_tl_chunk = W_tl_bc.value().get(cb.getTargetSpaceBlock());
            final INDArray Q_tll_chunk = cb.getINDArrayFromCache(
                CoverageModelEMComputeBlock.CoverageModelICGCacheNode.Q_tll);
            final Collection<INDArray> W_Q_chunk = IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
                .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti))
                    .mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
                .collect(Collectors.toList());
            return Nd4j.vstack(W_Q_chunk);
        }), 0);
    W_tl_bc.destroy();
    // final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_tl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_tl,
    //     targetSpaceBlocks, ctx, true);
    // final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocks(
    //     computeRDD.join(W_tl_RDD).mapValues(p -> {
    //         final CoverageModelEMComputeBlock cb = p._1;
    //         final INDArray W_tl_chunk = p._2;
    //         final INDArray Q_tll_chunk = cb.getINDArrayFromCache("Q_tll");
    //         return Nd4j.vstack(IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
    //             .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti)).mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
    //             .collect(Collectors.toList()));
    //     }), false);
    // W_tl_RDD.unpersist();
    final long endTimeQW = System.nanoTime();

    logger.debug("Local [Z] [F] [W] timing: " + (endTimeZFW - startTimeZFW)/1000000 + " ms");
    logger.debug("Spark [Q] [W] timing: " + (endTimeQW - startTimeQW)/1000000 + " ms");
    return Q_W_tl.addi(Z_F_W_tl);
}
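Stripped of the Nd4j details, the broadcast hash join above follows a common pattern: broadcast the small side as a map, look entries up inside a transformation, materialize the result, and only then destroy the broadcast. Below is a minimal sketch of just that pattern; the data and method are hypothetical, and jsc is assumed to be a JavaSparkContext.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

static List<String> broadcastJoin(JavaSparkContext jsc) {
    Map<Integer, String> smallSide = new HashMap<>();
    smallSide.put(1, "one");
    smallSide.put(2, "two");
    final Broadcast<Map<Integer, String>> sideBc = jsc.broadcast(smallSide);

    List<String> joined = jsc.parallelize(Arrays.asList(1, 2, 3))
            .map(k -> k + "=" + sideBc.value().getOrDefault(k, "?")) // executor-side hash lookup
            .collect();                                              // force evaluation first...

    sideBc.destroy(); // ...only then is it safe to destroy the broadcast
    return joined;
}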
Example 5
Source File: CoverageModelWPreconditionerSpark.java From gatk-protected with BSD 3-Clause "New" or "Revised" License
@Override
public INDArray operate(@Nonnull final INDArray W_tl) throws DimensionMismatchException {
    if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents) {
        throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);
    }

    long startTimeRFFT = System.nanoTime();
    /* forward rfft */
    final INDArray W_kl = Nd4j.create(fftSize, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
        W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
            Nd4j.create(F_tt.getForwardFFT(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li))),
                new int[]{fftSize, 1})));
    long endTimeRFFT = System.nanoTime();

    /* apply the preconditioner in the Fourier space */
    long startTimePrecond = System.nanoTime();
    final Map<LinearlySpacedIndexBlock, INDArray> W_kl_map =
        CoverageModelSparkUtils.partitionINDArrayToMap(fourierSpaceBlocks, W_kl);
    final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_kl_bc = ctx.broadcast(W_kl_map);
    final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> preconditionedWRDD = linOpPairRDD
        .mapToPair(p -> {
            final INDArray W_kl_chuck = W_kl_bc.value().get(p._1);
            final INDArray linOp_chunk = p._2;
            final int blockSize = linOp_chunk.shape()[0];
            final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel()
                .mapToObj(k -> CoverageModelEMWorkspaceMathUtils.linsolve(linOp_chunk.get(NDArrayIndex.point(k)),
                    W_kl_chuck.get(NDArrayIndex.point(k))))
                .collect(Collectors.toList());
            return new Tuple2<>(p._1, Nd4j.vstack(linOpWList));
        });
    W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(preconditionedWRDD, 0));
    W_kl_bc.destroy();
    // final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_kl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_kl,
    //     fourierSpaceBlocks, ctx, true);
    // W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocks(linOpPairRDD.join((W_kl_RDD))
    //     .mapValues(p -> {
    //         final INDArray linOp = p._1;
    //         final INDArray W = p._2;
    //         final int blockSize = linOp.shape()[0];
    //         final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel().mapToObj(k ->
    //             CoverageModelEMWorkspaceMathUtils.linsolve(linOp.get(NDArrayIndex.point(k)),
    //                 W.get(NDArrayIndex.point(k))))
    //             .collect(Collectors.toList());
    //         return Nd4j.vstack(linOpWList);
    //     }), false));
    // W_kl_RDD.unpersist();
    long endTimePrecond = System.nanoTime();

    /* irfft */
    long startTimeIRFFT = System.nanoTime();
    final INDArray res = Nd4j.create(numTargets, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
        res.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
            F_tt.getInverseFFT(W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
    long endTimeIRFFT = System.nanoTime();

    logger.debug("Local FFT timing: " + (endTimeRFFT - startTimeRFFT + endTimeIRFFT - startTimeIRFFT)/1000000 + " ms");
    logger.debug("Spark preconditioner application timing: " + (endTimePrecond - startTimePrecond)/1000000 + " ms");
    return res;
}
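As in the previous example, the ordering is the important part: assembleINDArrayBlocksFromRDD(...) presumably brings the preconditioned blocks back to the driver, forcing the RDD to be evaluated, and only then is W_kl_bc destroyed. Destroying the broadcast before an action has consumed preconditionedWRDD would make the value() lookup inside mapToPair fail with a SparkException.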
Example 6
Source File: ValueSetUdfs.java From bunsen with Apache License 2.0
/**
 * Pops a BroadcastableValueSets from the user-defined function stack.
 *
 * @param spark the spark session
 * @return true if there is still a registered in_valueset UDF, false otherwise
 */
public static synchronized boolean popUdf(SparkSession spark) {
    if (valueSetStack.isEmpty()) {
        return false;
    } else {
        // Cleanup the previously broadcast valuesets
        Broadcast<BroadcastableValueSets> old = valueSetStack.pop();
        old.destroy();

        if (valueSetStack.isEmpty()) {
            return false;
        } else {
            // Re-apply the previous function.
            Broadcast<BroadcastableValueSets> current = valueSetStack.peek();
            spark.udf()
                .register("in_valueset", new InValuesetUdf(current), DataTypes.BooleanType);
            return true;
        }
    }
}
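Since popUdf returns whether a valueset UDF is still registered, a caller can unwind the entire stack with a simple loop (hypothetical cleanup code):

// Pop until the stack is empty; each iteration destroys one broadcast
// and re-registers the previous valueset UDF, if any remains.
while (ValueSetUdfs.popUdf(spark)) {
    // intentionally empty
}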