Java Code Examples for org.apache.spark.api.java.JavaPairRDD#persist()
The following examples show how to use
org.apache.spark.api.java.JavaPairRDD#persist() .
You can go to the original project or source file by following the links above each example.
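Before the project examples, here is a minimal, self-contained sketch of the call itself; the data, app name, and storage level are illustrative and not taken from any project below:

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import scala.Tuple2;

public class PersistSketch {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext(
        new SparkConf().setAppName("persist-sketch").setMaster("local[2]"));
    JavaPairRDD<String, Integer> pairs = jsc.parallelizePairs(
        Arrays.asList(new Tuple2<>("a", 1), new Tuple2<>("b", 2)));
    // Assign a storage level once; the RDD is materialized by the first action.
    pairs.persist(StorageLevel.MEMORY_AND_DISK());
    long n = pairs.count();          // first action: computes and caches the partitions
    long m = pairs.values().count(); // later actions reuse the cached partitions
    pairs.unpersist();               // release the cached blocks when no longer needed
    jsc.stop();
  }
}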
Example 1
Source File: HoodieBloomIndex.java From hudi with Apache License 2.0
@Override
public JavaRDD<HoodieRecord<T>> tagLocation(JavaRDD<HoodieRecord<T>> recordRDD, JavaSparkContext jsc,
    HoodieTable<T> hoodieTable) {
  // Step 0: cache the input record RDD
  if (config.getBloomIndexUseCaching()) {
    recordRDD.persist(SparkConfigUtils.getBloomIndexInputStorageLevel(config.getProps()));
  }

  // Step 1: Extract out thinner JavaPairRDD of (partitionPath, recordKey)
  JavaPairRDD<String, String> partitionRecordKeyPairRDD =
      recordRDD.mapToPair(record -> new Tuple2<>(record.getPartitionPath(), record.getRecordKey()));

  // Lookup indexes for all the partition/recordkey pairs
  JavaPairRDD<HoodieKey, HoodieRecordLocation> keyFilenamePairRDD =
      lookupIndex(partitionRecordKeyPairRDD, jsc, hoodieTable);

  // Cache the result, for subsequent stages.
  if (config.getBloomIndexUseCaching()) {
    keyFilenamePairRDD.persist(StorageLevel.MEMORY_AND_DISK_SER());
  }
  if (LOG.isDebugEnabled()) {
    long totalTaggedRecords = keyFilenamePairRDD.count();
    LOG.debug("Number of update records (ones tagged with a fileID): " + totalTaggedRecords);
  }

  // Step 4: Tag the incoming records, as inserts or updates, by joining with existing record keys
  // Cost: 4 sec.
  JavaRDD<HoodieRecord<T>> taggedRecordRDD = tagLocationBacktoRecords(keyFilenamePairRDD, recordRDD);

  if (config.getBloomIndexUseCaching()) {
    recordRDD.unpersist(); // unpersist the input Record RDD
    keyFilenamePairRDD.unpersist();
  }
  return taggedRecordRDD;
}
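The pattern worth noting in Example 1 is the persist/unpersist bracket: the input is cached only when the config asks for it, read by both the index lookup and the final tagging join, and released before the method returns. A reduced sketch of that bracket, assuming a live JavaSparkContext jsc and the imports from the sketch above; useCaching and the path-like strings are hypothetical stand-ins, not Hudi code:

boolean useCaching = true; // stand-in for config.getBloomIndexUseCaching()
JavaRDD<String> input = jsc.parallelize(Arrays.asList("p1/k1", "p2/k2"));
if (useCaching) {
  input.persist(StorageLevel.MEMORY_AND_DISK_SER()); // serialized: cheaper on memory, costs CPU
}
JavaPairRDD<String, String> byPartition =
    input.mapToPair(s -> new Tuple2<>(s.split("/")[0], s.split("/")[1]));
long keyed = byPartition.count(); // first consumer of the cached input
long total = input.count();       // second consumer reuses the cached partitions
if (useCaching) {
  input.unpersist();              // release the blocks once both consumers have run
}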
Example 2
Source File: PMapmmSPInstruction.java From systemds with Apache License 2.0
@Override
public void processInstruction(ExecutionContext ec) {
  SparkExecutionContext sec = (SparkExecutionContext) ec;

  //get inputs
  JavaPairRDD<MatrixIndexes,MatrixBlock> in1 = sec.getBinaryMatrixBlockRDDHandleForVariable( input1.getName() );
  JavaPairRDD<MatrixIndexes,MatrixBlock> in2 = sec.getBinaryMatrixBlockRDDHandleForVariable( input2.getName() );
  DataCharacteristics mc1 = sec.getDataCharacteristics(input1.getName());

  // This avoids errors such as java.lang.UnsupportedOperationException:
  // Cannot change storage level of an RDD after it was already assigned a level.
  // Ideally, we should ensure that we do not redundantly call persist on the same RDD.
  StorageLevel pmapmmStorageLevel = StorageLevel.MEMORY_AND_DISK();

  //cache right hand side because it is accessed many times
  in2 = in2.repartition(sec.getSparkContext().defaultParallelism())
           .persist(pmapmmStorageLevel);

  JavaPairRDD<MatrixIndexes,MatrixBlock> out = null;
  for( int i=0; i<mc1.getRows(); i+=NUM_ROWBLOCKS*mc1.getBlocksize() ) {
    //create broadcast for rdd partition
    JavaPairRDD<MatrixIndexes,MatrixBlock> rdd = in1
      .filter(new IsBlockInRange(i+1, i+NUM_ROWBLOCKS*mc1.getBlocksize(), 1, mc1.getCols(), mc1))
      .mapToPair(new PMapMMRebaseBlocksFunction(i/mc1.getBlocksize()));
    int rlen = (int)Math.min(mc1.getRows()-i, NUM_ROWBLOCKS*mc1.getBlocksize());
    PartitionedBlock<MatrixBlock> pmb = SparkExecutionContext.toPartitionedMatrixBlock(
      rdd, rlen, (int)mc1.getCols(), mc1.getBlocksize(), -1L);
    Broadcast<PartitionedBlock<MatrixBlock>> bpmb = sec.getSparkContext().broadcast(pmb);

    //matrix multiplication
    JavaPairRDD<MatrixIndexes,MatrixBlock> rdd2 = in2
      .flatMapToPair(new PMapMMFunction(bpmb, i/mc1.getBlocksize()));
    rdd2 = RDDAggregateUtils.sumByKeyStable(rdd2, false);
    rdd2.persist(pmapmmStorageLevel)
        .count();
    bpmb.unpersist(false);

    if( out == null )
      out = rdd2;
    else
      out = out.union(rdd2);
  }

  //cache final result
  out = out.persist(pmapmmStorageLevel);
  out.count();

  //put output RDD handle into symbol table
  sec.setRDDHandleForVariable(output.getName(), out);
  sec.addLineageRDD(output.getName(), input1.getName());
  sec.addLineageRDD(output.getName(), input2.getName());

  //update output statistics if not inferred
  updateBinaryMMOutputDataCharacteristics(sec, true);
}
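The comment in Example 2 about java.lang.UnsupportedOperationException refers to a Spark rule: an RDD's storage level may be assigned only once. Re-persisting with the same level is accepted, but a different level throws. A minimal illustration, reusing jsc and the imports from the first sketch (the data is hypothetical):

JavaPairRDD<String, Integer> pairs = jsc.parallelizePairs(
    Arrays.asList(new Tuple2<>("x", 1)));
pairs.persist(StorageLevel.MEMORY_AND_DISK()); // level assigned here
pairs.persist(StorageLevel.MEMORY_AND_DISK()); // same level again: accepted
pairs.persist(StorageLevel.DISK_ONLY());       // different level: throws
                                               // UnsupportedOperationException

Note also the rdd2.persist(pmapmmStorageLevel).count() inside the loop: the eager count() materializes the cached partitions before bpmb.unpersist(false) drops the broadcast, so later use of rdd2 never needs to recompute against the released broadcast.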
Example 3
Source File: UserVisitAnalyze.java From UserActionAnalyzePlatform with Apache License 2.0
public static void main(String[] args) {
  args = new String[]{"1"};
  /**
   * Build the Spark context
   */
  SparkConf conf = new SparkConf().setAppName(Constants.APP_NAME_SESSION).setMaster("local[3]");
  JavaSparkContext context = new JavaSparkContext(conf);
  SQLContext sc = getSQLContext(context.sc());
  // Generate mock data
  mock(context, sc);
  // Get the corresponding DAO component
  TaskDao dao = DaoFactory.getTaskDao();
  // Get the task id from the externally passed arguments
  Long taskId = ParamUtils.getTaskIdFromArgs(args);
  // Query the corresponding task from the database
  Task task = dao.findTaskById(taskId);
  JSONObject jsonObject = JSONObject.parseObject(task.getTaskParam());
  // Get the sessions within the specified range
  JavaRDD<Row> sessionRangeDate = getActionRDD(sc, jsonObject);
  // A new method added here, mainly for mapping
  JavaPairRDD<String, Row> sessionInfoPairRDD = getSessonInfoPairRDD(sessionRangeDate);
  // Persist RDDs that are reused
  sessionInfoPairRDD.persist(StorageLevel.DISK_ONLY());
  // The two RDDs above are
  // Aggregate by session
  JavaPairRDD<String, String> sesssionAggregateInfoRDD = aggregateBySessionId(sc, sessionInfoPairRDD);
  // Filter the RDD by conditions
  // Refactored: compute the statistics in the same pass
  Accumulator<String> sessionAggrStatAccumulator = context.accumulator("", new SessionAggrStatAccumulator());
  // An action is required before reading the accumulator, otherwise it will be empty
  JavaPairRDD<String, String> filteredSessionRDD =
      filterSessionAndAggrStat(sesssionAggregateInfoRDD, jsonObject, sessionAggrStatAccumulator);
  // Persist RDDs that are reused
  filteredSessionRDD.persist(StorageLevel.DISK_ONLY());
  // Get the common full-info RDD that matches the filter conditions
  JavaPairRDD<String, Row> commonFullClickInfoRDD = getFilterFullInfoRDD(filteredSessionRDD, sessionInfoPairRDD);
  // Persist RDDs that are reused
  commonFullClickInfoRDD.persist(StorageLevel.DISK_ONLY());
  // Session aggregate statistics: the share of each visit-duration and visit-step range
  /**
   * The idea behind the refactoring:
   * 1. Do not generate any new RDDs
   * 2. Do not traverse the session data a second time
   * 3. Compute each session's visit duration and step length directly during aggregation
   * 4. Fold the computation into the existing aggregation, with a custom Accumulator, to solve it in one pass
   * Rules of thumb for Spark development:
   * 1. Create as few RDDs as possible
   * 2. Apply as few operators to an RDD as possible; where feasible, implement several requirements inside one operator
   * 3. Use as few shuffle operators (e.g. groupByKey, reduceByKey, sortByKey) as possible;
   *    shuffles cause heavy disk reads and writes and severely degrade performance, and
   *    operators with and without shuffles can differ enormously in performance;
   *    shuffle operators also easily cause data skew, and once data skews it is a performance killer
   * 4. Whatever the feature, performance comes first: given the scale of big data projects,
   *    an inefficient job can run for hours, which is a disaster for the user experience
   */
  /**
   * Use the countByKey operator to implement random session extraction
   */
  randomExtractSession(taskId, filteredSessionRDD, sessionInfoPairRDD);
  // An action operator must run before using the accumulator, otherwise the value read is empty
  //filteredSessionRDD.count();
  // Compute each session-range proportion and write it to MySQL
  calculateAndPersist(sessionAggrStatAccumulator.value(), taskId);
  // Get the Top 10 popular categories
  List<Tuple2<CategorySortKey, String>> top10CategoryIds = getTop10Category(taskId, commonFullClickInfoRDD);
  // Get the Top 10 sessions by clicks for each popular category
  getTop10Session(context, taskId, sessionInfoPairRDD, top10CategoryIds);
  // Close the Spark context
  context.close();
}
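Example 3's comments stress twice that an accumulator holds its zero value until an action runs, because transformations are lazy. A minimal illustration using Spark 2.x's built-in LongAccumulator (the example itself targets the older Accumulator API with a custom SessionAggrStatAccumulator); jsc and the data are hypothetical, as in the earlier sketches:

import org.apache.spark.util.LongAccumulator;

LongAccumulator acc = jsc.sc().longAccumulator();
JavaRDD<Integer> nums = jsc.parallelize(Arrays.asList(1, 2, 3));
JavaRDD<Integer> doubled = nums.map(x -> { acc.add(1); return x * 2; });
// acc.value() is still 0 here: map() has not executed yet
doubled.count();         // the action runs the job and updates the accumulator
long seen = acc.value(); // now 3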