org.apache.kylin.job.execution.ExecuteResult Java Examples
The following examples show how to use
org.apache.kylin.job.execution.ExecuteResult.
You can go to the original project or source file by following the links above each example.
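Taken together, the examples below exercise a small API surface: a step's doWork() either returns an ExecuteResult built with the static factories createSucceed() / createError(Throwable), or constructs one directly from a State (SUCCEED, FAILED, ERROR, STOPPED, DISCARDED) and an output string. Here is a minimal sketch of that pattern; the class name MyNoopStep is hypothetical and not taken from any of the projects below:

import org.apache.kylin.job.exception.ExecuteException;
import org.apache.kylin.job.execution.AbstractExecutable;
import org.apache.kylin.job.execution.ExecutableContext;
import org.apache.kylin.job.execution.ExecuteResult;

// Hypothetical step, shown only to illustrate the ExecuteResult idioms
// that recur in the real steps below.
public class MyNoopStep extends AbstractExecutable {

    @Override
    protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
        try {
            // ... perform the step's actual work here ...

            // Report success; the examples below use new ExecuteResult() and
            // new ExecuteResult(ExecuteResult.State.SUCCEED, "...") for the same purpose.
            return ExecuteResult.createSucceed();
        } catch (Exception e) {
            // Report failure and keep the cause so that callers can read it
            // back via result.getThrowable() (see Example #11).
            return ExecuteResult.createError(e);
        }
    }
}

On the consuming side, Example #24 below shows a result being inspected with result.succeed() and result.output().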
Example #1
Source File: NSparkUpdateMetaAndCleanupAfterMergeStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    String cubeId = getParam(MetadataConstants.P_CUBE_ID);
    String[] segments = StringUtils.split(getParam(MetadataConstants.P_SEGMENT_NAMES), ",");
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    CubeInstance cube = CubeManager.getInstance(config).getCubeByUuid(cubeId);
    updateMetadataAfterMerge(cubeId);

    for (String segmentName : segments) {
        String path = config.getHdfsWorkingDirectory() + cube.getProject() + "/parquet/" + cube.getName()
                + "/" + segmentName;
        try {
            HadoopUtil.deletePath(HadoopUtil.getCurrentConfiguration(), new Path(path));
        } catch (IOException e) {
            throw new ExecuteException("Can not delete segment: " + segmentName + ", in cube: " + cube.getName());
        }
    }

    return ExecuteResult.createSucceed();
}
Example #2
Source File: HDFSPathGarbageCollectionStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        List<String> toDeletePaths = getDeletePaths();
        dropHdfsPathOnCluster(toDeletePaths, HadoopUtil.getWorkingFileSystem());

        if (StringUtils.isNotEmpty(context.getConfig().getHBaseClusterFs())) {
            dropHdfsPathOnCluster(toDeletePaths, FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration()));
        }
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        output.append("\n").append(e.getLocalizedMessage());
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
Example #3
Source File: SelfStopExecutable.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    doingWork = true;
    try {
        for (int i = 0; i < 60; i++) {
            sleepOneSecond();
            if (isDiscarded())
                return new ExecuteResult(ExecuteResult.State.STOPPED, "stopped");
        }
        return new ExecuteResult();
    } finally {
        doingWork = false;
    }
}
Example #4
Source File: CreateFlatHiveTableByLivyStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    stepLogger.setILogListener((infoKey, info) -> {
        // only care about two properties here
        if (ExecutableConstants.YARN_APP_ID.equals(infoKey) || ExecutableConstants.YARN_APP_URL.equals(infoKey)) {
            getManager().addJobInfo(getId(), info);
        }
    });
    KylinConfig config = getCubeSpecificConfig();
    try {
        createFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example #5
Source File: CreateFlatHiveTableStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    stepLogger.setILogListener((infoKey, info) -> {
        // only care about two properties here
        if (ExecutableConstants.YARN_APP_ID.equals(infoKey) || ExecutableConstants.YARN_APP_URL.equals(infoKey)) {
            getManager().addJobInfo(getId(), info);
        }
    });
    KylinConfig config = getCubeSpecificConfig();
    try {
        createFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example #6
Source File: CreateMrHiveDictStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = getCubeSpecificConfig();
    try {
        String preHdfsShell = getPreHdfsShell();
        if (Objects.nonNull(preHdfsShell) && !"".equalsIgnoreCase(preHdfsShell)) {
            doRetry(preHdfsShell, config);
        }

        createMrHiveDict(config);

        String postfixHdfsCmd = getPostfixHdfsShell();
        if (Objects.nonNull(postfixHdfsCmd) && !"".equalsIgnoreCase(postfixHdfsCmd)) {
            doRetry(postfixHdfsCmd, config);
        }

        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog());
    }
}
Example #7
Source File: UpdateCubeInfoAfterCheckpointStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        List<CubeSegment> newSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        Map<Long, Long> recommendCuboidsWithStats = CuboidStatsReaderUtil
                .readCuboidStatsFromSegments(recommendCuboids, newSegments);
        if (recommendCuboidsWithStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        cubeManager.promoteCheckpointOptimizeSegments(cube, recommendCuboidsWithStats,
                newSegments.toArray(new CubeSegment[newSegments.size()]));
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
Example #8
Source File: MergeDictionaryStep.java From Kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig conf = context.getConfig();
    final CubeManager mgr = CubeManager.getInstance(conf);
    final CubeInstance cube = mgr.getCube(getCubeName());
    final CubeSegment newSegment = cube.getSegmentById(getSegmentId());
    final List<CubeSegment> mergingSegments = getMergingSegments(cube);

    Collections.sort(mergingSegments);

    try {
        checkLookupSnapshotsMustIncremental(mergingSegments);

        makeDictForNewSegment(conf, cube, newSegment, mergingSegments);
        makeSnapshotForNewSegment(cube, newSegment, mergingSegments);

        mgr.updateCube(cube);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, "succeed");
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    }
}
Example #9
Source File: CopyDictionaryStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams())).latestCopyForWrite();
    final CubeSegment optimizeSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubeSegment oldSegment = optimizeSegment.getCubeInstance().getOriginalSegmentToOptimize(optimizeSegment);
    Preconditions.checkNotNull(oldSegment, "cannot find the original segment to be optimized by " + optimizeSegment);

    // --- Copy dictionary
    optimizeSegment.getDictionaries().putAll(oldSegment.getDictionaries());
    optimizeSegment.getSnapshots().putAll(oldSegment.getSnapshots());
    optimizeSegment.getRowkeyStats().addAll(oldSegment.getRowkeyStats());

    try {
        CubeUpdate cubeBuilder = new CubeUpdate(cube);
        cubeBuilder.setToUpdateSegs(optimizeSegment);
        mgr.updateCube(cubeBuilder);
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }

    return new ExecuteResult();
}
Example #10
Source File: UpdateCubeInfoAfterCheckpointStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        List<CubeSegment> newSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        Map<Long, Long> recommendCuboidsWithStats = CuboidStatsReaderUtil
                .readCuboidStatsFromSegments(recommendCuboids, newSegments);
        if (recommendCuboidsWithStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        cubeManager.promoteCheckpointOptimizeSegments(cube, recommendCuboidsWithStats,
                newSegments.toArray(new CubeSegment[newSegments.size()]));
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
Example #11
Source File: CubingJob.java From kylin-on-parquet-v2 with Apache License 2.0
protected void updateMetrics(ExecutableContext context, ExecuteResult result, ExecutableState state) {
    JobMetricsFacade.JobStatisticsResult jobStats = new JobMetricsFacade.JobStatisticsResult();
    jobStats.setWrapper(getSubmitter(), getProjectName(), CubingExecutableUtil.getCubeName(getParams()), getId(),
            getJobType(), getAlgorithm() == null ? "NULL" : getAlgorithm().toString());

    if (state == ExecutableState.SUCCEED) {
        jobStats.setJobStats(findSourceSizeBytes(), findCubeSizeBytes(), getDuration(), getMapReduceWaitTime(),
                getPerBytesTimeCost(findSourceSizeBytes(), getDuration()));
        if (CubingJobTypeEnum.getByName(getJobType()) == CubingJobTypeEnum.BUILD) {
            jobStats.setJobStepStats(getTaskDurationByName(ExecutableConstants.STEP_NAME_FACT_DISTINCT_COLUMNS),
                    getTaskDurationByName(ExecutableConstants.STEP_NAME_BUILD_DICTIONARY),
                    getTaskDurationByName(ExecutableConstants.STEP_NAME_BUILD_IN_MEM_CUBE),
                    getTaskDurationByName(ExecutableConstants.STEP_NAME_CONVERT_CUBOID_TO_HFILE));
        }
    } else if (state == ExecutableState.ERROR) {
        jobStats.setJobException(result.getThrowable() != null ? result.getThrowable() : new Exception());
    }
    JobMetricsFacade.updateMetrics(jobStats);
}
Example #12
Source File: CopyDictionaryStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams())).latestCopyForWrite();
    final CubeSegment optimizeSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubeSegment oldSegment = optimizeSegment.getCubeInstance().getOriginalSegmentToOptimize(optimizeSegment);
    Preconditions.checkNotNull(oldSegment, "cannot find the original segment to be optimized by " + optimizeSegment);

    // --- Copy dictionary
    optimizeSegment.getDictionaries().putAll(oldSegment.getDictionaries());
    optimizeSegment.getSnapshots().putAll(oldSegment.getSnapshots());
    optimizeSegment.getRowkeyStats().addAll(oldSegment.getRowkeyStats());

    try {
        CubeUpdate cubeBuilder = new CubeUpdate(cube);
        cubeBuilder.setToUpdateSegs(optimizeSegment);
        mgr.updateCube(cubeBuilder);
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }

    return new ExecuteResult();
}
Example #13
Source File: CreateFlatHiveTableStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    stepLogger.setILogListener((infoKey, info) -> {
        // only care about two properties here
        if (ExecutableConstants.YARN_APP_ID.equals(infoKey) || ExecutableConstants.YARN_APP_URL.equals(infoKey)) {
            getManager().addJobInfo(getId(), info);
        }
    });
    KylinConfig config = getCubeSpecificConfig();
    try {
        createFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example #14
Source File: HDFSPathGarbageCollectionStep.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        List<String> toDeletePaths = getDeletePaths();
        dropHdfsPathOnCluster(toDeletePaths, HadoopUtil.getWorkingFileSystem());

        if (StringUtils.isNotEmpty(context.getConfig().getHBaseClusterFs())) {
            dropHdfsPathOnCluster(toDeletePaths, FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration()));
        }
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        output.append("\n").append(e.getLocalizedMessage());
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
Example #15
Source File: NSparkExecutable.java From kylin-on-parquet-v2 with Apache License 2.0
private ExecuteResult runSparkSubmit(KylinConfig config, String hadoopConf, String jars, String kylinJobJar,
        String appArgs, String jobId) {
    PatternedLogger patternedLogger;
    if (config.isJobLogPrintEnabled()) {
        patternedLogger = new PatternedLogger(logger);
    } else {
        patternedLogger = new PatternedLogger(null);
    }

    try {
        String cmd = generateSparkCmd(config, hadoopConf, jars, kylinJobJar, appArgs);

        CliCommandExecutor exec = new CliCommandExecutor();
        Pair<Integer, String> result = exec.execute(cmd, patternedLogger, jobId);

        Map<String, String> extraInfo = makeExtraInfo(patternedLogger.getInfo());
        ExecuteResult ret = ExecuteResult.createSucceed(result.getSecond());
        ret.getExtraInfo().putAll(extraInfo);
        updateMetaAfterBuilding(config);
        return ret;
    } catch (Exception e) {
        return ExecuteResult.createError(e);
    }
}
Example #16
Source File: PersistExceptionExecutable.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws PersistentException {
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
    throw new PersistentException("persistent exception");
}
Example #17
Source File: FiveSecondSucceedTestExecutable.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(5000);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    return ExecuteResult.createSucceed();
}
Example #18
Source File: SucceedTestExecutable.java From Kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, "succeed");
}
Example #19
Source File: SqoopCmdStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    try {
        sqoopFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example #20
Source File: SparkExecutable.java From kylin with Apache License 2.0
protected ExecuteResult onResumed(String appId, ExecutableManager mgr) throws ExecuteException {
    Map<String, String> info = new HashMap<>();
    try {
        logger.info("spark_job_id:" + appId + " resumed");
        info.put(ExecutableConstants.SPARK_JOB_ID, appId);

        while (!isPaused() && !isDiscarded()) {
            String status = getAppState(appId);

            if (status.equals("FAILED") || status.equals("KILLED")) {
                mgr.updateJobOutput(getId(), ExecutableState.ERROR, null, appId + " has failed");
                return new ExecuteResult(ExecuteResult.State.FAILED, appId + " has failed");
            }

            if (status.equals("SUCCEEDED")) {
                mgr.addJobInfo(getId(), info);
                return new ExecuteResult(ExecuteResult.State.SUCCEED, appId + " has finished");
            }

            Thread.sleep(5000);
        }

        killAppRetry(appId);

        if (isDiscarded()) {
            return new ExecuteResult(ExecuteResult.State.DISCARDED, appId + " is discarded");
        } else {
            return new ExecuteResult(ExecuteResult.State.STOPPED, appId + " is stopped");
        }
    } catch (Exception e) {
        logger.error("error run spark job:", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    }
}
Example #21
Source File: HiveCmdStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    try {
        createFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example #22
Source File: FlinkExecutable.java From kylin with Apache License 2.0
private ExecuteResult onResumed(String appId, ExecutableManager mgr) throws ExecuteException {
    Map<String, String> info = new HashMap<>();
    try {
        logger.info("flink_job_id:" + appId + " resumed");
        info.put(ExecutableConstants.FLINK_JOB_ID, appId);

        while (!isPaused() && !isDiscarded()) {
            String status = getAppState(appId);

            if (status.equals("FAILED") || status.equals("KILLED")) {
                mgr.updateJobOutput(getId(), ExecutableState.ERROR, null, appId + " has failed");
                return new ExecuteResult(ExecuteResult.State.FAILED, appId + " has failed");
            }

            if (status.equals("SUCCEEDED")) {
                mgr.addJobInfo(getId(), info);
                return new ExecuteResult(ExecuteResult.State.SUCCEED, appId + " has finished");
            }

            Thread.sleep(5000);
        }

        killAppRetry(appId);

        if (isDiscarded()) {
            return new ExecuteResult(ExecuteResult.State.DISCARDED, appId + " is discarded");
        } else {
            return new ExecuteResult(ExecuteResult.State.STOPPED, appId + " is stopped");
        }
    } catch (Exception e) {
        logger.error("error run flink job:", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    }
}
Example #23
Source File: CmdStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    try {
        sqoopFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example #24
Source File: UpdateSnapshotCacheForQueryServersStepTest.java From kylin with Apache License 2.0
@Test
public void testExecute() throws ExecuteException {
    UpdateSnapshotCacheForQueryServersStep step = new UpdateSnapshotCacheForQueryServersStep();
    ExecuteResult result = step.doWork(new DefaultContext(Maps.<String, Executable> newConcurrentMap(), kylinConfig));
    System.out.println(result.output());
    assertTrue(result.succeed());
}
Example #25
Source File: SelfStopExecutable.java From Kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(5000);
    } catch (InterruptedException e) {
    }
    if (isDiscarded()) {
        return new ExecuteResult(ExecuteResult.State.STOPPED, "stopped");
    } else {
        return new ExecuteResult(ExecuteResult.State.SUCCEED, "succeed");
    }
}
Example #26
Source File: GarbageCollectionStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = context.getConfig();
    StringBuffer output = new StringBuffer();
    try {
        output.append(cleanUpIntermediateFlatTable(config));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
Example #27
Source File: CreateMrHiveDictStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = getCubeSpecificConfig();
    DistributedLock lock = null;
    try {
        if (getIsLock() || getIsUnlock()) {
            lock = KylinConfig.getInstanceFromEnv().getDistributedLockFactory().lockForCurrentThread();
        }
        createMrHiveDict(config, lock);

        if (isDiscarded()) {
            if (getIsLock() && lock != null) {
                unLock(lock);
            }
            return new ExecuteResult(ExecuteResult.State.DISCARDED, stepLogger.getBufferedLog());
        } else {
            return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
        }
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        if (isDiscarded()) {
            if (getIsLock()) {
                unLock(lock);
            }
            return new ExecuteResult(ExecuteResult.State.DISCARDED, stepLogger.getBufferedLog());
        } else {
            return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog());
        }
    }
}
Example #28
Source File: NSparkExecutable.java From kylin-on-parquet-v2 with Apache License 2.0
private ExecuteResult runLocalMode(String appArgs, KylinConfig config) {
    try {
        Class<? extends Object> appClz = ClassUtil.forName(getSparkSubmitClassName(), Object.class);
        appClz.getMethod("main", String[].class).invoke(null, (Object) new String[] { appArgs });
        updateMetaAfterBuilding(config);
        return ExecuteResult.createSucceed();
    } catch (Exception e) {
        return ExecuteResult.createError(e);
    }
}
Example #29
Source File: MergeOffsetStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cubeCopy = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()))
            .latestCopyForWrite();
    final String segmentId = CubingExecutableUtil.getSegmentId(this.getParams());
    final CubeSegment segCopy = cubeCopy.getSegmentById(segmentId);

    Preconditions.checkNotNull(segCopy, "Cube segment '" + segmentId + "' not found.");
    Segments<CubeSegment> mergingSegs = cubeCopy.getMergingSegments(segCopy);
    Preconditions.checkArgument(mergingSegs.size() > 0, "Merging segment not exist.");

    Collections.sort(mergingSegs);
    final CubeSegment first = mergingSegs.get(0);
    final CubeSegment last = mergingSegs.get(mergingSegs.size() - 1);

    segCopy.setSegRange(new SegmentRange(first.getSegRange().start, last.getSegRange().end));
    segCopy.setSourcePartitionOffsetStart(first.getSourcePartitionOffsetStart());
    segCopy.setSourcePartitionOffsetEnd(last.getSourcePartitionOffsetEnd());

    segCopy.setTSRange(new TSRange(mergingSegs.getTSStart(), mergingSegs.getTSEnd()));

    CubeUpdate update = new CubeUpdate(cubeCopy);
    update.setToUpdateSegs(segCopy);
    try {
        cubeManager.updateCube(update);
        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to update cube segment offset", e);
        return ExecuteResult.createError(e);
    }
}
Example #30
Source File: MergeDictionaryStep.java From kylin with Apache License 2.0
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment newSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
    final List<CubeSegment> mergingSegments = getMergingSegments(cube);
    KylinConfig conf = cube.getConfig();

    Collections.sort(mergingSegments);

    try {
        checkLookupSnapshotsMustIncremental(mergingSegments);

        // work on copy instead of cached objects
        CubeInstance cubeCopy = cube.latestCopyForWrite();
        CubeSegment newSegCopy = cubeCopy.getSegmentById(newSegment.getUuid());

        makeDictForNewSegment(conf, cubeCopy, newSegCopy, mergingSegments);
        makeSnapshotForNewSegment(cubeCopy, newSegCopy, mergingSegments);

        CubeUpdate update = new CubeUpdate(cubeCopy);
        update.setToUpdateSegs(newSegCopy);
        mgr.updateCube(update);

        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
}