org.apache.kylin.job.execution.DefaultChainedExecutable Java Examples
The following examples show how to use
org.apache.kylin.job.execution.DefaultChainedExecutable.
The original project and source file are noted above each example.
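The recurring pattern across these examples is simple: DefaultChainedExecutable is a job composed of sub-tasks that execute in sequence, so callers create the chained job, add tasks in order, and submit it to the executable manager. A minimal sketch of that pattern, borrowing SucceedTestExecutable and the execMgr handle from the scheduler tests shown below:

// Build a two-step chained job and hand it to the scheduler.
DefaultChainedExecutable job = new DefaultChainedExecutable();
job.addTask(new SucceedTestExecutable()); // step 1
job.addTask(new SucceedTestExecutable()); // step 2
execMgr.addJob(job); // the scheduler picks it up and runs the tasks in order
waitForJobFinish(job.getId(), MAX_WAIT_TIME); // test helper; blocks until the chain reaches a final state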
Example #1
Source File: ExecutableManager.java From Kylin with Apache License 2.0
private static AbstractExecutable parseTo(ExecutablePO executablePO) {
    if (executablePO == null) {
        return null;
    }
    String type = executablePO.getType();
    try {
        Class<? extends AbstractExecutable> clazz = ClassUtil.forName(type, AbstractExecutable.class);
        Constructor<? extends AbstractExecutable> constructor = clazz.getConstructor();
        AbstractExecutable result = constructor.newInstance();
        result.setId(executablePO.getUuid());
        result.setName(executablePO.getName());
        result.setParams(executablePO.getParams());
        List<ExecutablePO> tasks = executablePO.getTasks();
        if (tasks != null && !tasks.isEmpty()) {
            Preconditions.checkArgument(result instanceof DefaultChainedExecutable);
            for (ExecutablePO subTask : tasks) {
                // Recursively rebuild each sub-task and chain it onto the parent job
                ((DefaultChainedExecutable) result).addTask(parseTo(subTask));
            }
        }
        return result;
    } catch (ReflectiveOperationException e) {
        throw new IllegalArgumentException("cannot parse this job:" + executablePO.getId(), e);
    }
}
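parseTo() rebuilds a job from its persisted ExecutablePO form: a PO without sub-tasks becomes a plain AbstractExecutable, while a PO carrying tasks must map to a DefaultChainedExecutable whose children are reconstructed recursively. A hypothetical PO tree this method would accept, assuming ExecutablePO exposes setters mirroring the getters used above (setType, setUuid, and setTasks are assumptions):

// Hypothetical persisted form of a one-step chained job.
ExecutablePO chain = new ExecutablePO();
chain.setType(DefaultChainedExecutable.class.getName()); // assumed setter
chain.setUuid(UUID.randomUUID().toString());             // assumed setter

ExecutablePO step = new ExecutablePO();
step.setType("org.apache.kylin.job.common.ShellExecutable"); // any concrete AbstractExecutable
step.setUuid(UUID.randomUUID().toString());

chain.setTasks(Collections.singletonList(step)); // assumed setter; non-empty tasks trigger the chained branch
AbstractExecutable job = parseTo(chain);          // a DefaultChainedExecutable with one sub-task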
Example #2
Source File: StreamingCubingJobBuilder.java From kylin with Apache License 2.0
private MapReduceExecutable createMergeDictStep(String streamingStoragePath, String jobId, DefaultChainedExecutable jobFlow) {
    MapReduceExecutable mergeDict = new MapReduceExecutable();
    mergeDict.setName(ExecutableConstants.STEP_NAME_STREAMING_CREATE_DICTIONARY);
    StringBuilder cmd = new StringBuilder();
    appendMapReduceParameters(cmd, JobEngineConfig.CUBE_MERGE_JOB_CONF_SUFFIX);
    appendExecCmdParameters(cmd, BatchConstants.ARG_JOB_NAME, ExecutableConstants.STEP_NAME_STREAMING_CREATE_DICTIONARY);
    appendExecCmdParameters(cmd, BatchConstants.ARG_INPUT, streamingStoragePath);
    appendExecCmdParameters(cmd, BatchConstants.ARG_CUBE_NAME, seg.getRealization().getName());
    appendExecCmdParameters(cmd, BatchConstants.ARG_SEGMENT_NAME, seg.getName());
    // Instead of using mr job output, trySaveNewDict api is used, so output path is useless here
    appendExecCmdParameters(cmd, BatchConstants.ARG_OUTPUT, getDictPath(jobId));
    final String cubeName = CubingExecutableUtil.getCubeName(jobFlow.getParams());
    mergeDict.setMapReduceParams(cmd.toString());
    mergeDict.setMapReduceJobClass(MergeDictJob.class);
    mergeDict.setLockPathName(cubeName);
    mergeDict.setIsNeedLock(true);
    mergeDict.setIsNeedReleaseLock(false);
    mergeDict.setJobFlowJobId(jobFlow.getId());
    return mergeDict;
}
Example #3
Source File: DefaultSchedulerTest.java From kylin with Apache License 2.0
@Test
public void testSchedulerStop() throws Exception {
    logger.info("testSchedulerStop");
    thrown.expect(RuntimeException.class);
    thrown.expectMessage("too long wait time");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new FiveSecondSucceedTestExecutable();
    job.addTask(task1);
    execMgr.addJob(job);
    // sleep 3s to make sure SucceedTestExecutable is running
    Thread.sleep(3000);
    // scheduler failed due to some reason
    scheduler.shutdown();
    waitForJobFinish(job.getId(), 6000);
}
Example #4
Source File: HBaseLookupMRSteps.java From kylin with Apache License 2.0
private void addLookupTableConvertToHFilesStep(DefaultChainedExecutable jobFlow, String tableName, String snapshotID) {
    MapReduceExecutable createHFilesStep = new MapReduceExecutable();
    createHFilesStep.setName(ExecutableConstants.STEP_NAME_MATERIALIZE_LOOKUP_TABLE_CONVERT_HFILE + ":" + tableName);
    StringBuilder cmd = new StringBuilder();
    appendMapReduceParameters(cmd);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_CUBE_NAME, cube.getName());
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_OUTPUT,
            getLookupTableHFilePath(tableName, jobFlow.getId()));
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_TABLE_NAME, tableName);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_CUBING_JOB_ID, jobFlow.getId());
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_LOOKUP_SNAPSHOT_ID, snapshotID);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_JOB_NAME,
            "Kylin_LookupTable_HFile_Generator_" + tableName + "_Step");
    createHFilesStep.setMapReduceParams(cmd.toString());
    createHFilesStep.setMapReduceJobClass(LookupTableToHFileJob.class);
    createHFilesStep.setCounterSaveAs(BatchConstants.LOOKUP_EXT_SNAPSHOT_SRC_RECORD_CNT_PFX + tableName);
    jobFlow.addTask(createHFilesStep);
}
Example #5
Source File: HBaseLookupMRSteps.java From kylin-on-parquet-v2 with Apache License 2.0
private void addLookupTableConvertToHFilesStep(DefaultChainedExecutable jobFlow, String tableName, String snapshotID) {
    MapReduceExecutable createHFilesStep = new MapReduceExecutable();
    createHFilesStep.setName(ExecutableConstants.STEP_NAME_MATERIALIZE_LOOKUP_TABLE_CONVERT_HFILE + ":" + tableName);
    StringBuilder cmd = new StringBuilder();
    appendMapReduceParameters(cmd);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_CUBE_NAME, cube.getName());
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_OUTPUT,
            getLookupTableHFilePath(tableName, jobFlow.getId()));
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_TABLE_NAME, tableName);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_CUBING_JOB_ID, jobFlow.getId());
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_LOOKUP_SNAPSHOT_ID, snapshotID);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_JOB_NAME,
            "Kylin_LookupTable_HFile_Generator_" + tableName + "_Step");
    createHFilesStep.setMapReduceParams(cmd.toString());
    createHFilesStep.setMapReduceJobClass(LookupTableToHFileJob.class);
    createHFilesStep.setCounterSaveAs(BatchConstants.LOOKUP_EXT_SNAPSHOT_SRC_RECORD_CNT_PFX + tableName);
    jobFlow.addTask(createHFilesStep);
}
Example #6
Source File: NResourceDetectStep.java From kylin-on-parquet-v2 with Apache License 2.0
public NResourceDetectStep(DefaultChainedExecutable parent) {
    if (parent instanceof NSparkCubingJob) {
        this.setSparkSubmitClassName(ResourceDetectBeforeCubingJob.class.getName());
    } else if (parent instanceof NSparkMergingJob) {
        this.setSparkSubmitClassName(ResourceDetectBeforeMergingJob.class.getName());
    /*} else if (parent instanceof NTableSamplingJob) {
        this.setSparkSubmitClassName(ResourceDetectBeforeSampling.class.getName());
    */} else {
        throw new IllegalArgumentException("Unsupported resource detect for " + parent.getName() + " job");
    }
    this.setName(ExecutableConstants.STEP_NAME_DETECT_RESOURCE);
}
Example #7
Source File: Coordinator.java From kylin-on-parquet-v2 with Apache License 2.0
private boolean triggerSegmentBuild(String cubeName, String segmentName) {
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    try {
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = getCubeManager().appendSegment(cubeInstance,
                new TSRange(segmentRange.getFirst(), segmentRange.getSecond()));
        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg, "SYSTEM");
        getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());
        SegmentJobBuildInfo segmentJobBuildInfo = new SegmentJobBuildInfo(cubeName, segmentName, cubingJob.getId());
        jobStatusChecker.addSegmentBuildJob(segmentJobBuildInfo);
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        streamMetadataStore.updateSegmentBuildState(cubeName, segmentName, state);
        return true;
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        return false;
    }
}
Example #8
Source File: JobService.java From kylin-on-parquet-v2 with Apache License 2.0
public void resubmitJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);
    Coordinator coordinator = Coordinator.getInstance();
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    String cubeName = job.getRelatedCube();
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    String segmentName = job.getRelatedSegmentName();
    try {
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = coordinator.getCubeManager().appendSegment(cubeInstance,
                new SegmentRange.TSRange(segmentRange.getFirst(), segmentRange.getSecond()));
        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg,
                aclEvaluate.getCurrentUserName());
        coordinator.getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        coordinator.getStreamMetadataStore().updateSegmentBuildState(cubeName, segmentName, state);
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        throw e;
    }
}
Example #9
Source File: StreamingCubingJobBuilder.java From kylin-on-parquet-v2 with Apache License 2.0
private SaveDictStep createSaveDictStep(String jobId, DefaultChainedExecutable jobFlow) {
    SaveDictStep result = new SaveDictStep();
    final String cubeName = CubingExecutableUtil.getCubeName(jobFlow.getParams());
    result.setName(ExecutableConstants.STEP_NAME_STREAMING_SAVE_DICTS);
    CubingExecutableUtil.setCubeName(seg.getRealization().getName(), result.getParams());
    CubingExecutableUtil.setSegmentId(seg.getUuid(), result.getParams());
    CubingExecutableUtil.setDictsPath(getDictPath(jobId), result.getParams());
    CubingExecutableUtil.setCubingJobId(jobId, result.getParams());
    result.setIsNeedReleaseLock(true);
    result.setJobFlowJobId(jobFlow.getId());
    result.setLockPathName(cubeName);
    return result;
}
Example #10
Source File: DefaultSchedulerTest.java From kylin with Apache License 2.0
@Test
public void testSucceedAndFailed() throws Exception {
    logger.info("testSucceedAndFailed");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new SucceedTestExecutable();
    BaseTestExecutable task2 = new FailedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    execMgr.addJob(job);
    waitForJobFinish(job.getId(), MAX_WAIT_TIME);
    Assert.assertEquals(ExecutableState.ERROR, execMgr.getOutput(job.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task1.getId()).getState());
    Assert.assertEquals(ExecutableState.ERROR, execMgr.getOutput(task2.getId()).getState());
}
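The assertions pin down the chain's failure semantics: when a later task fails, the chained job as a whole is marked ERROR, but the earlier task that already finished keeps its SUCCEED state.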
Example #11
Source File: JobService.java From kylin with Apache License 2.0
public void resubmitJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);
    Coordinator coordinator = Coordinator.getInstance();
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    String cubeName = job.getRelatedCube();
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    String segmentName = job.getRelatedSegmentName();
    try {
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = coordinator.getCubeManager().appendSegment(cubeInstance,
                new SegmentRange.TSRange(segmentRange.getFirst(), segmentRange.getSecond()));
        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg,
                aclEvaluate.getCurrentUserName());
        coordinator.getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        coordinator.getStreamMetadataStore().updateSegmentBuildState(cubeName, segmentName, state);
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        throw e;
    }
}
Example #12
Source File: CubeService.java From kylin-on-parquet-v2 with Apache License 2.0
public String mergeCubeSegment(String cubeName) {
    CubeInstance cube = getCubeManager().getCube(cubeName);
    if (!cube.needAutoMerge())
        return null;
    if (!cube.isReady()) {
        logger.info("The cube: {} is disabled", cubeName);
        return null;
    }
    synchronized (CubeService.class) {
        try {
            cube = getCubeManager().getCube(cubeName);
            SegmentRange offsets = cube.autoMergeCubeSegments();
            if (offsets != null && !isMergingJobBeenDiscarded(cube, cubeName, cube.getProject(), offsets)) {
                CubeSegment newSeg = getCubeManager().mergeSegments(cube, null, offsets, true);
                logger.info("Will submit merge job on " + newSeg);
                DefaultChainedExecutable job = EngineFactory.createBatchMergeJob(newSeg, "SYSTEM");
                getExecutableManager().addJob(job);
                return job.getId();
            } else {
                logger.info("Not ready for merge on cube " + cubeName);
            }
        } catch (IOException e) {
            logger.error("Failed to auto merge cube " + cubeName, e);
        }
    }
    return null;
}
Example #13
Source File: JobStepFactory.java From kylin-on-parquet-v2 with Apache License 2.0
public static NSparkExecutable addStep(DefaultChainedExecutable parent, JobStepType type, CubeInstance cube) {
    NSparkExecutable step;
    KylinConfig config = cube.getConfig();
    switch (type) {
    case RESOURCE_DETECT:
        step = new NResourceDetectStep(parent);
        break;
    case CUBING:
        step = new NSparkCubingStep(config.getSparkBuildClassName());
        break;
    case MERGING:
        step = new NSparkMergingStep(config.getSparkMergeClassName());
        break;
    case CLEAN_UP_AFTER_MERGE:
        step = new NSparkUpdateMetaAndCleanupAfterMergeStep();
        break;
    default:
        throw new IllegalArgumentException();
    }
    step.setParams(parent.getParams());
    step.setProject(parent.getProject());
    step.setTargetSubject(parent.getTargetSubject());
    if (step instanceof NSparkUpdateMetaAndCleanupAfterMergeStep) {
        CubeSegment mergeSegment = cube.getSegmentById(parent.getTargetSegments().iterator().next());
        final Segments<CubeSegment> mergingSegments = cube.getMergingSegments(mergeSegment);
        step.setParam(MetadataConstants.P_SEGMENT_NAMES,
                String.join(",", NSparkCubingUtil.toSegmentNames(mergingSegments)));
        step.setParam(CubingExecutableUtil.SEGMENT_ID, parent.getParam(CubingExecutableUtil.SEGMENT_ID));
        step.setParam(MetadataConstants.P_JOB_TYPE, parent.getParam(MetadataConstants.P_JOB_TYPE));
        step.setParam(MetadataConstants.P_OUTPUT_META_URL, parent.getParam(MetadataConstants.P_OUTPUT_META_URL));
    }
    parent.addTask(step);
    // after addTask, step's id is changed
    step.setDistMetaUrl(config.getJobTmpMetaStoreUrl(parent.getProject(), step.getId()));
    return step;
}
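A job builder would call addStep() repeatedly against the same parent to assemble a flow, typically resource detection before the actual build. A hypothetical caller (the parentJob and cube variables are assumed to already exist in scope):

// Assemble a Spark cubing flow on an existing chained job.
NSparkExecutable detect = JobStepFactory.addStep(parentJob, JobStepType.RESOURCE_DETECT, cube);
NSparkExecutable cubing = JobStepFactory.addStep(parentJob, JobStepType.CUBING, cube);
// Each call copies the parent's params and project onto the step and appends it via parent.addTask(...).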
Example #14
Source File: HiveMRInput.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
public IBatchMergeInputSide getBatchMergeInputSide(ISegment seg) {
    return new IMRBatchMergeInputSide() {
        @Override
        public void addStepPhase1_MergeDictionary(DefaultChainedExecutable jobFlow) {
            // doing nothing
        }
    };
}
Example #15
Source File: HiveInputBase.java From kylin with Apache License 2.0
@Override
public void addStepPhase4_Cleanup(DefaultChainedExecutable jobFlow) {
    final String jobWorkingDir = getJobWorkingDir(jobFlow, hdfsWorkingDir);

    org.apache.kylin.source.hive.GarbageCollectionStep step = new org.apache.kylin.source.hive.GarbageCollectionStep();
    step.setName(ExecutableConstants.STEP_NAME_HIVE_CLEANUP);

    List<String> deleteTables = new ArrayList<>();
    deleteTables.add(getIntermediateTableIdentity());

    // mr-hive dict and inner table do not need delete hdfs
    String[] mrHiveDicts = flatDesc.getSegment().getConfig().getMrHiveDictColumns();
    if (Objects.nonNull(mrHiveDicts) && mrHiveDicts.length > 0) {
        String dictDb = flatDesc.getSegment().getConfig().getMrHiveDictDB();
        String tableName = dictDb + "." + flatDesc.getTableName()
                + flatDesc.getSegment().getConfig().getMrHiveDistinctValueTableSuffix();
        String tableName2 = dictDb + "." + flatDesc.getTableName()
                + flatDesc.getSegment().getConfig().getMrHiveDictTableSuffix();
        deleteTables.add(tableName);
        deleteTables.add(tableName2);
    }
    step.setIntermediateTables(deleteTables);

    step.setExternalDataPaths(Collections.singletonList(JoinedFlatTable.getTableDir(flatDesc, jobWorkingDir)));
    step.setHiveViewIntermediateTableIdentities(StringUtil.join(hiveViewIntermediateTables, ","));
    jobFlow.addTask(step);
}
Example #16
Source File: HBaseLookupMRSteps.java From kylin-on-parquet-v2 with Apache License 2.0
private void addUpdateSnapshotQueryCacheStep(DefaultChainedExecutable jobFlow, String tableName, String snapshotID) {
    UpdateSnapshotCacheForQueryServersStep updateSnapshotCacheStep = new UpdateSnapshotCacheForQueryServersStep();
    updateSnapshotCacheStep.setName(ExecutableConstants.STEP_NAME_LOOKUP_SNAPSHOT_CACHE_UPDATE + ":" + tableName);

    LookupExecutableUtil.setProjectName(cube.getProject(), updateSnapshotCacheStep.getParams());
    LookupExecutableUtil.setLookupTableName(tableName, updateSnapshotCacheStep.getParams());
    LookupExecutableUtil.setLookupSnapshotID(snapshotID, updateSnapshotCacheStep.getParams());
    jobFlow.addTask(updateSnapshotCacheStep);
}
Example #17
Source File: HBaseJobSteps.java From kylin-on-parquet-v2 with Apache License 2.0
public void addMergingGarbageCollectionSteps(DefaultChainedExecutable jobFlow) {
    String jobId = jobFlow.getId();

    MergeGCStep hBaseGCStep = createHBaseGCStep(getMergingHTables());
    jobFlow.addTask(hBaseGCStep);

    List<String> toDeletePaths = new ArrayList<>();
    toDeletePaths.addAll(getMergingHDFSPaths());
    toDeletePaths.add(getHFilePath(jobId));

    HDFSPathGarbageCollectionStep step = createHDFSPathGCStep(toDeletePaths, jobId);
    jobFlow.addTask(step);
}
Example #18
Source File: JdbcHiveMRInput.java From kylin with Apache License 2.0
@Override
public IBatchMergeInputSide getBatchMergeInputSide(ISegment seg) {
    return new IMRBatchMergeInputSide() {
        @Override
        public void addStepPhase1_MergeDictionary(DefaultChainedExecutable jobFlow) {
            // doing nothing
        }
    };
}
Example #19
Source File: HiveFlinkInput.java From kylin with Apache License 2.0
@Override
public void addStepPhase4_Cleanup(DefaultChainedExecutable jobFlow) {
    final String jobWorkingDir = getJobWorkingDir(jobFlow, hdfsWorkingDir);

    GarbageCollectionStep step = new GarbageCollectionStep();
    step.setName(ExecutableConstants.STEP_NAME_HIVE_CLEANUP);
    step.setIntermediateTables(Collections.singletonList(getIntermediateTableIdentity()));
    step.setExternalDataPaths(Collections.singletonList(JoinedFlatTable.getTableDir(flatDesc, jobWorkingDir)));
    step.setHiveViewIntermediateTableIdentities(StringUtil.join(hiveViewIntermediateTables, ","));
    jobFlow.addTask(step);
}
Example #20
Source File: DefaultSchedulerTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testSucceedAndFailed() throws Exception {
    logger.info("testSucceedAndFailed");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new SucceedTestExecutable();
    BaseTestExecutable task2 = new FailedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    execMgr.addJob(job);
    waitForJobFinish(job.getId(), MAX_WAIT_TIME);
    Assert.assertEquals(ExecutableState.ERROR, execMgr.getOutput(job.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task1.getId()).getState());
    Assert.assertEquals(ExecutableState.ERROR, execMgr.getOutput(task2.getId()).getState());
}
Example #21
Source File: HiveInputBase.java From kylin with Apache License 2.0
protected void addStepPhase1_DoMaterializeLookupTable(DefaultChainedExecutable jobFlow) {
    final String hiveInitStatements = JoinedFlatTable.generateHiveInitStatements(flatTableDatabase);
    final String jobWorkingDir = getJobWorkingDir(jobFlow, hdfsWorkingDir);

    AbstractExecutable task = createLookupHiveViewMaterializationStep(hiveInitStatements, jobWorkingDir,
            flatDesc, hiveViewIntermediateTables, jobFlow.getId());
    if (task != null) {
        jobFlow.addTask(task);
    }
}
Example #22
Source File: BuildCubeWithEngine.java From kylin-on-parquet-v2 with Apache License 2.0
private Boolean buildSegment(String cubeName, long startDate, long endDate, boolean isEmpty) throws Exception {
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    CubeSegment segment = cubeManager.appendSegment(cubeInstance, new TSRange(0L, endDate));
    DefaultChainedExecutable job = EngineFactory.createBatchCubingJob(segment, "TEST");
    jobService.addJob(job);
    if (fastBuildMode) {
        jobSegmentMap.put(job.getId(), segment);
        jobCheckActionMap.put(job.getId(), isEmpty ? "checkEmptySegRangeInfo" : "checkNormalSegRangeInfo");
        return true;
    }
    ExecutableState state = waitForJob(job.getId());
    return Boolean.valueOf(ExecutableState.SUCCEED == state);
}
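Note that the appended segment's range is new TSRange(0L, endDate): it starts at epoch rather than at the startDate parameter, so startDate is effectively unused in this test helper.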
Example #23
Source File: ITDistributedSchedulerTakeOverTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testSchedulerTakeOver() throws Exception {
    if (!lock(jobLock1, jobId2)) {
        throw new JobException("fail to get the lock");
    }
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    job.setId(jobId2);
    AbstractExecutable task1 = new SucceedTestExecutable();
    AbstractExecutable task2 = new SucceedTestExecutable();
    AbstractExecutable task3 = new SucceedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    job.addTask(task3);
    execMgr.addJob(job);

    waitForJobStatus(job.getId(), ExecutableState.RUNNING, 500);
    scheduler1.shutdown();
    scheduler1 = null;

    waitForJobFinish(job.getId());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task1.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task2.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task3.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(job.getId()).getState());
}
Example #24
Source File: Coordinator.java From kylin with Apache License 2.0
private boolean triggerSegmentBuild(String cubeName, String segmentName) {
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    try {
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = getCubeManager().appendSegment(cubeInstance,
                new TSRange(segmentRange.getFirst(), segmentRange.getSecond()));
        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg, "SYSTEM");
        getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());
        SegmentJobBuildInfo segmentJobBuildInfo = new SegmentJobBuildInfo(cubeName, segmentName, cubingJob.getId());
        jobStatusChecker.addSegmentBuildJob(segmentJobBuildInfo);
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        streamMetadataStore.updateSegmentBuildState(cubeName, segmentName, state);
        return true;
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        return false;
    }
}
Example #25
Source File: DefaultSchedulerTest.java From Kylin with Apache License 2.0
@Test
public void testSucceedAndError() throws Exception {
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new ErrorTestExecutable();
    BaseTestExecutable task2 = new SucceedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    jobService.addJob(job);
    waitForJobFinish(job.getId());
    assertEquals(ExecutableState.ERROR, jobService.getOutput(job.getId()).getState());
    assertEquals(ExecutableState.ERROR, jobService.getOutput(task1.getId()).getState());
    assertEquals(ExecutableState.READY, jobService.getOutput(task2.getId()).getState());
}
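Compare this with testSucceedAndFailed above: here the first task errors, so the chain stops immediately and the second task is never scheduled, which is why it remains READY rather than ERROR.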
Example #26
Source File: JdbcHiveMRInput.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
public IBatchMergeInputSide getBatchMergeInputSide(ISegment seg) {
    return new IMRBatchMergeInputSide() {
        @Override
        public void addStepPhase1_MergeDictionary(DefaultChainedExecutable jobFlow) {
            // doing nothing
        }
    };
}
Example #27
Source File: ExecutableManager.java From Kylin with Apache License 2.0
public void discardJob(String jobId) {
    AbstractExecutable job = getJob(jobId);
    if (job instanceof DefaultChainedExecutable) {
        List<AbstractExecutable> tasks = ((DefaultChainedExecutable) job).getTasks();
        for (AbstractExecutable task : tasks) {
            if (!task.getStatus().isFinalState()) {
                // discard every sub-task that has not yet reached a final state
                updateJobOutput(task.getId(), ExecutableState.DISCARDED, null, null);
            }
        }
    }
    updateJobOutput(jobId, ExecutableState.DISCARDED, null, null);
}
Example #28
Source File: StreamingCubingJobBuilder.java From kylin with Apache License 2.0
private SaveDictStep createSaveDictStep(String jobId, DefaultChainedExecutable jobFlow) {
    SaveDictStep result = new SaveDictStep();
    final String cubeName = CubingExecutableUtil.getCubeName(jobFlow.getParams());
    result.setName(ExecutableConstants.STEP_NAME_STREAMING_SAVE_DICTS);
    CubingExecutableUtil.setCubeName(seg.getRealization().getName(), result.getParams());
    CubingExecutableUtil.setSegmentId(seg.getUuid(), result.getParams());
    CubingExecutableUtil.setDictsPath(getDictPath(jobId), result.getParams());
    CubingExecutableUtil.setCubingJobId(jobId, result.getParams());
    result.setIsNeedReleaseLock(true);
    result.setJobFlowJobId(jobFlow.getId());
    result.setLockPathName(cubeName);
    return result;
}
Example #29
Source File: DefaultSchedulerTest.java From Kylin with Apache License 2.0
@Test
public void testSucceed() throws Exception {
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new SucceedTestExecutable();
    BaseTestExecutable task2 = new SucceedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    jobService.addJob(job);
    waitForJobFinish(job.getId());
    assertEquals(ExecutableState.SUCCEED, jobService.getOutput(job.getId()).getState());
    assertEquals(ExecutableState.SUCCEED, jobService.getOutput(task1.getId()).getState());
    assertEquals(ExecutableState.SUCCEED, jobService.getOutput(task2.getId()).getState());
}
Example #30
Source File: DefaultSchedulerTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testDiscard() throws Exception {
    logger.info("testDiscard");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    SelfStopExecutable task1 = new SelfStopExecutable();
    job.addTask(task1);
    execMgr.addJob(job);
    Thread.sleep(1100); // give time to launch job/task1
    waitForJobStatus(job.getId(), ExecutableState.RUNNING, 500);
    execMgr.discardJob(job.getId());
    waitForJobFinish(job.getId(), MAX_WAIT_TIME);
    Assert.assertEquals(ExecutableState.DISCARDED, execMgr.getOutput(job.getId()).getState());
    Assert.assertEquals(ExecutableState.DISCARDED, execMgr.getOutput(task1.getId()).getState());
    task1.waitForDoWork();
}