org.apache.kylin.storage.StorageContext Java Examples
The following examples show how to use
org.apache.kylin.storage.StorageContext.
Each example links back to its original project and source file.
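For orientation, here is a minimal sketch of how a StorageContext is typically created and wired into a query, based only on calls that appear in the examples below. This is illustrative, not authoritative: the sequence-id constructor argument and setter availability vary across Kylin versions, and the storage URL string is an assumed placeholder.

import org.apache.kylin.storage.StorageContext;

// Minimal, illustrative setup; newer Kylin versions construct the context
// with a sequence id (see the OLAPContext examples below), older versions
// use the no-arg constructor (see the StorageTest example below).
StorageContext context = new StorageContext(0);
context.setConnUrl("kylin_metadata@hbase"); // assumed URL; normally taken from KylinConfig
context.setAcceptPartialResult(true);       // tolerate partial results on oversized scans

// The storage engine then drives the query through the context:
// ITupleIterator it = storageEngine.search(context, sqlDigest, returnTupleInfo);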
Example #1
Source File: ObserverEnabler.java From Kylin with Apache License 2.0
public static ResultScanner scanWithCoprocessorIfBeneficial(CubeSegment segment, Cuboid cuboid,
        TupleFilter tupleFiler, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders,
        StorageContext context, HTableInterface table, Scan scan) throws IOException {

    if (!context.isCoprocessorEnabled()) {
        return table.getScanner(scan);
    }

    CoprocessorRowType type = CoprocessorRowType.fromCuboid(segment, cuboid);
    CoprocessorFilter filter = CoprocessorFilter.fromFilter(segment, tupleFiler);
    CoprocessorProjector projector = CoprocessorProjector.makeForObserver(segment, cuboid, groupBy);
    ObserverAggregators aggrs = ObserverAggregators.fromValueDecoders(rowValueDecoders);

    if (DEBUG_LOCAL_COPROCESSOR) {
        RegionScanner innerScanner = new RegionScannerAdapter(table.getScanner(scan));
        AggregationScanner aggrScanner = new AggregationScanner(type, filter, projector, aggrs, innerScanner);
        return new ResultScannerAdapter(aggrScanner);
    } else {
        scan.setAttribute(AggregateRegionObserver.COPROCESSOR_ENABLE, new byte[] { 0x01 });
        scan.setAttribute(AggregateRegionObserver.TYPE, CoprocessorRowType.serialize(type));
        scan.setAttribute(AggregateRegionObserver.PROJECTOR, CoprocessorProjector.serialize(projector));
        scan.setAttribute(AggregateRegionObserver.AGGREGATORS, ObserverAggregators.serialize(aggrs));
        scan.setAttribute(AggregateRegionObserver.FILTER, CoprocessorFilter.serialize(filter));
        return table.getScanner(scan);
    }
}
Example #2
Source File: GTCubeStorageQueryBase.java From kylin with Apache License 2.0
private void enableStreamAggregateIfBeneficial(Cuboid cuboid, Set<TblColRef> groupsD, StorageContext context) {
    CubeDesc cubeDesc = cuboid.getCubeDesc();
    boolean enabled = cubeDesc.getConfig().isStreamAggregateEnabled();

    Set<TblColRef> shardByInGroups = Sets.newHashSet();
    for (TblColRef col : cubeDesc.getShardByColumns()) {
        if (groupsD.contains(col)) {
            shardByInGroups.add(col);
        }
    }
    if (!shardByInGroups.isEmpty()) {
        enabled = false;
        logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}",
                shardByInGroups);
    }
    if (!context.isNeedStorageAggregation()) {
        enabled = false;
        logger.debug("Aggregate partition results is not beneficial because no storage aggregation");
    }

    if (enabled) {
        context.enableStreamAggregate();
    }
}
Example #3
Source File: GTCubeStorageQueryBase.java From kylin with Apache License 2.0
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);

    List<CubeSegmentScanner> scanners = Lists.newArrayList();
    SegmentPruner segPruner = new SegmentPruner(sqlDigest.filter);
    for (CubeSegment cubeSeg : segPruner.listSegmentsForQuery(cubeInstance)) {
        CubeSegmentScanner scanner = new CubeSegmentScanner(cubeSeg, request.getCuboid(), request.getDimensions(),
                request.getGroups(), request.getDynGroups(), request.getDynGroupExprs(),
                request.getMetrics(), request.getDynFuncs(),
                request.getFilter(), request.getHavingFilter(), request.getContext());
        if (!scanner.isSegmentSkipped())
            scanners.add(scanner);
    }

    if (scanners.isEmpty())
        return ITupleIterator.EMPTY_TUPLE_ITERATOR;

    return new SequentialCubeTupleIterator(scanners, request.getCuboid(), request.getDimensions(),
            request.getDynGroups(), request.getGroups(), request.getMetrics(), returnTupleInfo,
            request.getContext(), sqlDigest);
}
Example #4
Source File: SequentialCubeTupleIterator.java From kylin with Apache License 2.0
public SequentialCubeTupleIterator(List<CubeSegmentScanner> scanners, Cuboid cuboid,
        Set<TblColRef> selectedDimensions, List<TblColRef> rtGroups, Set<TblColRef> groups,
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context, SQLDigest sqlDigest) {
    this.context = context;
    this.scanners = scanners;

    Set<TblColRef> selectedDims = Sets.newHashSet(selectedDimensions);
    selectedDims.addAll(rtGroups);

    segmentCubeTupleIterators = Lists.newArrayList();
    for (CubeSegmentScanner scanner : scanners) {
        segmentCubeTupleIterators.add(new SegmentCubeTupleIterator(scanner, cuboid, selectedDims, selectedMetrics,
                returnTupleInfo, context));
    }

    if (context.mergeSortPartitionResults() && !sqlDigest.isRawQuery) {
        // query with limit
        logger.info("Using SortedIteratorMergerWithLimit to merge segment results");
        Iterator<Iterator<ITuple>> transformed =
                (Iterator<Iterator<ITuple>>) (Iterator<?>) segmentCubeTupleIterators.iterator();
        tupleIterator = new SortedIteratorMergerWithLimit<ITuple>(transformed, context.getFinalPushDownLimit(),
                getTupleDimensionComparator(cuboid, groups, returnTupleInfo)).getIterator();
    } else {
        // normal case
        logger.info("Using Iterators.concat to merge segment results");
        tupleIterator = Iterators.concat(segmentCubeTupleIterators.iterator());
    }
}
Example #5
Source File: SegmentCubeTupleIterator.java From kylin with Apache License 2.0
public SegmentCubeTupleIterator(CubeSegmentScanner scanner, Cuboid cuboid, Set<TblColRef> selectedDimensions,
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context) {
    this.scanner = scanner;
    this.cuboid = cuboid;
    this.selectedDimensions = selectedDimensions;
    this.selectedMetrics = selectedMetrics;
    this.tupleInfo = returnTupleInfo;
    this.tuple = new Tuple(returnTupleInfo);
    this.context = context;

    CuboidToGridTableMapping mapping = context.getMapping();
    int[] gtDimsIdx = mapping.getDimIndexes(selectedDimensions);
    int[] gtMetricsIdx = mapping.getMetricsIndexes(selectedMetrics);
    // gtColIdx = gtDimsIdx + gtMetricsIdx
    int[] gtColIdx = new int[gtDimsIdx.length + gtMetricsIdx.length];
    System.arraycopy(gtDimsIdx, 0, gtColIdx, 0, gtDimsIdx.length);
    System.arraycopy(gtMetricsIdx, 0, gtColIdx, gtDimsIdx.length, gtMetricsIdx.length);

    this.gtValues = getGTValuesIterator(scanner.iterator(), scanner.getScanRequest(), gtDimsIdx, gtMetricsIdx);
    this.cubeTupleConverter = ((GTCubeStorageQueryBase) context.getStorageQuery()).newCubeTupleConverter(
            scanner.cubeSeg, cuboid, selectedDimensions, selectedMetrics, gtColIdx, tupleInfo);
}
Example #6
Source File: GTCubeStorageQueryRequest.java From kylin with Apache License 2.0
public GTCubeStorageQueryRequest(Cuboid cuboid, Set<TblColRef> dimensions, Set<TblColRef> groups,
        List<TblColRef> dynGroups, List<TupleExpression> dynGroupExprs, Set<TblColRef> filterCols,
        Set<FunctionDesc> metrics, List<DynamicFunctionDesc> dynFuncs, TupleFilter filter,
        TupleFilter havingFilter, StorageContext context) {
    this.cuboid = cuboid;
    this.dimensions = dimensions;
    this.groups = groups;
    this.dynGroups = dynGroups;
    this.dynGroupExprs = dynGroupExprs;
    this.filterCols = filterCols;
    this.metrics = metrics;
    this.dynFuncs = dynFuncs;
    this.filter = filter;
    this.havingFilter = havingFilter;
    this.context = context;
}
Example #7
Source File: ITStorageTest.java From kylin with Apache License 2.0
private int search(List<TblColRef> groups, List<FunctionDesc> aggregations, TupleFilter filter,
        StorageContext context) {
    int count = 0;
    ITupleIterator iterator = null;
    try {
        SQLDigest sqlDigest = new SQLDigest("default.test_kylin_fact",
                /*allCol*/ Collections.<TblColRef> emptySet(), /*join*/ null, groups,
                /*subqueryJoinParticipants*/ Sets.<TblColRef> newHashSet(),
                /*dynamicGroupByColumns*/ Collections.<TblColRef, TupleExpression> emptyMap(),
                /*groupByExpression*/ false,
                /*metricCol*/ Collections.<TblColRef> emptySet(), aggregations,
                /*aggrSqlCalls*/ Collections.<SQLCall> emptyList(),
                /*dynamicAggregations*/ Collections.<DynamicFunctionDesc> emptyList(),
                /*runtimeDimensionColumns*/ Collections.<TblColRef> emptySet(),
                /*runtimeMetricColumns*/ Collections.<TblColRef> emptySet(),
                /*filter col*/ Collections.<TblColRef> emptySet(), filter, null,
                /*sortCol*/ new ArrayList<TblColRef>(), new ArrayList<SQLDigest.OrderEnum>(),
                false, false, false, new HashSet<MeasureDesc>());
        iterator = storageEngine.search(context, sqlDigest, mockup.newTupleInfo(groups, aggregations));
        while (iterator.hasNext()) {
            ITuple tuple = iterator.next();
            System.out.println("Tuple = " + tuple);
            count++;
        }
    } finally {
        if (iterator != null)
            iterator.close();
    }
    return count;
}
Example #8
Source File: OLAPContext.java From kylin with Apache License 2.0
public OLAPContext(int seq) {
    this.id = seq;
    this.storageContext = new StorageContext(seq);
    this.sortColumns = Lists.newArrayList();
    this.sortOrders = Lists.newArrayList();

    Map<String, String> parameters = _localPrarameters.get();
    if (parameters != null) {
        String acceptPartialResult = parameters.get(PRM_ACCEPT_PARTIAL_RESULT);
        if (acceptPartialResult != null) {
            this.storageContext.setAcceptPartialResult(Boolean.parseBoolean(acceptPartialResult));
        }
        String acceptUserInfo = parameters.get(PRM_USER_AUTHEN_INFO);
        if (null != acceptUserInfo)
            this.olapAuthen.parseUserInfo(acceptUserInfo);
    }
}
Example #9
Source File: OLAPSortRel.java From Kylin with Apache License 2.0
@Override
public void implementRewrite(RewriteImplementor implementor) {
    implementor.visitChild(this, getChild());

    for (RelFieldCollation fieldCollation : this.collation.getFieldCollations()) {
        int index = fieldCollation.getFieldIndex();
        StorageContext.OrderEnum order = getOrderEnum(fieldCollation.getDirection());
        OLAPRel olapChild = (OLAPRel) this.getChild();
        TblColRef orderCol = olapChild.getColumnRowType().getAllColumns().get(index);
        MeasureDesc measure = findMeasure(orderCol);
        if (measure != null) {
            this.context.storageContext.addSort(measure, order);
        }
        this.context.storageContext.markSort();
    }

    this.rowType = this.deriveRowType();
    this.columnRowType = buildColumnRowType();
}
Example #10
Source File: CubeSegmentTupleIterator.java From Kylin with Apache License 2.0
public CubeSegmentTupleIterator(CubeSegment cubeSeg, Collection<HBaseKeyRange> keyRanges, HConnection conn,
        Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy,
        Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {
    this.cube = cubeSeg.getCubeInstance();
    this.cubeSeg = cubeSeg;
    this.dimensions = dimensions;
    this.filter = filter;
    this.groupBy = groupBy;
    this.rowValueDecoders = rowValueDecoders;
    this.context = context;
    this.tableName = cubeSeg.getStorageLocationIdentifier();
    this.rowKeyDecoder = new RowKeyDecoder(this.cubeSeg);
    this.scanCount = 0;

    try {
        this.table = conn.getTable(tableName);
    } catch (Throwable t) {
        throw new StorageException("Error when open connection to table " + tableName, t);
    }

    this.rangeIterator = keyRanges.iterator();
    scanNextRange();
}
Example #11
Source File: GTCubeStorageQueryRequest.java From kylin-on-parquet-v2 with Apache License 2.0
public GTCubeStorageQueryRequest(Cuboid cuboid, Set<TblColRef> dimensions, Set<TblColRef> groups,
        List<TblColRef> dynGroups, List<TupleExpression> dynGroupExprs, Set<TblColRef> filterCols,
        Set<FunctionDesc> metrics, List<DynamicFunctionDesc> dynFuncs, TupleFilter filter,
        TupleFilter havingFilter, StorageContext context) {
    this.cuboid = cuboid;
    this.dimensions = dimensions;
    this.groups = groups;
    this.dynGroups = dynGroups;
    this.dynGroupExprs = dynGroupExprs;
    this.filterCols = filterCols;
    this.metrics = metrics;
    this.dynFuncs = dynFuncs;
    this.filter = filter;
    this.havingFilter = havingFilter;
    this.context = context;
}
Example #12
Source File: SerializedHBaseTupleIterator.java From Kylin with Apache License 2.0
public SerializedHBaseTupleIterator(HConnection conn, List<HBaseKeyRange> segmentKeyRanges, CubeInstance cube,
        Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy,
        Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {

    this.context = context;
    int limit = context.getLimit();
    this.partialResultLimit = Math.max(limit, PARTIAL_DEFAULT_LIMIT);

    this.segmentIteratorList = new ArrayList<CubeSegmentTupleIterator>(segmentKeyRanges.size());
    Map<CubeSegment, List<HBaseKeyRange>> rangesMap = makeRangesMap(segmentKeyRanges);
    for (Map.Entry<CubeSegment, List<HBaseKeyRange>> entry : rangesMap.entrySet()) {
        CubeSegmentTupleIterator segIter = new CubeSegmentTupleIterator(entry.getKey(), entry.getValue(), conn,
                dimensions, filter, groupBy, rowValueDecoders, context);
        this.segmentIteratorList.add(segIter);
    }

    this.segmentIteratorIterator = this.segmentIteratorList.iterator();
    if (this.segmentIteratorIterator.hasNext()) {
        this.segmentIterator = this.segmentIteratorIterator.next();
    } else {
        this.segmentIterator = ITupleIterator.EMPTY_TUPLE_ITERATOR;
    }
}
Example #13
Source File: GTCubeStorageQueryBase.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);

    List<CubeSegmentScanner> scanners = Lists.newArrayList();
    SegmentPruner segPruner = new SegmentPruner(sqlDigest.filter);
    for (CubeSegment cubeSeg : segPruner.listSegmentsForQuery(cubeInstance)) {
        CubeSegmentScanner scanner = new CubeSegmentScanner(cubeSeg, request.getCuboid(), request.getDimensions(),
                request.getGroups(), request.getDynGroups(), request.getDynGroupExprs(),
                request.getMetrics(), request.getDynFuncs(),
                request.getFilter(), request.getHavingFilter(), request.getContext());
        if (!scanner.isSegmentSkipped())
            scanners.add(scanner);
    }

    if (scanners.isEmpty())
        return ITupleIterator.EMPTY_TUPLE_ITERATOR;

    return new SequentialCubeTupleIterator(scanners, request.getCuboid(), request.getDimensions(),
            request.getDynGroups(), request.getGroups(), request.getMetrics(), returnTupleInfo,
            request.getContext(), sqlDigest);
}
Example #14
Source File: OLAPContext.java From kylin-on-parquet-v2 with Apache License 2.0
public OLAPContext(int seq) {
    this.id = seq;
    this.storageContext = new StorageContext(seq);
    this.sortColumns = Lists.newArrayList();
    this.sortOrders = Lists.newArrayList();

    Map<String, String> parameters = _localPrarameters.get();
    if (parameters != null) {
        String acceptPartialResult = parameters.get(PRM_ACCEPT_PARTIAL_RESULT);
        if (acceptPartialResult != null) {
            this.storageContext.setAcceptPartialResult(Boolean.parseBoolean(acceptPartialResult));
        }
        String acceptUserInfo = parameters.get(PRM_USER_AUTHEN_INFO);
        if (null != acceptUserInfo)
            this.olapAuthen.parseUserInfo(acceptUserInfo);
    }
}
Example #15
Source File: StorageTest.java From Kylin with Apache License 2.0
private int search(List<TblColRef> groups, List<FunctionDesc> aggregations, TupleFilter filter,
        StorageContext context) {
    int count = 0;
    ITupleIterator iterator = null;
    try {
        SQLDigest sqlDigest = new SQLDigest("default.test_kylin_fact", filter, null,
                Collections.<TblColRef> emptySet(), groups, Collections.<TblColRef> emptySet(),
                Collections.<TblColRef> emptySet(), aggregations);
        iterator = storageEngine.search(context, sqlDigest);
        while (iterator.hasNext()) {
            ITuple tuple = iterator.next();
            System.out.println("Tuple = " + tuple);
            count++;
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (iterator != null) {
            iterator.close();
        }
    }
    return count;
}
Example #16
Source File: ITStorageTest.java From kylin-on-parquet-v2 with Apache License 2.0
private int search(List<TblColRef> groups, List<FunctionDesc> aggregations, TupleFilter filter,
        StorageContext context) {
    int count = 0;
    ITupleIterator iterator = null;
    try {
        SQLDigest sqlDigest = new SQLDigest("default.test_kylin_fact",
                /*allCol*/ Collections.<TblColRef> emptySet(), /*join*/ null, groups,
                /*subqueryJoinParticipants*/ Sets.<TblColRef> newHashSet(),
                /*dynamicGroupByColumns*/ Collections.<TblColRef, TupleExpression> emptyMap(),
                /*groupByExpression*/ false,
                /*metricCol*/ Collections.<TblColRef> emptySet(), aggregations,
                /*aggrSqlCalls*/ Collections.<SQLCall> emptyList(),
                /*dynamicAggregations*/ Collections.<DynamicFunctionDesc> emptyList(),
                /*runtimeDimensionColumns*/ Collections.<TblColRef> emptySet(),
                /*runtimeMetricColumns*/ Collections.<TblColRef> emptySet(),
                /*filter col*/ Collections.<TblColRef> emptySet(), filter, null,
                /*sortCol*/ new ArrayList<TblColRef>(), new ArrayList<SQLDigest.OrderEnum>(),
                false, false, false, new HashSet<MeasureDesc>());
        iterator = storageEngine.search(context, sqlDigest, mockup.newTupleInfo(groups, aggregations));
        while (iterator.hasNext()) {
            ITuple tuple = iterator.next();
            System.out.println("Tuple = " + tuple);
            count++;
        }
    } finally {
        if (iterator != null)
            iterator.close();
    }
    return count;
}
Example #17
Source File: SegmentCubeTupleIterator.java From kylin-on-parquet-v2 with Apache License 2.0
public SegmentCubeTupleIterator(CubeSegmentScanner scanner, Cuboid cuboid, Set<TblColRef> selectedDimensions,
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context) {
    this.scanner = scanner;
    this.cuboid = cuboid;
    this.selectedDimensions = selectedDimensions;
    this.selectedMetrics = selectedMetrics;
    this.tupleInfo = returnTupleInfo;
    this.tuple = new Tuple(returnTupleInfo);
    this.context = context;

    CuboidToGridTableMapping mapping = context.getMapping();
    int[] gtDimsIdx = mapping.getDimIndexes(selectedDimensions);
    int[] gtMetricsIdx = mapping.getMetricsIndexes(selectedMetrics);
    // gtColIdx = gtDimsIdx + gtMetricsIdx
    int[] gtColIdx = new int[gtDimsIdx.length + gtMetricsIdx.length];
    System.arraycopy(gtDimsIdx, 0, gtColIdx, 0, gtDimsIdx.length);
    System.arraycopy(gtMetricsIdx, 0, gtColIdx, gtDimsIdx.length, gtMetricsIdx.length);

    this.gtValues = getGTValuesIterator(scanner.iterator(), scanner.getScanRequest(), gtDimsIdx, gtMetricsIdx);
    this.cubeTupleConverter = ((GTCubeStorageQueryBase) context.getStorageQuery()).newCubeTupleConverter(
            scanner.cubeSeg, cuboid, selectedDimensions, selectedMetrics, gtColIdx, tupleInfo);
}
Example #18
Source File: SequentialCubeTupleIterator.java From kylin-on-parquet-v2 with Apache License 2.0
public SequentialCubeTupleIterator(List<CubeSegmentScanner> scanners, Cuboid cuboid,
        Set<TblColRef> selectedDimensions, List<TblColRef> rtGroups, Set<TblColRef> groups,
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context, SQLDigest sqlDigest) {
    this.context = context;
    this.scanners = scanners;

    Set<TblColRef> selectedDims = Sets.newHashSet(selectedDimensions);
    selectedDims.addAll(rtGroups);

    segmentCubeTupleIterators = Lists.newArrayList();
    for (CubeSegmentScanner scanner : scanners) {
        segmentCubeTupleIterators.add(new SegmentCubeTupleIterator(scanner, cuboid, selectedDims, selectedMetrics,
                returnTupleInfo, context));
    }

    if (context.mergeSortPartitionResults() && !sqlDigest.isRawQuery) {
        // query with limit
        logger.info("Using SortedIteratorMergerWithLimit to merge segment results");
        Iterator<Iterator<ITuple>> transformed =
                (Iterator<Iterator<ITuple>>) (Iterator<?>) segmentCubeTupleIterators.iterator();
        tupleIterator = new SortedIteratorMergerWithLimit<ITuple>(transformed, context.getFinalPushDownLimit(),
                getTupleDimensionComparator(cuboid, groups, returnTupleInfo)).getIterator();
    } else {
        // normal case
        logger.info("Using Iterators.concat to merge segment results");
        tupleIterator = Iterators.concat(segmentCubeTupleIterators.iterator());
    }
}
Example #19
Source File: CubeStorageEngine.java From Kylin with Apache License 2.0
private void setThreshold(Collection<TblColRef> dimensions, List<RowValueDecoder> valueDecoders,
        StorageContext context) {
    if (!RowValueDecoder.hasMemHungryCountDistinct(valueDecoders)) {
        return;
    }

    int rowSizeEst = dimensions.size() * 3;
    for (RowValueDecoder decoder : valueDecoders) {
        MeasureDesc[] measures = decoder.getMeasures();
        BitSet projectionIndex = decoder.getProjectionIndex();
        for (int i = projectionIndex.nextSetBit(0); i >= 0; i = projectionIndex.nextSetBit(i + 1)) {
            FunctionDesc func = measures[i].getFunction();
            rowSizeEst += func.getReturnDataType().getSpaceEstimate();
        }
    }

    long rowEst = MEM_BUDGET_PER_QUERY / rowSizeEst;
    context.setThreshold((int) rowEst);
}
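To make the row-count estimate concrete, here is a hedged worked example; the budget constant and the count-distinct space estimate below are assumed illustrative values, not Kylin's actual defaults:

// Illustrative arithmetic only; both constants are assumptions for this sketch.
long MEM_BUDGET_PER_QUERY = 3L * 1024 * 1024 * 1024; // assume a 3 GB per-query budget
int dimCount = 10;                    // 10 dimension columns, estimated at 3 bytes each
int hllSpaceEstimate = 16 * 1024;     // assumed space estimate for one count-distinct counter

int rowSizeEst = dimCount * 3 + hllSpaceEstimate;  // 16,414 bytes per row
long rowEst = MEM_BUDGET_PER_QUERY / rowSizeEst;   // about 196,000 rows
// context.setThreshold((int) rowEst) would then cap the scan at roughly that row count.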
Example #20
Source File: GTCubeStorageQueryBase.java From kylin-on-parquet-v2 with Apache License 2.0
private void enableStreamAggregateIfBeneficial(Cuboid cuboid, Set<TblColRef> groupsD, StorageContext context) {
    CubeDesc cubeDesc = cuboid.getCubeDesc();
    boolean enabled = cubeDesc.getConfig().isStreamAggregateEnabled();

    Set<TblColRef> shardByInGroups = Sets.newHashSet();
    for (TblColRef col : cubeDesc.getShardByColumns()) {
        if (groupsD.contains(col)) {
            shardByInGroups.add(col);
        }
    }
    if (!shardByInGroups.isEmpty()) {
        enabled = false;
        logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}",
                shardByInGroups);
    }
    if (!context.isNeedStorageAggregation()) {
        enabled = false;
        logger.debug("Aggregate partition results is not beneficial because no storage aggregation");
    }

    if (enabled) {
        context.enableStreamAggregate();
    }
}
Example #21
Source File: OLAPSortRel.java From Kylin with Apache License 2.0
private StorageContext.OrderEnum getOrderEnum(RelFieldCollation.Direction direction) {
    if (direction == RelFieldCollation.Direction.DESCENDING) {
        return StorageContext.OrderEnum.DESCENDING;
    } else {
        return StorageContext.OrderEnum.ASCENDING;
    }
}
Example #22
Source File: CubeStorageEngine.java From Kylin with Apache License 2.0
private void setLimit(TupleFilter filter, StorageContext context) {
    boolean goodAggr = context.isExactAggregation();
    boolean goodFilter = filter == null
            || (TupleFilter.isEvaluableRecursively(filter) && context.isCoprocessorEnabled());
    boolean goodSort = !context.hasSort();
    if (goodAggr && goodFilter && goodSort) {
        logger.info("Enable limit " + context.getLimit());
        context.enableLimit();
    }
}
Example #23
Source File: OLAPContext.java From Kylin with Apache License 2.0
public OLAPContext(int seq) {
    this.id = seq;
    this.storageContext = new StorageContext();

    Map<String, String> parameters = _localPrarameters.get();
    if (parameters != null) {
        String acceptPartialResult = parameters.get(PRM_ACCEPT_PARTIAL_RESULT);
        if (acceptPartialResult != null) {
            this.storageContext.setAcceptPartialResult(Boolean.parseBoolean(acceptPartialResult));
        }
    }
}
Example #24
Source File: CubeStorageEngine.java From Kylin with Apache License 2.0
private List<RowValueDecoder> translateAggregation(HBaseMappingDesc hbaseMapping, Collection<FunctionDesc> metrics,
        StorageContext context) {
    Map<HBaseColumnDesc, RowValueDecoder> codecMap = Maps.newHashMap();
    for (FunctionDesc aggrFunc : metrics) {
        Collection<HBaseColumnDesc> hbCols = hbaseMapping.findHBaseColumnByFunction(aggrFunc);
        if (hbCols.isEmpty()) {
            throw new IllegalStateException("can't find HBaseColumnDesc for function "
                    + aggrFunc.getFullExpression());
        }
        HBaseColumnDesc bestHBCol = null;
        int bestIndex = -1;
        for (HBaseColumnDesc hbCol : hbCols) {
            bestHBCol = hbCol;
            bestIndex = hbCol.findMeasureIndex(aggrFunc);
            MeasureDesc measure = hbCol.getMeasures()[bestIndex];
            // criteria for holistic measure: Exact Aggregation && Exact Cuboid
            if (measure.isHolisticCountDistinct() && context.isExactAggregation()) {
                logger.info("Holistic count distinct chosen for " + aggrFunc);
                break;
            }
        }

        RowValueDecoder codec = codecMap.get(bestHBCol);
        if (codec == null) {
            codec = new RowValueDecoder(bestHBCol);
            codecMap.put(bestHBCol, codec);
        }
        codec.setIndex(bestIndex);
    }
    return new ArrayList<RowValueDecoder>(codecMap.values());
}
Example #25
Source File: CubeHBaseRPC.java From kylin with Apache License 2.0
public CubeHBaseRPC(ISegment segment, Cuboid cuboid, GTInfo fullGTInfo, StorageContext context) {
    Preconditions.checkArgument(segment instanceof CubeSegment, "segment must be CubeSegment");

    this.cubeSeg = (CubeSegment) segment;
    this.cuboid = cuboid;
    this.fullGTInfo = fullGTInfo;
    this.queryContext = QueryContextFacade.current();
    this.storageContext = context;

    this.fuzzyKeyEncoder = new FuzzyKeyEncoder(cubeSeg, cuboid);
    this.fuzzyMaskEncoder = new FuzzyMaskEncoder(cubeSeg, cuboid);
}
Example #26
Source File: KylinQueryTimeoutTest.java From kylin with Apache License 2.0
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    try {
        Thread.sleep(5 * 1000);
    } catch (InterruptedException e) {
        throw new KylinTimeoutException("Kylin query timeout");
    }
    return null;
}
Example #27
Source File: InvertedIndexStorageEngine.java From Kylin with Apache License 2.0
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest) {
    String tableName = seg.getStorageLocationIdentifier();

    // HConnection is cached, so it need not be closed
    HConnection conn = HBaseConnection.get(context.getConnUrl());
    try {
        return new EndpointTupleIterator(seg, sqlDigest.filter, sqlDigest.groupbyColumns,
                new ArrayList<>(sqlDigest.aggregations), context, conn);
    } catch (Throwable e) {
        e.printStackTrace();
        throw new IllegalStateException("Error when connecting to II htable " + tableName, e);
    }
}
Example #28
Source File: StorageTest.java From Kylin with Apache License 2.0
@Before
public void setUp() throws Exception {
    this.createTestMetadata();

    CubeManager cubeMgr = CubeManager.getInstance(getTestConfig());
    cube = cubeMgr.getCube("TEST_KYLIN_CUBE_WITHOUT_SLR_EMPTY");
    Assert.assertNotNull(cube);
    storageEngine = StorageEngineFactory.getStorageEngine(cube);

    String url = KylinConfig.getInstanceFromEnv().getStorageUrl();
    context = new StorageContext();
    context.setConnUrl(url);
}
Example #29
Source File: LocalStreamStorageQuery.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    StreamingSegmentManager cubeDataStore = StreamingServer.getInstance().getStreamingSegmentManager(
            cubeInstance.getName());

    boolean enableStreamProfile = BackdoorToggles.isStreamingProfileEnable();
    StreamingQueryProfile queryProfile = new StreamingQueryProfile(QueryContextFacade.current().getQueryId(),
            System.currentTimeMillis());
    if (enableStreamProfile) {
        queryProfile.enableDetailProfile();
    }
    StreamingQueryProfile.set(queryProfile);

    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);
    return cubeDataStore.getSearcher().search(returnTupleInfo, request.getFilter(), request.getHavingFilter(),
            request.getDimensions(), request.getGroups(), request.getMetrics(),
            context.isNeedStorageAggregation());
}
Example #30
Source File: ObserverEnabler.java From Kylin with Apache License 2.0
private static boolean isCoprocessorBeneficial(CubeInstance cube, Collection<TblColRef> groupBy,
        Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {

    String forceFlag = System.getProperty(FORCE_COPROCESSOR);
    if (forceFlag != null) {
        return Boolean.parseBoolean(forceFlag);
    }

    Boolean cubeOverride = CUBE_OVERRIDES.get(cube.getName());
    if (cubeOverride != null) {
        return cubeOverride.booleanValue();
    }

    if (RowValueDecoder.hasMemHungryCountDistinct(rowValueDecoders)) {
        logger.info("Coprocessor is disabled because there is memory hungry count distinct");
        return false;
    }

    if (context.isExactAggregation()) {
        logger.info("Coprocessor is disabled because exactAggregation is true");
        return false;
    }

    Cuboid cuboid = context.getCuboid();
    Set<TblColRef> toAggr = Sets.newHashSet(cuboid.getAggregationColumns());
    toAggr.removeAll(groupBy);
    if (toAggr.isEmpty()) {
        logger.info("Coprocessor is disabled because no additional columns to aggregate");
        return false;
    }

    logger.info("Coprocessor is enabled to aggregate " + toAggr + ", returning " + groupBy);
    return true;
}
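As the first check above shows, the heuristic can be overridden for debugging with a JVM system property. A hedged sketch of that override follows; it assumes the FORCE_COPROCESSOR constant is accessible from the calling code, and its literal key string, which is defined in ObserverEnabler, is not shown in this example:

// Force the coprocessor decision for every query in this JVM;
// isCoprocessorBeneficial() parses this value before any other check runs.
System.setProperty(ObserverEnabler.FORCE_COPROCESSOR, "true");
// ... run queries ...
System.clearProperty(ObserverEnabler.FORCE_COPROCESSOR);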