org.apache.kylin.cube.model.HBaseColumnFamilyDesc Java Examples
The following examples show how to use org.apache.kylin.cube.model.HBaseColumnFamilyDesc. Each example is taken from an open-source project; the source file and project are noted above each snippet.
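All of the examples share the same traversal pattern: a cube's CubeDesc exposes an HBase mapping, the mapping exposes column families, and each family exposes columns that reference measures by name. As a quick orientation, here is a minimal sketch that walks that hierarchy using only accessors that appear in the examples below; it assumes cubeDesc is an already-loaded org.apache.kylin.cube.model.CubeDesc.

// Minimal sketch (assumes an already-loaded CubeDesc named cubeDesc).
// Prints each HBase column family, its column qualifiers, and the
// measure names each column stores.
for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
    System.out.println("family: " + cfDesc.getName());
    for (HBaseColumnDesc colDesc : cfDesc.getColumns()) {
        System.out.println("  qualifier: " + colDesc.getQualifier()
                + " measures: " + java.util.Arrays.toString(colDesc.getMeasureRefs()));
    }
}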
Example #1
Source File: CubeHFileMapper.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);

    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();
    CubeManager cubeMgr = CubeManager.getInstance(config);
    cubeDesc = cubeMgr.getCube(cubeName).getDescriptor();

    inputCodec = new MeasureCodec(cubeDesc.getMeasures());
    inputMeasures = new Object[cubeDesc.getMeasures().size()];
    keyValueCreators = Lists.newArrayList();

    for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
        for (HBaseColumnDesc colDesc : cfDesc.getColumns()) {
            keyValueCreators.add(new KeyValueCreator(cubeDesc, colDesc));
        }
    }
}
Example #2
Source File: CubeHBaseRPC.java From kylin-on-parquet-v2 with Apache License 2.0
/**
 * prune untouched hbase columns
 */
protected List<Pair<byte[], byte[]>> makeHBaseColumns(ImmutableBitSet selectedColBlocks) {
    List<Pair<byte[], byte[]>> result = Lists.newArrayList();

    int colBlkIndex = 1;
    HBaseMappingDesc hbaseMapping = cubeSeg.getCubeDesc().getHbaseMapping();
    for (HBaseColumnFamilyDesc familyDesc : hbaseMapping.getColumnFamily()) {
        byte[] byteFamily = Bytes.toBytes(familyDesc.getName());
        for (HBaseColumnDesc hbaseColDesc : familyDesc.getColumns()) {
            if (selectedColBlocks.get(colBlkIndex)) {
                byte[] byteQualifier = Bytes.toBytes(hbaseColDesc.getQualifier());
                result.add(Pair.newPair(byteFamily, byteQualifier));
            }
            colBlkIndex++;
        }
    }

    return result;
}
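Note that colBlkIndex starts at 1 rather than 0: the sanity check in the next example (selectedColBlocks.trueBitCount() == ret.size() + 1) indicates that column block 0 is the always-selected row key block, so the HBase columns occupy blocks 1..N in mapping order. A small illustrative helper (not part of Kylin) that derives the total block count under that assumption:

// Illustrative helper, not a Kylin API: one row-key block plus one
// block per HBase column, counted in mapping order.
static int countColumnBlocks(HBaseMappingDesc hbaseMapping) {
    int count = 1; // block 0: the row key
    for (HBaseColumnFamilyDesc familyDesc : hbaseMapping.getColumnFamily()) {
        for (HBaseColumnDesc colDesc : familyDesc.getColumns()) {
            count++;
        }
    }
    return count;
}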
Example #3
Source File: CubeHBaseRPC.java From kylin-on-parquet-v2 with Apache License 2.0
/**
 * for each selected hbase column, it might contain values of multiple GT columns.
 * The mapping should be passed down to storage
 */
protected List<List<Integer>> getHBaseColumnsGTMapping(ImmutableBitSet selectedColBlocks) {
    List<List<Integer>> ret = Lists.newArrayList();

    int colBlkIndex = 1;
    int metricOffset = fullGTInfo.getPrimaryKey().trueBitCount();

    HBaseMappingDesc hbaseMapping = cubeSeg.getCubeDesc().getHbaseMapping();
    for (HBaseColumnFamilyDesc familyDesc : hbaseMapping.getColumnFamily()) {
        for (HBaseColumnDesc hbaseColDesc : familyDesc.getColumns()) {
            if (selectedColBlocks.get(colBlkIndex)) {
                int[] metricIndexes = hbaseColDesc.getMeasureIndex();
                Integer[] gtIndexes = new Integer[metricIndexes.length];
                for (int i = 0; i < gtIndexes.length; i++) {
                    gtIndexes[i] = metricIndexes[i] + metricOffset;
                }
                ret.add(Arrays.asList(gtIndexes));
            }
            colBlkIndex++;
        }
    }

    Preconditions.checkState(selectedColBlocks.trueBitCount() == ret.size() + 1);
    return ret;
}
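The final Preconditions.checkState makes the block-numbering convention explicit: the number of selected column blocks must equal the number of emitted GT mappings plus one, the extra block being the row key, which is always selected but contributes no measure indexes.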
Example #4
Source File: CubeHFileMapper.java From Kylin with Apache License 2.0
@Override
protected void setup(Context context) throws IOException {
    super.publishConfiguration(context.getConfiguration());
    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);

    KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata(context.getConfiguration());
    CubeManager cubeMgr = CubeManager.getInstance(config);
    cubeDesc = cubeMgr.getCube(cubeName).getDescriptor();

    inputCodec = new MeasureCodec(cubeDesc.getMeasures());
    inputMeasures = new Object[cubeDesc.getMeasures().size()];
    keyValueCreators = Lists.newArrayList();

    for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHBaseMapping().getColumnFamily()) {
        for (HBaseColumnDesc colDesc : cfDesc.getColumns()) {
            keyValueCreators.add(new KeyValueCreator(cubeDesc, colDesc));
        }
    }
}
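This variant comes from the older Kylin codebase and shows how the API has drifted relative to Example #1: setup/publishConfiguration became doSetup/bindCurrentConfiguration, loadKylinPropsAndMetadata no longer takes a Configuration argument, and the accessor was renamed from getHBaseMapping() to getHbaseMapping(). The HBaseColumnFamilyDesc traversal itself is unchanged.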
Example #5
Source File: CubeDesc.java From Kylin with Apache License 2.0
private void initMeasureReferenceToColumnFamily() {
    if (measures == null || measures.size() == 0)
        return;

    Map<String, MeasureDesc> measureCache = new HashMap<String, MeasureDesc>();
    for (MeasureDesc m : measures)
        measureCache.put(m.getName(), m);

    for (HBaseColumnFamilyDesc cf : getHBaseMapping().getColumnFamily()) {
        for (HBaseColumnDesc c : cf.getColumns()) {
            MeasureDesc[] measureDescs = new MeasureDesc[c.getMeasureRefs().length];
            for (int i = 0; i < c.getMeasureRefs().length; i++) {
                measureDescs[i] = measureCache.get(c.getMeasureRefs()[i]);
            }
            c.setMeasures(measureDescs);
            c.setColumnFamilyName(cf.getName());
        }
    }
}
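This method runs while the cube descriptor is being initialized: each measure_refs entry under hbase_mapping in the cube's JSON descriptor is a measure name, and this pass resolves those names to actual MeasureDesc objects and back-fills the owning family name onto each column. Note that measureCache.get(...) silently returns null for an unknown reference, so a typo in measure_refs would surface later as a NullPointerException; the validation in Example #6 below is the server-side guard against exactly that kind of mismatch.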
Example #6
Source File: CubeController.java From kylin-on-parquet-v2 with Apache License 2.0
private void validateColumnFamily(CubeDesc cubeDesc) {
    Set<String> columnFamilyMetricsSet = Sets.newHashSet();
    for (HBaseColumnFamilyDesc hBaseColumnFamilyDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
        for (HBaseColumnDesc hBaseColumnDesc : hBaseColumnFamilyDesc.getColumns()) {
            for (String columnName : hBaseColumnDesc.getMeasureRefs()) {
                columnFamilyMetricsSet.add(columnName);
            }
        }
    }
    for (MeasureDesc measureDesc : cubeDesc.getMeasures()) {
        if (!columnFamilyMetricsSet.contains(measureDesc.getName())) {
            throw new BadRequestException("column family lack measure:" + measureDesc.getName());
        }
    }
    if (cubeDesc.getMeasures().size() != columnFamilyMetricsSet.size()) {
        throw new BadRequestException(
                "the number of input measure and the number of measure defined in cubedesc are not consistent");
    }

    for (RowKeyColDesc rowKeyColDesc : cubeDesc.getRowkey().getRowKeyColumns()) {
        Object[] encodingConf = DimensionEncoding.parseEncodingConf(rowKeyColDesc.getEncoding());
        String encodingName = (String) encodingConf[0];
        String[] encodingArgs = (String[]) encodingConf[1];

        if (!DimensionEncodingFactory.isValidEncoding(encodingName, encodingArgs,
                rowKeyColDesc.getEncodingVersion())) {
            throw new BadRequestException("Illegal row key column desc: " + rowKeyColDesc);
        }
    }
}
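The first two checks together establish set equality between declared measures and column-family references: the containment loop ensures every measure is stored in some column family, while the size comparison rules out measure_refs entries that point at measures which do not exist.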
Example #7
Source File: CubeHTableUtil.java From kylin-on-parquet-v2 with Apache License 2.0
public static void createHTable(CubeSegment cubeSegment, byte[][] splitKeys) throws IOException {
    String tableName = cubeSegment.getStorageLocationIdentifier();
    CubeInstance cubeInstance = cubeSegment.getCubeInstance();
    CubeDesc cubeDesc = cubeInstance.getDescriptor();
    KylinConfig kylinConfig = cubeDesc.getConfig();

    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(cubeSegment.getStorageLocationIdentifier()));
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    tableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));

    if (!StringUtils.isEmpty(kylinConfig.getKylinOwner())) {
        //HTableOwner is the team that provides kylin service
        tableDesc.setValue(IRealizationConstants.HTableOwner, kylinConfig.getKylinOwner());
    }

    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        tableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    //HTableUser is the cube owner, which will be the "user"
    tableDesc.setValue(IRealizationConstants.HTableUser, cubeInstance.getOwner());

    tableDesc.setValue(IRealizationConstants.HTableSegmentTag, cubeSegment.toString());

    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin admin = conn.getAdmin();

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = createColumnFamily(kylinConfig, cfDesc.getName(), cfDesc.isMemoryHungry());
            tableDesc.addFamily(cf);
        }

        if (admin.tableExists(TableName.valueOf(tableName))) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);
        Preconditions.checkArgument(admin.isTableAvailable(TableName.valueOf(tableName)),
                "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }
}
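The per-family createColumnFamily helper is not shown here; cfDesc.isMemoryHungry() is the HBaseColumnFamilyDesc flag that lets Kylin tune HBase settings differently for families holding large measures. The older job in Example #8 below shows the kind of per-family settings involved, so a rough sketch of such a factory might look like the following (the values and the memory-hungry branch are illustrative assumptions, not Kylin's actual configuration):

// Rough sketch of a column-family factory. The specific values and the
// isMemoryHungry branch are illustrative assumptions, not Kylin's code.
static HColumnDescriptor createColumnFamilySketch(String cfName, boolean isMemoryHungry) {
    HColumnDescriptor cf = new HColumnDescriptor(cfName);
    cf.setMaxVersions(1); // cube data is write-once, no need for versions
    cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    cf.setInMemory(false);
    cf.setBlocksize(isMemoryHungry ? 4 * 1024 * 1024 : 64 * 1024); // assumed sizes
    return cf;
}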
Example #8
Source File: CreateHTableJob.java From Kylin with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(OPTION_CUBE_NAME);
    options.addOption(OPTION_PARTITION_FILE_PATH);
    options.addOption(OPTION_HTABLE_NAME);
    parseOptions(options, args);

    Path partitionFilePath = new Path(getOptionValue(OPTION_PARTITION_FILE_PATH));

    String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    CubeManager cubeMgr = CubeManager.getInstance(config);
    CubeInstance cube = cubeMgr.getCube(cubeName);
    CubeDesc cubeDesc = cube.getDescriptor();

    String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
    // https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());

    Configuration conf = HBaseConfiguration.create(getConf());
    HBaseAdmin admin = new HBaseAdmin(conf);

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHBaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = new HColumnDescriptor(cfDesc.getName());
            cf.setMaxVersions(1);

            if (LZOSupportnessChecker.getSupportness()) {
                logger.info("hbase will use lzo to compress data");
                cf.setCompressionType(Algorithm.LZO);
            } else {
                logger.info("hbase will not use lzo to compress data");
            }

            cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            cf.setInMemory(false);
            cf.setBlocksize(4 * 1024 * 1024); // set to 4MB
            tableDesc.addFamily(cf);
        }

        byte[][] splitKeys = getSplits(conf, partitionFilePath);

        if (admin.tableExists(tableName)) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);
        logger.info("create hbase table " + tableName + " done.");

        return 0;
    } catch (Exception e) {
        printUsage(options);
        e.printStackTrace(System.err);
        logger.error(e.getLocalizedMessage(), e);
        return 2;
    } finally {
        admin.close();
    }
}
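Compare the split policies: this older job sets ConstantSizeRegionSplitPolicy, while the newer createHTable in Example #7 disables splitting entirely with DisabledRegionSplitPolicy. In both cases the region boundaries come from the precomputed splitKeys derived from the partition file, so automatic splitting is either constrained or switched off rather than relied upon.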
Example #9
Source File: BulkLoadJob.java From Kylin with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_INPUT_PATH);
        options.addOption(OPTION_HTABLE_NAME);
        options.addOption(OPTION_CUBE_NAME);
        parseOptions(options, args);

        String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
        // e.g
        // /tmp/kylin-3f150b00-3332-41ca-9d3d-652f67f044d7/test_kylin_cube_with_slr_ready_2_segments/hfile/
        // end with "/"
        String input = getOptionValue(OPTION_INPUT_PATH);

        Configuration conf = HBaseConfiguration.create(getConf());
        FileSystem fs = FileSystem.get(conf);

        String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
        KylinConfig config = KylinConfig.getInstanceFromEnv();
        CubeManager cubeMgr = CubeManager.getInstance(config);
        CubeInstance cube = cubeMgr.getCube(cubeName);
        CubeDesc cubeDesc = cube.getDescriptor();

        FsPermission permission = new FsPermission((short) 0777);
        for (HBaseColumnFamilyDesc cf : cubeDesc.getHBaseMapping().getColumnFamily()) {
            String cfName = cf.getName();
            fs.setPermission(new Path(input + cfName), permission);
        }

        String[] newArgs = new String[2];
        newArgs[0] = input;
        newArgs[1] = tableName;

        log.debug("Start to run LoadIncrementalHFiles");
        int ret = ToolRunner.run(new LoadIncrementalHFiles(conf), newArgs);
        log.debug("End to run LoadIncrementalHFiles");
        return ret;
    } catch (Exception e) {
        printUsage(options);
        throw e;
    }
}
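The chmod to 0777 on each column-family directory exists because LoadIncrementalHFiles moves the generated HFiles into HBase rather than copying them, so the region servers, typically running as a different OS user, need write access to those source directories.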