Java Code Examples for org.apache.hadoop.hbase.HColumnDescriptor#setBloomFilterType()
The following examples show how to use
org.apache.hadoop.hbase.HColumnDescriptor#setBloomFilterType().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: HBaseBasedAuditRepository.java From atlas with Apache License 2.0 | 6 votes |
private void createTableIfNotExists() throws AtlasException { Admin admin = null; try { admin = connection.getAdmin(); LOG.info("Checking if table {} exists", tableName.getNameAsString()); if (!admin.tableExists(tableName)) { LOG.info("Creating table {}", tableName.getNameAsString()); HTableDescriptor tableDescriptor = new HTableDescriptor(tableName); HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY); columnFamily.setMaxVersions(1); columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF); columnFamily.setCompressionType(Compression.Algorithm.GZ); columnFamily.setBloomFilterType(BloomType.ROW); tableDescriptor.addFamily(columnFamily); admin.createTable(tableDescriptor); } else { LOG.info("Table {} exists", tableName.getNameAsString()); } } catch (IOException e) { throw new AtlasException(e); } finally { close(admin); } }
Example 2
Source File: HBaseBasedAuditRepository.java From incubator-atlas with Apache License 2.0 | 6 votes |
private void createTableIfNotExists() throws AtlasException { Admin admin = null; try { admin = connection.getAdmin(); LOG.info("Checking if table {} exists", tableName.getNameAsString()); if (!admin.tableExists(tableName)) { LOG.info("Creating table {}", tableName.getNameAsString()); HTableDescriptor tableDescriptor = new HTableDescriptor(tableName); HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY); columnFamily.setMaxVersions(1); columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF); columnFamily.setCompressionType(Compression.Algorithm.GZ); columnFamily.setBloomFilterType(BloomType.ROW); tableDescriptor.addFamily(columnFamily); admin.createTable(tableDescriptor); } else { LOG.info("Table {} exists", tableName.getNameAsString()); } } catch (IOException e) { throw new AtlasException(e); } finally { close(admin); } }
Example 3
Source File: HBaseEntitySchemaManager.java From eagle with Apache License 2.0 | 6 votes |
private void createTable(EntityDefinition entityDefinition) throws IOException { String tableName = entityDefinition.getTable(); if (admin.tableExists(tableName)) { LOG.info("Table {} already exists", tableName); } else { HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName)); // Adding column families to table descriptor HColumnDescriptor columnDescriptor = new HColumnDescriptor(entityDefinition.getColumnFamily()); columnDescriptor.setBloomFilterType(BloomType.ROW); //columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY); columnDescriptor.setMaxVersions(DEFAULT_MAX_VERSIONS); tableDescriptor.addFamily(columnDescriptor); // Execute the table through admin admin.createTable(tableDescriptor); LOG.info("Successfully create Table {}", tableName); } }
Example 4
Source File: Create2.java From examples with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { Configuration conf = HBaseConfiguration.create(); HBaseAdmin admin = new HBaseAdmin(conf); // tag::CREATE2[] HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("pages")); byte[][] splits = {Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("k"), Bytes.toBytes("n"), Bytes.toBytes("t")}; desc.setValue(Bytes.toBytes("comment"), Bytes.toBytes("Create 10012014")); HColumnDescriptor family = new HColumnDescriptor("c"); family.setCompressionType(Algorithm.GZ); family.setMaxVersions(52); family.setBloomFilterType(BloomType.ROW); desc.addFamily(family); admin.createTable(desc, splits); // end::CREATE2[] admin.close(); }
Example 5
Source File: CreateTable.java From examples with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { try (Connection connection = ConnectionFactory.createConnection(); Admin admin = connection.getAdmin();) { LOG.info("Starting table creation"); // tag::CREATE[] TableName documents = TableName.valueOf("documents"); HTableDescriptor desc = new HTableDescriptor(documents); HColumnDescriptor family = new HColumnDescriptor("c"); family.setCompressionType(Algorithm.GZ); family.setBloomFilterType(BloomType.NONE); desc.addFamily(family); UniformSplit uniformSplit = new UniformSplit(); admin.createTable(desc, uniformSplit.split(8)); // end::CREATE[] LOG.info("Table successfuly created"); } }
Example 6
Source File: Configure.java From learning-hadoop with Apache License 2.0 | 5 votes |
public static void configColumnFamily(HColumnDescriptor desc) { desc.setMaxVersions(1); // 设置使用的过滤器的类型--- // setBloomFilter:指定是否使用BloomFilter,可提高随机查询效率。默认关闭 desc.setBloomFilterType(BloomType.ROW); // 设定数据压缩类型。默认无压缩 desc.setCompressionType(COMPRESS_TYPE); }
Example 7
Source File: TableBuilder.java From learning-hadoop with Apache License 2.0 | 5 votes |
/** * @param args */ public static void main(String[] args) { Configuration conf = HBaseConfiguration.create(); byte[] columnFamily = Bytes.toBytes("f"); String tableName = "t"; try { ZKUtil.applyClusterKeyToConf(conf, "edh1:2181:/hbase"); HBaseAdmin hba = new HBaseAdmin(conf); if (hba.tableExists(tableName)) { hba.disableTable(tableName); hba.deleteTable(tableName); } HTableDescriptor tableDescriptor = new HTableDescriptor(tableName); HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily); columnDescriptor.setMaxVersions(1); columnDescriptor.setBloomFilterType(BloomType.ROW); tableDescriptor.addFamily(columnDescriptor); hba.createTable(tableDescriptor); hba.close(); } catch (IOException e) { e.printStackTrace(); } }
Example 8
Source File: HBaseCreateTable.java From SparkOnALog with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws IOException { if (args.length == 0) { System.out.println("CreateTable {tableName} {columnFamilyName}"); return; } String tableName = args[0]; String columnFamilyName = args[1]; HBaseAdmin admin = new HBaseAdmin(new Configuration()); HTableDescriptor tableDescriptor = new HTableDescriptor(); tableDescriptor.setName(Bytes.toBytes(tableName)); HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName); columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY); columnDescriptor.setBlocksize(64 * 1024); columnDescriptor.setBloomFilterType(BloomType.ROW); tableDescriptor.addFamily(columnDescriptor); //tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName()); System.out.println("-Creating Table"); admin.createTable(tableDescriptor); admin.close(); System.out.println("-Done"); }
Example 9
Source File: App.java From hadoop-arch-book with Apache License 2.0 | 5 votes |
private static boolean createTable(byte[] tableName, byte[] columnFamilyName, short regionCount, long regionMaxSize, HBaseAdmin admin) throws IOException { if (admin.tableExists(tableName)) { return false; } HTableDescriptor tableDescriptor = new HTableDescriptor(); tableDescriptor.setName(tableName); HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName); columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY); columnDescriptor.setBlocksize(64 * 1024); columnDescriptor.setBloomFilterType(BloomType.ROW); columnDescriptor.setMaxVersions(10); tableDescriptor.addFamily(columnDescriptor); tableDescriptor.setMaxFileSize(regionMaxSize); tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName()); tableDescriptor.setDeferredLogFlush(true); regionCount = (short) Math.abs(regionCount); int regionRange = Short.MAX_VALUE / regionCount; int counter = 0; byte[][] splitKeys = new byte[regionCount][]; for (byte[] splitKey : splitKeys) { counter = counter + regionRange; String key = StringUtils.leftPad(Integer.toString(counter), 5, '0'); splitKey = Bytes.toBytes(key); System.out.println(" - Split: " + splitKey); } return true; }
Example 10
Source File: CreateTable.java From HBase-ToHDFS with Apache License 2.0 | 5 votes |
private static void createTable(String tableName, String columnFamilyName, short regionCount, long regionMaxSize, HBaseAdmin admin) throws IOException { System.out.println("Creating Table: " + tableName); HTableDescriptor tableDescriptor = new HTableDescriptor(); tableDescriptor.setName(Bytes.toBytes(tableName)); HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName); columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY); columnDescriptor.setBlocksize(64 * 1024); columnDescriptor.setBloomFilterType(BloomType.ROW); tableDescriptor.addFamily(columnDescriptor); tableDescriptor.setMaxFileSize(regionMaxSize); tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName()); tableDescriptor.setDeferredLogFlush(true); regionCount = (short)Math.abs(regionCount); int regionRange = Short.MAX_VALUE/regionCount; int counter = 0; byte[][] splitKeys = new byte[regionCount][]; for (int i = 0 ; i < splitKeys.length; i++) { counter = counter + regionRange; String key = StringUtils.leftPad(Integer.toString(counter), 5, '0'); splitKeys[i] = Bytes.toBytes(key); System.out.println(" - Split: " + i + " '" + key + "'"); } admin.createTable(tableDescriptor, splitKeys); }
Example 11
Source File: HBaseCreateTable.java From Kafka-Spark-Hbase-Example with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws IOException { if (args.length == 0) { System.out.println("CreateTable {tableName} {columnFamilyName}"); return; } String tableName = args[0]; String columnFamilyName = args[1]; HBaseAdmin admin = new HBaseAdmin(new Configuration()); HTableDescriptor tableDescriptor = new HTableDescriptor(); tableDescriptor.setName(Bytes.toBytes(tableName)); HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName); columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY); columnDescriptor.setBlocksize(64 * 1024); columnDescriptor.setBloomFilterType(BloomType.ROW); tableDescriptor.addFamily(columnDescriptor); //tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName()); System.out.println("-Creating Table"); admin.createTable(tableDescriptor); admin.close(); System.out.println("-Done"); }
Example 12
Source File: HBaseSITestEnv.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
private static HTableDescriptor generateTransactionTable() throws IOException{ HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("splice",HConfiguration.TRANSACTION_TABLE)); desc.addCoprocessor(TxnLifecycleEndpoint.class.getName()); HColumnDescriptor columnDescriptor = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES); columnDescriptor.setMaxVersions(5); columnDescriptor.setCompressionType(Compression.Algorithm.NONE); columnDescriptor.setInMemory(true); columnDescriptor.setBlockCacheEnabled(true); columnDescriptor.setBloomFilterType(BloomType.ROWCOL); desc.addFamily(columnDescriptor); desc.addFamily(new HColumnDescriptor(Bytes.toBytes(SIConstants.SI_PERMISSION_FAMILY))); return desc; }
Example 13
Source File: HBaseSITestEnv.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
public static HColumnDescriptor createDataFamily() { HColumnDescriptor snapshot = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES); snapshot.setMaxVersions(Integer.MAX_VALUE); snapshot.setCompressionType(Compression.Algorithm.NONE); snapshot.setInMemory(true); snapshot.setBlockCacheEnabled(true); snapshot.setBloomFilterType(BloomType.ROW); return snapshot; }
Example 14
Source File: CubeHTableUtil.java From kylin-on-parquet-v2 with Apache License 2.0 | 4 votes |
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) { HColumnDescriptor cf = new HColumnDescriptor(cfName); cf.setMaxVersions(1); if (isMemoryHungry) { cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize()); } else { cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize()); } String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase(Locale.ROOT); switch (hbaseDefaultCC) { case "snappy": { logger.info("hbase will use snappy to compress data"); cf.setCompressionType(Algorithm.SNAPPY); break; } case "lzo": { logger.info("hbase will use lzo to compress data"); cf.setCompressionType(Algorithm.LZO); break; } case "gz": case "gzip": { logger.info("hbase will use gzip to compress data"); cf.setCompressionType(Algorithm.GZ); break; } case "lz4": { logger.info("hbase will use lz4 to compress data"); cf.setCompressionType(Algorithm.LZ4); break; } case "none": default: { logger.info("hbase will not use any compression algorithm to compress data"); cf.setCompressionType(Algorithm.NONE); } } try { String encodingStr = kylinConfig.getHbaseDefaultEncoding(); DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr); cf.setDataBlockEncoding(encoding); } catch (Exception e) { logger.info("hbase will not use any encoding", e); cf.setDataBlockEncoding(DataBlockEncoding.NONE); } cf.setInMemory(false); cf.setBloomFilterType(BloomType.NONE); cf.setScope(kylinConfig.getHBaseReplicationScope()); return cf; }
Example 15
Source File: CubeHTableUtil.java From kylin with Apache License 2.0 | 4 votes |
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) { HColumnDescriptor cf = new HColumnDescriptor(cfName); cf.setMaxVersions(1); if (isMemoryHungry) { cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize()); } else { cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize()); } String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase(Locale.ROOT); switch (hbaseDefaultCC) { case "snappy": { logger.info("hbase will use snappy to compress data"); cf.setCompressionType(Algorithm.SNAPPY); break; } case "lzo": { logger.info("hbase will use lzo to compress data"); cf.setCompressionType(Algorithm.LZO); break; } case "gz": case "gzip": { logger.info("hbase will use gzip to compress data"); cf.setCompressionType(Algorithm.GZ); break; } case "lz4": { logger.info("hbase will use lz4 to compress data"); cf.setCompressionType(Algorithm.LZ4); break; } case "none": default: { logger.info("hbase will not use any compression algorithm to compress data"); cf.setCompressionType(Algorithm.NONE); } } try { String encodingStr = kylinConfig.getHbaseDefaultEncoding(); DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr); cf.setDataBlockEncoding(encoding); } catch (Exception e) { logger.info("hbase will not use any encoding", e); cf.setDataBlockEncoding(DataBlockEncoding.NONE); } cf.setInMemory(false); cf.setBloomFilterType(BloomType.NONE); cf.setScope(kylinConfig.getHBaseReplicationScope()); return cf; }
Example 16
Source File: TableCommand.java From pinpoint with Apache License 2.0 | 4 votes |
private HColumnDescriptor newColumnDescriptor(ColumnFamilyChange columnFamilyChange) { HColumnDescriptor hcd = new HColumnDescriptor(columnFamilyChange.getName()); ColumnFamilyConfiguration columnFamilyConfiguration = columnFamilyChange.getColumnFamilyConfiguration(); Boolean blockCacheEnabled = columnFamilyConfiguration.getBlockCacheEnabled(); if (blockCacheEnabled != null) { hcd.setBlockCacheEnabled(blockCacheEnabled); } Integer replicationScope = columnFamilyConfiguration.getReplicationScope(); if (replicationScope != null) { hcd.setScope(replicationScope); } Boolean inMemory = columnFamilyConfiguration.getInMemory(); if (inMemory != null) { hcd.setInMemory(inMemory); } Integer timeToLive = columnFamilyConfiguration.getTimeToLive(); if (timeToLive != null) { hcd.setTimeToLive(timeToLive); } ColumnFamilyConfiguration.DataBlockEncoding dataBlockEncoding = columnFamilyConfiguration.getDataBlockEncoding(); if (dataBlockEncoding != null) { hcd.setDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncoding.name())); } Integer blockSize = columnFamilyConfiguration.getBlockSize(); if (blockSize != null) { hcd.setBlocksize(blockSize); } Integer maxVersions = columnFamilyConfiguration.getMaxVersions(); if (maxVersions != null) { hcd.setMaxVersions(maxVersions); } Integer minVersions = columnFamilyConfiguration.getMinVersions(); if (minVersions != null) { hcd.setMinVersions(minVersions); } ColumnFamilyConfiguration.BloomFilter bloomFilter = columnFamilyConfiguration.getBloomFilter(); if (bloomFilter != null) { hcd.setBloomFilterType(BloomType.valueOf(bloomFilter.name())); } if (compressionAlgorithm != Compression.Algorithm.NONE) { hcd.setCompressionType(compressionAlgorithm); } return hcd; }