Java Code Examples for org.apache.hadoop.hbase.io.compress.Compression#getCompressionAlgorithmByName()
The following examples show how to use org.apache.hadoop.hbase.io.compress.Compression#getCompressionAlgorithmByName(). Each example is drawn from an open-source project; the source file and license are noted above each listing.
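Before the project examples, here is a minimal, self-contained sketch of the lookup itself. The class name and codec strings are illustrative assumptions; the unknown-name behavior (an IllegalArgumentException) is the same one Example 2 below guards against.

import org.apache.hadoop.hbase.io.compress.Compression;

public class CompressionLookupSketch {
  public static void main(String[] args) {
    // Known names resolve to Compression.Algorithm constants. Names are
    // matched against the algorithms' lower-case names ("none", "gz",
    // "lzo", "snappy", ...), which is why the examples below lower-case
    // their input first.
    Compression.Algorithm algo = Compression.getCompressionAlgorithmByName("gz");
    System.out.println("Resolved: " + algo);

    // An unrecognized name is rejected with IllegalArgumentException.
    try {
      Compression.getCompressionAlgorithmByName("no-such-codec");
    } catch (IllegalArgumentException e) {
      System.out.println("Unknown codec rejected: " + e.getMessage());
    }
  }
}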
Example 1
Source File: ThriftUtilities.java From hbase with Apache License 2.0
/**
 * This utility method creates a new HBase column family descriptor based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in Thrift ColumnDescriptor object
 * @return ModifyableColumnFamilyDescriptor
 * @throws IllegalArgument if the column name is empty
 */
static public ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor colDescFromThrift(
    ColumnDescriptor in) throws IllegalArgument {
  Compression.Algorithm comp =
      Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT));
  BloomType bt = BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte[] parsedName = CellUtil.parseColumn(Bytes.getBytes(in.name))[0];
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(parsedName)
          .setMaxVersions(in.maxVersions)
          .setCompressionType(comp)
          .setInMemory(in.inMemory)
          .setBlockCacheEnabled(in.blockCacheEnabled)
          .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
          .setBloomFilterType(bt);
  return familyDescriptor;
}
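Note the in.compression.toLowerCase(Locale.ROOT) call before the lookup: the Thrift struct may carry the codec name in any case, while the algorithm names are registered in lower case, so normalizing first avoids a spurious IllegalArgumentException.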
Example 2
Source File: CompressionTest.java From hbase with Apache License 2.0
public static boolean testCompression(String codec) {
  codec = codec.toLowerCase(Locale.ROOT);
  Compression.Algorithm a;

  try {
    a = Compression.getCompressionAlgorithmByName(codec);
  } catch (IllegalArgumentException e) {
    LOG.warn("Codec type: " + codec + " is not known");
    return false;
  }

  try {
    testCompression(a);
    return true;
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + codec, ignored);
    return false;
  }
}
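A rough sketch of how this guard might be used to probe several codecs at once; the codec list and the loop are assumptions, while the boolean return comes from the method above:

// Hypothetical caller: probe a few codec names and report which are usable.
for (String codec : new String[] { "gz", "snappy", "lzo" }) {
  System.out.println(codec + " usable: " + CompressionTest.testCompression(codec));
}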
Example 3
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
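Two details make this probe robust: algo.getCompressor() forces the codec to actually instantiate (which fails for codecs whose native libraries are missing), and the catch clause uses Throwable rather than Exception, since a failed native load typically surfaces as an Error (e.g. UnsatisfiedLinkError) rather than an exception.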
Example 4
Source File: DataBlockEncodingTool.java From hbase with Apache License 2.0
/**
 * @param compressionAlgorithmName What kind of algorithm should be used
 *                                 as baseline for comparison (e.g. lzo, gz).
 */
public DataBlockEncodingTool(String compressionAlgorithmName) {
  this.compressionAlgorithmName = compressionAlgorithmName;
  this.compressionAlgorithm =
      Compression.getCompressionAlgorithmByName(compressionAlgorithmName);
  this.compressor = this.compressionAlgorithm.getCompressor();
  this.decompressor = this.compressionAlgorithm.getDecompressor();
}
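Resolving the algorithm once in the constructor and caching the Compressor/Decompressor pair keeps the benchmark loop free of repeated lookups; the same pattern is worth copying whenever the codec name is fixed for the lifetime of the object.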
Example 5
Source File: HBaseConnectionFactory.java From spliceengine with GNU Affero General Public License v3.0
public HTableDescriptor generateTransactionTable() {
  HTableDescriptor desc = new HTableDescriptor(
      TableName.valueOf(namespaceBytes, HConfiguration.TRANSACTION_TABLE_BYTES));
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(DEFAULT_FAMILY_BYTES);
  columnDescriptor.setMaxVersions(5);
  Compression.Algorithm compress =
      Compression.getCompressionAlgorithmByName(config.getCompressionAlgorithm());
  columnDescriptor.setCompressionType(compress);
  columnDescriptor.setInMemory(HConfiguration.DEFAULT_IN_MEMORY);
  columnDescriptor.setBlockCacheEnabled(HConfiguration.DEFAULT_BLOCKCACHE);
  columnDescriptor.setBloomFilterType(
      BloomType.valueOf(HConfiguration.DEFAULT_BLOOMFILTER.toUpperCase()));
  columnDescriptor.setTimeToLive(HConfiguration.DEFAULT_TTL);
  desc.addFamily(columnDescriptor);
  desc.addFamily(new HColumnDescriptor(Bytes.toBytes(SI_PERMISSION_FAMILY)));
  return desc;
}
Example 6
Source File: HBaseConnectionFactory.java From spliceengine with GNU Affero General Public License v3.0
public HColumnDescriptor createDataFamily() {
  HColumnDescriptor snapshot = new HColumnDescriptor(DEFAULT_FAMILY_BYTES);
  snapshot.setMaxVersions(Integer.MAX_VALUE);
  Compression.Algorithm compress =
      Compression.getCompressionAlgorithmByName(config.getCompressionAlgorithm());
  snapshot.setCompressionType(compress);
  snapshot.setInMemory(HConfiguration.DEFAULT_IN_MEMORY);
  snapshot.setBlockCacheEnabled(HConfiguration.DEFAULT_BLOCKCACHE);
  snapshot.setBloomFilterType(BloomType.ROW);
  snapshot.setTimeToLive(HConfiguration.DEFAULT_TTL);
  return snapshot;
}
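Both spliceengine examples read the codec name from configuration (config.getCompressionAlgorithm()) rather than hard-coding it, so the compression choice can be changed without recompiling. Note that HTableDescriptor and HColumnDescriptor are the older descriptor API; current HBase releases deprecate them in favor of TableDescriptorBuilder and ColumnFamilyDescriptorBuilder (the API Example 1 uses).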
Example 7
Source File: HFileWriterImpl.java From hbase with Apache License 2.0
public static Compression.Algorithm compressionByName(String algoName) {
  if (algoName == null) {
    return HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  return Compression.getCompressionAlgorithmByName(algoName);
}
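This wrapper lets callers pass a null codec name and get the default algorithm instead of a lookup failure. A minimal usage sketch, assuming HFileWriterImpl is on the classpath:

// null falls back to HFile.DEFAULT_COMPRESSION_ALGORITHM instead of throwing.
Compression.Algorithm algo = HFileWriterImpl.compressionByName(null);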
Example 8
Source File: HFileGenerationFunction.java From spliceengine with GNU Affero General Public License v3.0
private StoreFileWriter getNewWriter(Configuration conf, BulkImportPartition partition)
    throws IOException {
  Compression.Algorithm compression =
      Compression.getCompressionAlgorithmByName(compressionAlgorithm);
  BloomType bloomType = BloomType.ROW;
  Integer blockSize = HConstants.DEFAULT_BLOCKSIZE;
  DataBlockEncoding encoding = DataBlockEncoding.NONE;
  Configuration tempConf = new Configuration(conf);
  tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  HFileContextBuilder contextBuilder = new HFileContextBuilder()
      .withCompression(compression)
      .withChecksumType(HStore.getChecksumType(conf))
      .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
      .withBlockSize(blockSize);

  if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    contextBuilder.withIncludesTags(true);
  }

  contextBuilder.withDataBlockEncoding(encoding);
  HFileContext hFileContext = contextBuilder.build();
  try {
    Path familyPath = new Path(partition.getFilePath());
    // Get favored nodes as late as possible. This is the best we can do. If the region
    // gets moved after this point, locality is not guaranteed.
    InetSocketAddress favoredNode = getFavoredNode(partition);
    StoreFileWriter.Builder builder =
        new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
            .withOutputDir(familyPath)
            .withBloomType(bloomType)
            .withFileContext(hFileContext);
    if (favoredNode != null) {
      InetSocketAddress[] favoredNodes = new InetSocketAddress[1];
      favoredNodes[0] = favoredNode;
      builder.withFavoredNodes(favoredNodes);
    }
    return builder.build();
  } catch (Exception e) {
    throw new IOException(e);
  }
}
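Here the resolved algorithm feeds an HFileContextBuilder, which bundles compression together with checksums, block size, and data block encoding into the HFileContext the StoreFileWriter needs. The block cache is disabled (HFILE_BLOCK_CACHE_SIZE_KEY set to 0.0f), presumably because a bulk-load writer has no use for cached blocks.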