Java Code Examples for org.apache.hadoop.hbase.client.HTable#setAutoFlush()
The following examples show how to use org.apache.hadoop.hbase.client.HTable#setAutoFlush().
The source project, file, and license are noted above each example.
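Before the individual examples, here is a minimal sketch of the pattern they all share: turn auto-flush off so Puts collect in the client-side write buffer, then flush explicitly. This targets the pre-1.0 HBase client API used throughout this page (HTable, setAutoFlush, and flushCommits were deprecated and later removed); the class, table, family, and qualifier names are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "demo_table"); // placeholder table name
    try {
      // disable auto-flush: puts accumulate client-side instead of one RPC each
      table.setAutoFlush(false);
      table.setWriteBufferSize(4 * 1024 * 1024); // optional; the default is 2 MB
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("value"));
      table.put(put);        // buffered, not yet sent to the region server
      table.flushCommits();  // now the buffered puts go out in one batch
    } finally {
      table.close();         // close() also flushes anything still buffered
    }
  }
}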
Example 1
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Create the primary table (to which you should write), setup properly for indexing the given
 * {@link ColumnGroup}s. Also creates the necessary index tables to match the passed groups.
 * @param groups {@link ColumnGroup}s to index, creating one index table per column group.
 * @return reference to the primary table
 * @throws IOException if there is an issue communicating with HBase
 */
@SuppressWarnings("deprecation")
private HTable createSetupTables(ColumnGroup... groups) throws IOException {
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  // setup the index
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  for (ColumnGroup group : groups) {
    builder.addIndexGroup(group);
    // create the index tables
    CoveredColumnIndexer.createIndexTable(admin, group.getTable());
  }

  // setup the primary table
  String indexedTableName = Bytes.toString(TestTable.getTableName());
  @SuppressWarnings("deprecation")
  HTableDescriptor pTable = new HTableDescriptor(indexedTableName);
  pTable.addFamily(new HColumnDescriptor(FAM));
  pTable.addFamily(new HColumnDescriptor(FAM2));
  builder.build(pTable);

  // create the primary table
  admin.createTable(pTable);
  HTable primary = new HTable(UTIL.getConfiguration(), indexedTableName);
  primary.setAutoFlush(false);
  return primary;
}
Example 2
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Create the primary table (to which you should write), setup properly for indexing the given
 * {@link ColumnGroup}s. Also creates the necessary index tables to match the passed groups.
 * @param groups {@link ColumnGroup}s to index, creating one index table per column group.
 * @return reference to the primary table
 * @throws IOException if there is an issue communicating with HBase
 */
private HTable createSetupTables(ColumnGroup... groups) throws IOException {
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  // setup the index
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  for (ColumnGroup group : groups) {
    builder.addIndexGroup(group);
    // create the index tables
    CoveredColumnIndexer.createIndexTable(admin, group.getTable());
  }

  // setup the primary table
  String indexedTableName = Bytes.toString(TestTable.getTableName());
  HTableDescriptor pTable = new HTableDescriptor(indexedTableName);
  pTable.addFamily(new HColumnDescriptor(FAM));
  pTable.addFamily(new HColumnDescriptor(FAM2));
  builder.build(pTable);

  // create the primary table
  admin.createTable(pTable);
  HTable primary = new HTable(UTIL.getConfiguration(), indexedTableName);
  primary.setAutoFlush(false);
  return primary;
}
Example 3
Source File: Tailer.java From zerowing with MIT License
private HTable ensureTable(String tableName) throws Exception {
  if (_knownTables.containsKey(tableName)) {
    return _knownTables.get(tableName);
  }

  HBaseAdmin admin = getHBaseAdmin();
  if (!admin.tableExists(tableName)) {
    HTableDescriptor tableDesc = _translator.describeHBaseTable(tableName);
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(_conf, tableName);
  if (_bufferWrites) {
    table.setAutoFlush(false, true);
  }

  _knownTables.put(tableName, table);
  return table;
}
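A note on the two-argument overload used here: in the older HTable API shown on this page, setAutoFlush(false, true) disables auto-flush and also sets the clearBufferOnFail flag, so a failed flush discards the buffered mutations rather than keeping them for retry on the next flush. The one-argument setAutoFlush(false) used in most of the other examples is equivalent to setAutoFlush(false, false), which retains failed operations in the buffer.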
Example 4
Source File: HBaseTable.java From wifi with Apache License 2.0
public static void put(String row, String column, String data) throws Exception {
  HTable table = new HTable(cfg, tableName);
  table.setAutoFlush(false);
  table.setWriteBufferSize(10 * 1024 * 1024);
  Put p1 = new Put(Bytes.toBytes(row));
  p1.add(Bytes.toBytes(familyName), Bytes.toBytes(column), Bytes.toBytes(data));
  table.put(p1);
  System.out.println("put '" + row + "','" + familyName + ":" + column + "','" + data + "'");
}
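Note that with auto-flush off and a 10 MB buffer, the single put above stays in the client-side write buffer, and this method neither flushes nor closes the HTable it creates, so nothing reaches the server until the buffer fills. A more complete version would end with something along these lines:

table.flushCommits(); // send the buffered put now
table.close();        // flushes any remainder, then releases the table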
Example 5
Source File: Mapper2HbaseDemo.java From bigdata-tutorial with Apache License 2.0
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  conf = HBaseConfiguration.create(context.getConfiguration());
  conf.set("hbase.zookeeper.quorum", "zk1.hadoop,zk2.hadoop,zk3.hadoop");
  conf.set("hbase.zookeeper.property.clientPort", "2181");
  htable = new HTable(conf, "micmiu");
  htable.setAutoFlush(false);
  htable.setWriteBufferSize(12 * 1024 * 1024); // 12M
  wal = true;
}
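The matching cleanup() is not shown on this page. Because setup() turns auto-flush off, a mapper like this must flush on teardown or puts buffered near the end of the task are lost. A minimal counterpart, assuming the same htable field, might look like:

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  htable.flushCommits(); // push any puts still sitting in the write buffer
  htable.close();
  super.cleanup(context);
}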
Example 6
Source File: FailWithoutRetriesIT.java From phoenix with Apache License 2.0
/**
 * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't
 * rethrowing the exception correctly?
 * <p>
 * We use a custom codec to enforce the thrown exception.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testQuickFailure() throws Exception {
  // incorrectly setup indexing for the primary table - target index table doesn't exist, which
  // should quickly return to the client
  byte[] family = Bytes.toBytes("family");
  ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
  // values are [col1]
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  // add the index family
  builder.addIndexGroup(fam1);
  // usually, we would create the index table here, but we don't for the sake of the test.

  // setup the primary table
  String primaryTable = Bytes.toString(table.getTableName());
  @SuppressWarnings("deprecation")
  HTableDescriptor pTable = new HTableDescriptor(primaryTable);
  pTable.addFamily(new HColumnDescriptor(family));
  // override the codec so we can use our test one
  builder.build(pTable, FailingTestCodec.class);

  // create the primary table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.createTable(pTable);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // up the number of retries/wait time to make it obvious that we are failing with retries here
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
  conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
  HTable primary = new HTable(conf, primaryTable);
  primary.setAutoFlush(false, true);

  // do a simple put that should be indexed
  Put p = new Put(Bytes.toBytes("row"));
  p.add(family, null, Bytes.toBytes("value"));
  primary.put(p);
  try {
    primary.flushCommits();
    fail("Shouldn't have gotten a successful write to the primary table");
  } catch (RetriesExhaustedWithDetailsException e) {
    LOG.info("Correctly got a failure of the put!");
  }
  primary.close();
}
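Two details worth noting here: because auto-flush is off, the doomed put only reaches the region server at flushCommits(), which is why the expected RetriesExhaustedWithDetailsException is caught there rather than at put(). And raising the retry count to 20 with a one-second pause means that if the indexer failure were treated as retriable, the test would likely hang long enough to hit its 300-second timeout, so passing quickly demonstrates the exception is rethrown as non-retriable. The same reasoning applies to the near-identical test later on this page.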
Example 7
Source File: EndToEndCoveredColumnsIndexBuilderIT.java From phoenix with Apache License 2.0
/**
 * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
 * Previous implementations managed batches by playing current state against each element in the
 * batch, rather than combining all the per-row updates into a single mutation for the batch. This
 * test ensures that we see the correct expected state.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
  long ts = state.ts;
  // build up a list of puts to make, all on the same row
  Put p1 = new Put(row, ts);
  p1.add(family, qual, Bytes.toBytes("v1"));
  Put p2 = new Put(row, ts + 1);
  p2.add(family, qual, Bytes.toBytes("v2"));

  // setup all the verifiers we need. This is just the same as above, but will be called twice
  // since we need to iterate the batch.

  // get all the underlying kvs for the put
  final List<Cell> allKvs = new ArrayList<Cell>(2);
  allKvs.addAll(p2.getFamilyCellMap().get(family));
  allKvs.addAll(p1.getFamilyCellMap().get(family));

  // setup the verifier for the data we expect to write
  // both puts should be put into a single batch
  final ColumnReference familyRef =
      new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family, ColumnReference.ALL_QUALIFIERS);
  VerifyingIndexCodec codec = state.codec;
  // no previous state in the table
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 1",
      Collections.<Cell> emptyList(), familyRef));
  codec.verifiers.add(new ListMatchingVerifier("put state 1",
      p1.getFamilyCellMap().get(family), familyRef));
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 2",
      p1.getFamilyCellMap().get(family), familyRef));
  // kvs from both puts should be in the table now
  codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

  // do the actual put (no indexing will actually be done)
  HTable primary = state.table;
  primary.setAutoFlush(false);
  primary.put(Arrays.asList(p1, p2));
  primary.flushCommits();

  // cleanup after ourselves
  cleanup(state);
}
Example 8
Source File: HBaseWriter.java From hiped2 with Apache License 2.0
/**
 * The MapReduce driver - setup and launch the job.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {
  Cli cli = Cli.builder().setArgs(args).addOptions(CliCommonOpts.InputFileOption.values()).build();
  int result = cli.runCmd();
  if (result != 0) {
    return result;
  }

  File inputFile = new File(cli.getArgValueAsString(CliCommonOpts.InputFileOption.INPUT));

  Configuration conf = HBaseConfiguration.create();
  createTableAndColumn(conf, STOCKS_TABLE_NAME, STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES);

  HTable htable = new HTable(conf, STOCKS_TABLE_NAME);
  htable.setAutoFlush(false);
  htable.setWriteBufferSize(1024 * 1024 * 12);

  SpecificDatumWriter<Stock> writer = new SpecificDatumWriter<Stock>();
  writer.setSchema(Stock.SCHEMA$);

  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(bao, null);

  for (Stock stock : AvroStockUtils.fromCsvFile(inputFile)) {
    writer.write(stock, encoder);
    encoder.flush();

    byte[] rowkey = Bytes.add(
        Bytes.toBytes(stock.getSymbol().toString()),
        Bytes.toBytes(stock.getDate().toString()));

    byte[] stockAsAvroBytes = bao.toByteArray();

    Put put = new Put(rowkey);
    put.add(STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES, STOCK_COLUMN_QUALIFIER_AS_BYTES,
        stockAsAvroBytes);
    htable.put(put);

    bao.reset();
  }

  htable.flushCommits();
  htable.close();

  System.out.println("done");

  return 0;
}
Example 9
Source File: TestFailWithoutRetries.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't
 * rethrowing the exception correctly?
 * <p>
 * We use a custom codec to enforce the thrown exception.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testQuickFailure() throws Exception {
  // incorrectly setup indexing for the primary table - target index table doesn't exist, which
  // should quickly return to the client
  byte[] family = Bytes.toBytes("family");
  ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
  // values are [col1]
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  // add the index family
  builder.addIndexGroup(fam1);
  // usually, we would create the index table here, but we don't for the sake of the test.

  // setup the primary table
  String primaryTable = Bytes.toString(table.getTableName());
  HTableDescriptor pTable = new HTableDescriptor(primaryTable);
  pTable.addFamily(new HColumnDescriptor(family));
  // override the codec so we can use our test one
  builder.build(pTable, FailingTestCodec.class);

  // create the primary table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.createTable(pTable);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // up the number of retries/wait time to make it obvious that we are failing with retries here
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
  conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
  HTable primary = new HTable(conf, primaryTable);
  primary.setAutoFlush(false, true);

  // do a simple put that should be indexed
  Put p = new Put(Bytes.toBytes("row"));
  p.add(family, null, Bytes.toBytes("value"));
  primary.put(p);
  try {
    primary.flushCommits();
    fail("Shouldn't have gotten a successful write to the primary table");
  } catch (RetriesExhaustedWithDetailsException e) {
    LOG.info("Correctly got a failure of the put!");
  }
  primary.close();
}
Example 10
Source File: TestEndToEndCoveredColumnsIndexBuilder.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
 * Previous implementations managed batches by playing current state against each element in the
 * batch, rather than combining all the per-row updates into a single mutation for the batch. This
 * test ensures that we see the correct expected state.
 * @throws Exception on failure
 */
@Test
public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
  long ts = state.ts;
  // build up a list of puts to make, all on the same row
  Put p1 = new Put(row, ts);
  p1.add(family, qual, Bytes.toBytes("v1"));
  Put p2 = new Put(row, ts + 1);
  p2.add(family, qual, Bytes.toBytes("v2"));

  // setup all the verifiers we need. This is just the same as above, but will be called twice
  // since we need to iterate the batch.

  // get all the underlying kvs for the put
  final List<KeyValue> allKvs = new ArrayList<KeyValue>(2);
  allKvs.addAll(p2.getFamilyMap().get(family));
  allKvs.addAll(p1.getFamilyMap().get(family));

  // setup the verifier for the data we expect to write
  // both puts should be put into a single batch
  final ColumnReference familyRef =
      new ColumnReference(TestEndToEndCoveredColumnsIndexBuilder.family, ColumnReference.ALL_QUALIFIERS);
  VerifyingIndexCodec codec = state.codec;
  // no previous state in the table
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 1",
      Collections.<KeyValue> emptyList(), familyRef));
  codec.verifiers.add(new ListMatchingVerifier("put state 1",
      p1.getFamilyMap().get(family), familyRef));
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 2",
      p1.getFamilyMap().get(family), familyRef));
  // kvs from both puts should be in the table now
  codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

  // do the actual put (no indexing will actually be done)
  HTable primary = state.table;
  primary.setAutoFlush(false);
  primary.put(Arrays.asList(p1, p2));
  primary.flushCommits();

  // cleanup after ourselves
  cleanup(state);
}