Java Code Examples for org.apache.hadoop.hbase.client.HTableInterface#close()
The following examples show how to use org.apache.hadoop.hbase.client.HTableInterface#close().
You can go to the original project or source file by following the links above each example.
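Most of the examples below follow the same pattern: obtain an HTableInterface from a connection or pool, perform the operation, and call close() in a finally block so the table's resources are released even if the operation throws. A minimal sketch of that pattern, assuming the standard org.apache.hadoop.hbase.client imports and a hypothetical table named "example_table" with column family "cf", might look like this:

Configuration conf = HBaseConfiguration.create();
HConnection connection = HConnectionManager.createConnection(conf);
HTableInterface table = null;
try {
    // "example_table", "cf", and the row/qualifier names are placeholders for this sketch
    table = connection.getTable("example_table");
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("qualifier"), Bytes.toBytes("value"));
    table.put(put);
} finally {
    if (table != null) {
        table.close(); // returns the table's resources to the connection
    }
    connection.close();
}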
Example 1
Source File: UngroupedAggregateRegionObserver.java From phoenix with Apache License 2.0
private void commitIndexMutations(final ObserverContext<RegionCoprocessorEnvironment> c,
        HRegion region, List<Mutation> indexMutations) throws IOException {
    // Get indexRegion corresponding to data region
    HRegion indexRegion = IndexUtil.getIndexRegion(c.getEnvironment());
    if (indexRegion != null) {
        commitBatch(indexRegion, indexMutations, null);
    } else {
        TableName indexTable = TableName.valueOf(
                MetaDataUtil.getLocalIndexPhysicalName(region.getTableDesc().getName()));
        HTableInterface table = null;
        try {
            table = c.getEnvironment().getTable(indexTable);
            table.batch(indexMutations);
        } catch (InterruptedException ie) {
            ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), ie);
        } finally {
            if (table != null) table.close();
        }
    }
    indexMutations.clear();
}
Example 2
Source File: MappingTableDataTypeIT.java From phoenix with Apache License 2.0
@Test
public void testMappingHbaseTableToPhoenixTable() throws Exception {
    final TableName tableName = TableName.valueOf("MTEST");
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    HBaseAdmin admin = conn.getQueryServices().getAdmin();
    try {
        // Create table then get the single region for our new table.
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(Bytes.toBytes("cf"));
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor);
        HTableInterface t = conn.getQueryServices().getTable(Bytes.toBytes("MTEST"));
        insertData(tableName.getName(), admin, t);
        t.close();
        try {
            testCreateTableMismatchedType();
            fail();
        } catch (SQLException e) {
            assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode());
        }
    } finally {
        admin.close();
    }
}
Example 3
Source File: HBaseUtils.java From hadoop-arch-book with Apache License 2.0
public static void populateValidationRules(HConnection connection, ValidationRules rules) throws Exception {
    HTableInterface table = connection.getTable(HBaseTableMetaModel.profileCacheTableName);
    try {
        Put put = new Put(HBaseTableMetaModel.validationRulesRowKey);
        put.add(HBaseTableMetaModel.profileCacheColumnFamily,
                HBaseTableMetaModel.validationRulesRowKey,
                Bytes.toBytes(rules.getJSONObject().toString()));
        table.put(put);
    } finally {
        table.close();
    }
}
Example 4
Source File: HBaseClientTemplate.java From kite with Apache License 2.0
/**
 * Execute a Put on HBase.
 *
 * Any PutModifiers registered with registerPutModifier will be invoked before
 * the Put is executed.
 *
 * @param putAction
 *          The put to execute on HBase.
 * @return True if the put succeeded, False if the put failed due to update
 *         conflict
 */
public boolean put(PutAction putAction) {
    HTableInterface table = pool.getTable(tableName);
    try {
        return put(putAction, table);
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                throw new DatasetIOException("Error putting table back into pool", e);
            }
        }
    }
}
Example 5
Source File: AbstractHBaseClient.java From jstorm with Apache License 2.0
public void closeTable(HTableInterface table) {
    if (table != null) {
        try {
            table.close();
        } catch (Exception ignored) {
        }
    }
}
Example 6
Source File: DynamicColumnTest.java From phoenix with BSD 3-Clause "New" or "Revised" License
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
    HTableInterface hTable = services.getTable(
            SchemaUtil.getTableNameAsBytes(HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME, HBASE_DYNAMIC_COLUMNS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] dv = Bytes.toBytes("DV");
        byte[] first = Bytes.toBytes("F");
        byte[] f1v1 = Bytes.toBytes("F1V1");
        byte[] f1v2 = Bytes.toBytes("F1V2");
        byte[] f2v1 = Bytes.toBytes("F2V1");
        byte[] f2v2 = Bytes.toBytes("F2V2");
        byte[] key = Bytes.toBytes("entry1");

        Put put = new Put(key);
        put.add(QueryConstants.EMPTY_COLUMN_BYTES, dv, Bytes.toBytes("default"));
        put.add(QueryConstants.EMPTY_COLUMN_BYTES, first, Bytes.toBytes("first"));
        put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
        put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
        put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
        put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
        mutations.add(put);

        hTable.batch(mutations);
    } finally {
        hTable.close();
    }

    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
}
Example 7
Source File: CachingHTableFactory.java From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
protected boolean removeLRU(LinkEntry entry) {
    HTableInterface table = (HTableInterface) entry.getValue();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Closing connection to table: " + Bytes.toString(table.getTableName())
                + " because it was evicted from the cache.");
    }
    try {
        table.close();
    } catch (IOException e) {
        LOG.info("Failed to correctly close HTable: " + Bytes.toString(table.getTableName())
                + " ignoring since being removed from queue.");
    }
    return true;
}
Example 8
Source File: TestHBaseDAO.java From DistributedCrawler with Apache License 2.0
@Test
public void testPut() throws IOException {
    Configuration configuration = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(configuration);
    HTableInterface table = connection.getTable("page");
    // use the table as needed, for a single operation and a single thread
    Put put = new Put("2".getBytes());
    put.add("content".getBytes(), null, "我吃包子".getBytes());
    put.add("title".getBytes(), null, "吃包子".getBytes());
    put.add("url".getBytes(), null, "http://www.sina.com.cn".getBytes());
    table.put(put);
    table.close();
    connection.close();
}
Example 9
Source File: HConnectionController.java From recsys-offline with Apache License 2.0
public void closeTable(HTableInterface table) {
    try {
        table.close();
    } catch (IOException e) {
        logger.error("Cannot to close HBase table.", e.getCause());
    }
}
Example 10
Source File: HbOperate.java From tddl5 with Apache License 2.0
private void closeHtable(HTableInterface htable, HbData opData) {
    this.locked = false;
    if (htable != null) {
        try {
            htable.close();
        } catch (IOException e) {
            throw new RuntimeException("close hbase fail with table(" + opData.getTableName() + ")", e);
        }
    }
}
Example 11
Source File: HBaseUtils.java From hadoop-arch-book with Apache License 2.0
public static void populateUserProfile(HConnection connection, UserProfile userProfile) throws Exception {
    HTableInterface table = connection.getTable(HBaseTableMetaModel.profileCacheTableName);
    try {
        Put put = new Put(convertKeyToRowKey(HBaseTableMetaModel.profileCacheTableName, userProfile.userId));
        put.add(HBaseTableMetaModel.profileCacheColumnFamily,
                HBaseTableMetaModel.profileCacheJsonColumn,
                Bytes.toBytes(userProfile.getJSONObject().toString()));
        put.add(HBaseTableMetaModel.profileCacheColumnFamily,
                HBaseTableMetaModel.profileCacheTsColumn,
                Bytes.toBytes(System.currentTimeMillis()));
        table.put(put);
    } finally {
        table.close();
    }
}
Example 12
Source File: DynamicColumnIT.java From phoenix with Apache License 2.0
@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services =
            driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(
            SchemaUtil.getTableNameAsBytes(HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME, HBASE_DYNAMIC_COLUMNS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] dv = Bytes.toBytes("DV");
        byte[] first = Bytes.toBytes("F");
        byte[] f1v1 = Bytes.toBytes("F1V1");
        byte[] f1v2 = Bytes.toBytes("F1V2");
        byte[] f2v1 = Bytes.toBytes("F2V1");
        byte[] f2v2 = Bytes.toBytes("F2V2");
        byte[] key = Bytes.toBytes("entry1");

        Put put = new Put(key);
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
        put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
        put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
        put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
        put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
        mutations.add(put);

        hTable.batch(mutations);
    } finally {
        hTable.close();
    }

    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
}
Example 13
Source File: CachingHTableFactory.java From phoenix with Apache License 2.0
@Override
protected boolean removeLRU(LinkEntry entry) {
    HTableInterface table = (HTableInterface) entry.getValue();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Closing connection to table: " + Bytes.toString(table.getTableName())
                + " because it was evicted from the cache.");
    }
    try {
        table.close();
    } catch (IOException e) {
        LOG.info("Failed to correctly close HTable: " + Bytes.toString(table.getTableName())
                + " ignoring since being removed from queue.");
    }
    return true;
}
Example 14
Source File: HBaseFactoryTest.java From bigdata-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception {
    String quorum = "192.168.0.30,192.168.0.31,192.168.0.32";
    quorum = "192.168.8.191,192.168.1.192,192.168.1.193";
    int port = 2181;
    HBaseFactoryTest factory = new HBaseFactoryTest(quorum, port);

    String tableName = "demo_test";
    String columnFamily = "cf";

    System.out.println("=============================== : create");
    factory.createTable(tableName, columnFamily);

    System.out.println("=============================== : print");
    factory.printTableDesc(tableName);

    System.out.println("=============================== : put");
    HTableInterface table = factory.getTable(tableName);
    table.setAutoFlushTo(false);
    for (int i = 0; i < 10; i++) {
        putCell(table, "rowkey" + i, columnFamily, "info", "micmiu-" + i);
    }
    table.flushCommits();
    table.close();

    System.out.println("=============================== : query");
    ResultScanner rs = HBaseFactoryTest.scanAll(table);
    for (Result result : rs) {
        System.out.println(">>>> result Empty : " + result.isEmpty());
        for (Cell cell : result.rawCells()) {
            System.out.print(">>>> cell rowkey= " + new String(CellUtil.cloneRow(cell)));
            System.out.print(",family= " + new String(CellUtil.cloneFamily(cell)) + ":"
                    + new String(CellUtil.cloneQualifier(cell)));
            System.out.println(", value= " + new String(CellUtil.cloneValue(cell)));
        }
    }

    System.out.println("=============================== : delete");
    factory.deleteTable(tableName);
    factory.closeConn();
}
Example 15
Source File: HBaseRowDigestTest.java From Kylin with Apache License 2.0
@Test
public static void test() throws IOException {
    String hbaseUrl = "hbase"; // use hbase-site.xml on classpath
    HConnection conn = null;
    HTableInterface table = null;
    try {
        conn = HBaseConnection.get(hbaseUrl);
        table = conn.getTable("KYLIN_II_YTYWP3CQGJ");
        ResultScanner scanner = table.getScanner(CF, QN);
        StringBuffer sb = new StringBuffer();
        while (true) {
            Result r = scanner.next();
            if (r == null)
                break;
            Cell[] cells = r.rawCells();
            Cell c = cells[0];
            k.set(c.getRowArray(), c.getRowOffset(), c.getRowLength());
            v.set(c.getValueArray(), c.getValueOffset(), c.getValueLength());
            byte[] row = k.copyBytes();
            byte[] value = v.copyBytes();
            // byte[] row = r.getRow();
            // byte[] value = r.getValue(CF, QN);
            // sb.append("row length: " + row.length + "\r\n");
            sb.append(BytesUtil.toReadableText(row) + "\r\n");
            sb.append("value length: " + value.length + "\r\n");
            sb.append(BytesUtil.toReadableText(value) + "\r\n");
        }
        System.out.println(sb.toString());
        FileUtils.writeStringToFile(new File("/Users/honma/Desktop/a3"), sb.toString());
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (table != null)
            table.close();
        if (conn != null)
            conn.close();
    }
}
Example 16
Source File: PingHBaseCLI.java From Kylin with Apache License 2.0
public static void main(String[] args) throws IOException {
    String metadataUrl = args[0];
    String hbaseTable = args[1];

    System.out.println("Hello friend.");

    Configuration hconf = HadoopUtil.newHBaseConfiguration(metadataUrl);
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            System.out.println("--------------Error while getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    HConnection conn = null;
    HTableInterface table = null;
    ResultScanner scanner = null;
    try {
        conn = HConnectionManager.createConnection(hconf);
        table = conn.getTable(hbaseTable);
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
        if (table != null) {
            table.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
}
Example 17
Source File: HBaseFactory.java From zxl with Apache License 2.0
public static void closeHTableInterface(HTableInterface hTableInterface) throws IOException {
    hTableInterface.close();
}
Example 18
Source File: MetaDataEndpointImpl.java From phoenix with Apache License 2.0
/**
 * @param tableName parent table's name
 * Looks for whether child views exist for the table specified by table.
 * TODO: should we pass a timestamp here?
 */
private TableViewFinderResult findChildViews(HRegion region, byte[] tenantId, PTable table) throws IOException {
    byte[] schemaName = table.getSchemaName().getBytes();
    byte[] tableName = table.getTableName().getBytes();
    boolean isMultiTenant = table.isMultiTenant();
    Scan scan = new Scan();
    // If the table is multi-tenant, we need to check across all tenant_ids,
    // so we can't constrain the row key. Otherwise, any views would have
    // the same tenantId.
    if (!isMultiTenant) {
        byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
        byte[] stopRow = ByteUtil.nextKey(startRow);
        scan.setStartRow(startRow);
        scan.setStopRow(stopRow);
    }
    SingleColumnValueFilter linkFilter =
            new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, PHYSICAL_TABLE_BYTES);
    linkFilter.setFilterIfMissing(true);
    byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY,
            SchemaUtil.getTableNameAsBytes(schemaName, tableName));
    SuffixFilter rowFilter = new SuffixFilter(suffix);
    Filter filter = new FilterList(linkFilter, rowFilter);
    scan.setFilter(filter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    // Original region-only scanner modified due to PHOENIX-1208
    // RegionScanner scanner = region.getScanner(scan);
    // The following *should* work, but doesn't due to HBASE-11837
    // TableName systemCatalogTableName = region.getTableDesc().getTableName();
    // HTableInterface hTable = env.getTable(systemCatalogTableName);
    // These deprecated calls work around the issue
    HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env,
            PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    try {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<Result> results = Lists.newArrayList();
        ResultScanner scanner = hTable.getScanner(scan);
        try {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                results.add(result);
            }
            TableViewFinderResult tableViewFinderResult = new TableViewFinderResult(results);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        } finally {
            scanner.close();
        }
    } finally {
        hTable.close();
    }
}