Java Code Examples for org.apache.hadoop.hbase.util.FSUtils#getTableDir()
The following examples show how to use org.apache.hadoop.hbase.util.FSUtils#getTableDir(), which resolves the directory under the HBase root directory in which a table's data is stored. Each example notes its source file, project, and license.
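Before the project examples, here is a minimal, self-contained sketch of the call itself. It is an illustration only: the class name GetTableDirExample and the table name my_table are placeholders, and the sketch assumes hbase.rootdir is set in the configuration picked up by HBaseConfiguration.create().

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class GetTableDirExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Resolve the cluster root directory (hbase.rootdir), then derive the table
    // directory from it, e.g. <hbase.rootdir>/data/default/my_table.
    Path rootDir = FSUtils.getRootDir(conf);
    Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("my_table"));
    System.out.println(tableDir);
  }
}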
Example 1
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1}, null);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Example 2
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
Example 3
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, tableName + ".hlog");
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  WAL hLog = walFactory.getWAL(info);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Example 4
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1});
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Example 5
Source File: TestPerRegionIndexWriteCache.java From phoenix with Apache License 2.0
@SuppressWarnings("deprecation")
@Before
public void setUp() throws Exception {
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());
  FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
  HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
  Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
  WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
  WAL wal = walFactory.getWAL(Bytes.toBytes("logs"));
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
  htd.addFamily(a);
  r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
    @Override
    public int hashCode() {
      return 1;
    }

    @Override
    public String toString() {
      return "testRegion1";
    }
  };
  r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
    @Override
    public int hashCode() {
      return 2;
    }

    @Override
    public String toString() {
      return "testRegion1";
    }
  };
}
Example 6
Source File: LocalIndexIT.java From phoenix with Apache License 2.0
private void copyLocalIndexHFiles(Configuration conf, RegionInfo fromRegion, RegionInfo toRegion, boolean move)
    throws IOException {
  Path root = FSUtils.getRootDir(conf);
  Path secondRegion = new Path(FSUtils.getTableDir(root, fromRegion.getTable()) + Path.SEPARATOR
      + fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
  Path hfilePath = FSUtils.getCurrentFileSystem(conf).listFiles(secondRegion, true).next().getPath();
  Path firstRegionPath = new Path(FSUtils.getTableDir(root, toRegion.getTable()) + Path.SEPARATOR
      + toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
  FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
  assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem, firstRegionPath, move, conf));
}
Example 7
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr), null, null, false);
  final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getWAL().close();

  WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
  WAL wal = createWAL(this.conf, walFactory);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  region.initialize();
  region.getSequenceId().set(0);

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(), Mockito.any(Exception.class));

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
Example 8
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0
/**
 * Test writing edits into a region, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final RegionInfo hri = RegionInfoBuilder.newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr))
      .setSplit(false).build();
  final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  WALFactory walFactory = new WALFactory(this.conf, "localhost,1234");
  WAL wal = createWAL(this.conf, walFactory);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type
  region0.close();
  region0.getWAL().close();

  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  // mock out some of the internals of the RSS, so we can run CPs
  when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
  region.initialize();

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(), Mockito.any(Exception.class));

  // then create the index table so we are successful on WAL replay
  TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();
  org.apache.hadoop.hbase.client.Connection hbaseConn =
      ConnectionFactory.createConnection(UTIL.getConfiguration());

  // now check to ensure that we wrote to the index table
  Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(INDEX_TABLE_NAME));
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  Admin admin = UTIL.getAdmin();
  admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.close();
}
Example 9
Source File: TestPerRegionIndexWriteCache.java From phoenix with Apache License 2.0
@SuppressWarnings("deprecation")
@Before
public void setUp() throws Exception {
  Path hbaseRootDir = new Path(getClass().getSimpleName() + "_" + testName.getMethodName());
  TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());
  FileSystem newFS = miniDfs.getFileSystem();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).setSplit(false).build();
  Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
  Random rn = new Random();
  tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
  WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), getClass().getSimpleName());
  wal = walFactory.getWAL(RegionInfoBuilder.newBuilder(TableName.valueOf("logs")).build());
  TableDescriptor htd = TableDescriptorBuilder
      .newBuilder(tableName)
      .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build())
      .build();
  r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
    @Override
    public int hashCode() {
      return 1;
    }

    @Override
    public String toString() {
      return "testRegion1";
    }
  };
  r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
    @Override
    public int hashCode() {
      return 2;
    }

    @Override
    public String toString() {
      return "testRegion1";
    }
  };
}
Example 10
Source File: SkeletonClientSideRegionScanner.java From spliceengine with GNU Affero General Public License v3.0
private HRegion openHRegion() throws IOException {
  Path tableDir = FSUtils.getTableDir(rootDir, hri.getTable());
  Set<String> compactedFilesPaths = getCompactedFilesPathsFromHBaseRegionServer();
  return new SpliceHRegion(tableDir, null, fs, conf, hri, htd, null, compactedFilesPaths);
}