Java Code Examples for org.apache.hadoop.hbase.regionserver.HRegion#initialize()
The following examples show how to use
org.apache.hadoop.hbase.regionserver.HRegion#initialize().
Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
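Before the examples, a quick orientation: initialize() must be called exactly once on a freshly constructed HRegion, before any reads or writes. It opens the region's store files and replays any pending WAL edits, returning the next sequence id for the region. The sketch below illustrates that lifecycle; it is a minimal, hedged sketch modeled on the HRegion constructor used in the examples (an HBase 1.x-era signature), not code taken from any of the projects listed.

// Minimal lifecycle sketch for HRegion#initialize().
// Assumes an HBase 1.x-era HRegion constructor, as used in the examples below;
// exact signatures vary across HBase versions. basedir, wal, fs, conf,
// regionInfo, and tableDescriptor are assumed to be set up by the caller.
HRegion region = new HRegion(basedir, wal, fs, conf, regionInfo, tableDescriptor, null);
try {
  // Opens store files and replays pending WAL edits; required before any put/get/scan.
  long nextSeqId = region.initialize();
  // ... read from / write to the region ...
} finally {
  region.close(); // flush and release resources
}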
Example 1
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0
/**
 * Create a region with its own WAL. Be sure to call
 * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
 */
public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
    final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
    throws IOException {
  HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
  region.setBlockCache(blockCache);
  region.initialize();
  return region;
}
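The Javadoc above insists that createRegionAndWAL be paired with closeRegionAndWAL so that both the region and its WAL are released. A minimal usage sketch (the info, rootDir, conf, htd, and blockCache variables are assumed to be prepared by the surrounding test):

HRegion region = HBaseTestingUtility.createRegionAndWAL(info, rootDir, conf, htd, blockCache);
try {
  // ... exercise the region in the test ...
} finally {
  // Closes the region and its WAL, as required by the Javadoc above.
  HBaseTestingUtility.closeRegionAndWAL(region);
}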
Example 2
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.addColumn(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.addColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false, new FlushLifeCycleTracker() { });

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[] {V[8], V[6], deleteTs},
        new byte[][] {Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
Example 3
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false);

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[] {V[8], V[6], deleteTs},
        new byte[][] {Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
Example 4
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDataJanitorRegionScanner() throws Exception {
  String tableName = "TestRegionScanner";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, TimeUnit.HOURS.toMillis(3));
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    for (int i = 1; i <= 8; i++) {
      for (int k = 1; k <= i; k++) {
        Put p = new Put(Bytes.toBytes(i));
        p.add(familyBytes, columnBytes, V[k], Bytes.toBytes(V[k]));
        region.put(p);
      }
    }

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, the coprocessor should drop all KeyValues with timestamps in the invalid set
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    Region.FlushResult flushResult = region.flushcache(true, false);
    Assert.assertTrue("Unexpected flush result: " + flushResult, flushResult.isFlushSucceeded());

    // now a normal scan should only return the valid rows
    // do not use a filter here to test that cleanup works on flush
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);

    // first returned value should be "4" with version "4"
    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 4, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 5, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 6, new long[] {V[6], V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 7, new long[] {V[6], V[4]});

    results.clear();
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 8, new long[] {V[8], V[6], V[4]});
  } finally {
    region.close();
  }
}
Example 5
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx, Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx, Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(c.getRow()).add(c.getFamily(), c.getQualifier(), c.getTimestamp(), c.getValue()));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache();
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compactStores(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache();
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compactStores(true);
    // This time raw scan too should not return expired cells, as they would be dropped during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compactStores(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}
Example 6
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx, Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx, Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(c.getRow()).add(c.getFamily(), c.getQualifier(), c.getTimestamp(), c.getValue()));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache(true, false);
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache(true, false);
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    // This time raw scan too should not return expired cells, as they would be dropped during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compact(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}
Example 7
Source File: TestWALReplayWithIndexWritesAndCompressedWAL.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableNameStr), null, null, false);
  final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getLog().closeAndDelete();
  HLog wal = createWAL(this.conf);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL()).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + "-server-1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);

  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  long seqid = region.initialize();
  // HRegionServer usually does this. It knows the largest seqid across all regions.
  wal.setSequenceNumber(seqid);

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(new Put[] { p });

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(), Mockito.any(Exception.class));
  region.close(true);
  wal.close();

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf);
  HLog wal2 = createWAL(this.conf);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
Example 8
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[] {V[8], V[6], deleteTs},
        new byte[][] {Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
Example 9
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDataJanitorRegionScanner() throws Exception {
  String tableName = "TestRegionScanner";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, TimeUnit.HOURS.toMillis(3));
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    for (int i = 1; i <= 8; i++) {
      for (int k = 1; k <= i; k++) {
        Put p = new Put(Bytes.toBytes(i));
        p.add(familyBytes, columnBytes, V[k], Bytes.toBytes(V[k]));
        region.put(p);
      }
    }

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, the coprocessor should drop all KeyValues with timestamps in the invalid set
    LOG.info("Flushing region " + region.getRegionNameAsString());
    HRegion.FlushResult flushResult = region.flushcache();
    Assert.assertTrue("Unexpected flush result: " + flushResult.toString(), flushResult.isFlushSucceeded());

    // now a normal scan should only return the valid rows
    // do not use a filter here to test that cleanup works on flush
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);

    // first returned value should be "4" with version "4"
    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 4, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 5, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 6, new long[] {V[6], V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 7, new long[] {V[6], V[4]});

    results.clear();
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 8, new long[] {V[8], V[6], V[4]});
  } finally {
    region.close();
  }
}
Example 10
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr),
      null, null, false);
  final Path basedir = FSUtils.getTableDir(hbaseRootDir,
      org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getWAL().close();

  WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
  WAL wal = createWAL(this.conf, walFactory);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);

  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  region.initialize();
  region.getSequenceId().set(0);

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(), Mockito.any(Exception.class));

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
Example 11
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDataJanitorRegionScanner() throws Exception {
  String tableName = "TestRegionScanner";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, TimeUnit.HOURS.toMillis(3));
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    for (int i = 1; i <= 8; i++) {
      for (int k = 1; k <= i; k++) {
        Put p = new Put(Bytes.toBytes(i));
        p.addColumn(familyBytes, columnBytes, V[k], Bytes.toBytes(V[k]));
        region.put(p);
      }
    }

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, the coprocessor should drop all KeyValues with timestamps in the invalid set
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    FlushResultImpl flushResult = region.flushcache(true, false, new FlushLifeCycleTracker() { });
    Assert.assertTrue("Unexpected flush result: " + flushResult, flushResult.isFlushSucceeded());

    // now a normal scan should only return the valid rows
    // do not use a filter here to test that cleanup works on flush
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);

    // first returned value should be "4" with version "4"
    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 4, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 5, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 6, new long[] {V[6], V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 7, new long[] {V[6], V[4]});

    results.clear();
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 8, new long[] {V[8], V[6], V[4]});
  } finally {
    region.close();
  }
}
Example 12
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes, deleteTs);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[] {V[8], V[6], deleteTs},
        new byte[][] {Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
Example 13
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final RegionInfo hri = RegionInfoBuilder
      .newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr)).setSplit(false).build();
  final Path basedir = FSUtils.getTableDir(hbaseRootDir,
      org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  WALFactory walFactory = new WALFactory(this.conf, "localhost,1234");
  WAL wal = createWAL(this.conf, walFactory);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type
  region0.close();
  region0.getWAL().close();

  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  // mock out some of the internals of the RSS, so we can run CPs
  when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  when(mockRS.getServerName()).thenReturn(mockServerName);

  HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
  region.initialize();

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(), Mockito.any(Exception.class));

  // then create the index table so we are successful on WAL replay
  TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();
  org.apache.hadoop.hbase.client.Connection hbaseConn =
      ConnectionFactory.createConnection(UTIL.getConfiguration());

  // now check to ensure that we wrote to the index table
  Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(INDEX_TABLE_NAME));
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  Admin admin = UTIL.getAdmin();
  admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.close();
}