Java Code Examples for org.apache.hadoop.hbase.wal.WAL#Entry
The following examples show how to use org.apache.hadoop.hbase.wal.WAL#Entry.
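All of the examples follow the same basic pattern: obtain a WAL.Entry (from a WAL.Reader, a replication stream, or a mock), then inspect its WALKey and WALEdit. As a quick orientation, here is a minimal sketch of that read loop, assembled from the calls used in the examples below; the class name WalEntryScan and the WAL path are placeholders, not part of any of the listed projects.

// Minimal sketch: open a WAL file and walk its WAL.Entry records.
// The path argument is a placeholder; point it at a real WAL file.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalEntryScan {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path walPath = new Path(args[0]);
    FileSystem fs = walPath.getFileSystem(conf);
    try (WAL.Reader reader = WALFactory.createReader(fs, walPath, conf)) {
      for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
        // Each entry pairs a WALKey (table, region, sequence id, write time)
        // with a WALEdit holding the cells written in that transaction.
        System.out.println(entry.getKey().getTableName() + " seqId="
            + entry.getKey().getSequenceId() + " cells=" + entry.getEdit().size());
        for (Cell cell : entry.getEdit().getCells()) {
          System.out.println("  " + cell);
        }
      }
    }
  }
}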
Example 1
Source File: SepConsumerTest.java From hbase-indexer with Apache License 2.0 | 6 votes |
@Test
public void testReplicateLogEntries_SingleWALEditForMultipleRows() throws IOException {
  byte[] rowKeyA = Bytes.toBytes("A");
  byte[] rowKeyB = Bytes.toBytes("B");
  byte[] data = Bytes.toBytes("data");
  Cell kvA = new KeyValue(rowKeyA, DATA_COLFAM, PAYLOAD_QUALIFIER, data);
  Cell kvB = new KeyValue(rowKeyB, DATA_COLFAM, PAYLOAD_QUALIFIER, data);

  WAL.Entry entry = createHlogEntry(TABLE_NAME, kvA, kvB);

  replicateWALEntry(new WAL.Entry[]{entry});

  SepEvent expectedEventA = SepEvent.create(TABLE_NAME, rowKeyA, Lists.newArrayList(kvA),
      Bytes.toBytes("data"));
  SepEvent expectedEventB = SepEvent.create(TABLE_NAME, rowKeyB, Lists.newArrayList(kvB),
      Bytes.toBytes("data"));

  verify(eventListener).processEvents(Lists.newArrayList(expectedEventA, expectedEventB));
}
Example 2
Source File: TestHRegionReplayEvents.java From hbase with Apache License 2.0 | 6 votes |
/**
 * Test the case where the secondary region replica is not in reads enabled state because it is
 * waiting for a flush or region open marker from primary region. Replaying CANNOT_FLUSH
 * flush marker entry should restore the reads enabled status in the region and allow the reads
 * to continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);

  // Test case 1: Test that replaying CANNOT_FLUSH request marker assuming this came from
  // triggered flush restores readsEnabled
  primaryRegion.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getSequenceId());
    }
  }

  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
Example 3
Source File: TestSequenceIdMonotonicallyIncreasing.java From hbase with Apache License 2.0 | 6 votes |
private long getMaxSeqId(HRegionServer rs, RegionInfo region) throws IOException {
  Path walFile = ((AbstractFSWAL<?>) rs.getWAL(null)).getCurrentFileName();
  long maxSeqId = -1L;
  try (WAL.Reader reader =
      WALFactory.createReader(UTIL.getTestFileSystem(), walFile, UTIL.getConfiguration())) {
    for (;;) {
      WAL.Entry entry = reader.next();
      if (entry == null) {
        break;
      }
      if (Bytes.equals(region.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName())) {
        maxSeqId = Math.max(maxSeqId, entry.getKey().getSequenceId());
      }
    }
  }
  return maxSeqId;
}
Example 4
Source File: SystemCatalogWALEntryFilter.java From phoenix with Apache License 2.0 | 6 votes |
@Override
public WAL.Entry filter(WAL.Entry entry) {
  // if the WAL.Entry's table isn't System.Catalog or System.Child_Link, it auto-passes this filter
  // TODO: when Phoenix drops support for pre-1.3 versions of HBase, redo as a WALCellFilter
  if (!SchemaUtil.isMetaTable(entry.getKey().getTableName().getName())) {
    return entry;
  }
  List<Cell> cells = entry.getEdit().getCells();
  List<Cell> cellsToRemove = Lists.newArrayList();
  for (Cell cell : cells) {
    if (!isTenantRowCell(cell)) {
      cellsToRemove.add(cell);
    }
  }
  cells.removeAll(cellsToRemove);
  if (cells.size() > 0) {
    return entry;
  } else {
    return null;
  }
}
Example 5
Source File: ProtobufLogTestHelper.java From hbase with Apache License 2.0 | 6 votes |
public static void doRead(ProtobufLogReader reader, boolean withTrailer, RegionInfo hri,
    TableName tableName, int columnCount, int recordCount, byte[] row, long timestamp)
    throws IOException {
  if (withTrailer) {
    assertNotNull(reader.trailer);
  } else {
    assertNull(reader.trailer);
  }
  for (int i = 0; i < recordCount; ++i) {
    WAL.Entry entry = reader.next();
    assertNotNull(entry);
    assertEquals(columnCount, entry.getEdit().size());
    assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
    assertEquals(tableName, entry.getKey().getTableName());
    int idx = 0;
    for (Cell val : entry.getEdit().getCells()) {
      assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(),
          val.getRowLength()));
      assertArrayEquals(toValue(i, idx), CellUtil.cloneValue(val));
      idx++;
    }
  }
  assertNull(reader.next());
}
Example 6
Source File: SepConsumerTest.java From hbase-indexer with Apache License 2.0 | 5 votes |
private WAL.Entry createHlogEntry(byte[] tableName, long writeTime, Cell... keyValues) {
  WAL.Entry entry = mock(WAL.Entry.class, Mockito.RETURNS_DEEP_STUBS);
  when(entry.getEdit().getCells()).thenReturn(Lists.newArrayList(keyValues));
  when(entry.getKey().getTablename()).thenReturn(TableName.valueOf(tableName));
  when(entry.getKey().getWriteTime()).thenReturn(writeTime);
  when(entry.getKey().getEncodedRegionName()).thenReturn(encodedRegionName);
  when(entry.getKey().getClusterIds()).thenReturn(clusterUUIDs);
  return entry;
}
Example 7
Source File: ChainWALEmptyEntryFilter.java From hbase with Apache License 2.0 | 5 votes |
@Override
public WAL.Entry filter(WAL.Entry entry) {
  entry = super.filter(entry);
  if (filterEmptyEntry && entry != null && entry.getEdit().isEmpty()) {
    return null;
  }
  return entry;
}
Example 8
Source File: TestHRegionReplayEvents.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Test the case where the secondary region replica is not in reads enabled state because it is
 * waiting for a flush or region open marker from primary region. Replaying region open event
 * entry from primary should restore the reads enabled status in the region and allow the reads
 * to continue.
 */
@Test
public void testReplayingRegionOpenEventRestoresReadsEnabledState() throws IOException {
  // Test case 3: Test that replaying region open event markers restores readsEnabled
  disableReads(secondaryRegion);

  primaryRegion.close();
  primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);

  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    RegionEventDescriptor regionEventDesc =
        WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));
    if (regionEventDesc != null) {
      secondaryRegion.replayWALRegionEventMarker(regionEventDesc);
    }
  }

  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
Example 9
Source File: TestHRegionReplayEvents.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Tests the case where a request for flush cache is sent to the region, but region cannot flush.
 * It should write the flush request marker instead.
 */
@Test
public void testWriteFlushRequestMarker() throws IOException {
  // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false
  FlushResultImpl result = primaryRegion.flushcache(true, false, FlushLifeCycleTracker.DUMMY);
  assertNotNull(result);
  assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result);
  assertFalse(result.wroteFlushWalMarker);

  // request flush again, but this time with writeFlushRequestWalMarker = true
  result = primaryRegion.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
  assertNotNull(result);
  assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result);
  assertTrue(result.wroteFlushWalMarker);

  List<FlushDescriptor> flushes = Lists.newArrayList();
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      flushes.add(flush);
    }
  }

  assertEquals(1, flushes.size());
  assertNotNull(flushes.get(0));
  assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction());
}
Example 10
Source File: SepConsumerTest.java From hbase-indexer with Apache License 2.0 | 5 votes |
@Test
public void testReplicateLogEntries_EntryTimestampBeforeSubscriptionTimestamp() throws IOException {
  byte[] rowKey = Bytes.toBytes("rowkey");
  byte[] payloadDataBeforeTimestamp = Bytes.toBytes("payloadBeforeTimestamp");
  byte[] payloadDataOnTimestamp = Bytes.toBytes("payloadOnTimestamp");
  byte[] payloadDataAfterTimestamp = Bytes.toBytes("payloadAfterTimestamp");

  WAL.Entry hlogEntryBeforeTimestamp = createHlogEntry(TABLE_NAME, SUBSCRIPTION_TIMESTAMP - 1,
      new KeyValue(rowKey, DATA_COLFAM, PAYLOAD_QUALIFIER, payloadDataBeforeTimestamp));
  WAL.Entry hlogEntryOnTimestamp = createHlogEntry(TABLE_NAME, SUBSCRIPTION_TIMESTAMP,
      new KeyValue(rowKey, DATA_COLFAM, PAYLOAD_QUALIFIER, payloadDataOnTimestamp));
  WAL.Entry hlogEntryAfterTimestamp = createHlogEntry(TABLE_NAME, SUBSCRIPTION_TIMESTAMP + 1,
      new KeyValue(rowKey, DATA_COLFAM, PAYLOAD_QUALIFIER, payloadDataAfterTimestamp));

  replicateWALEntry(new WAL.Entry[]{hlogEntryBeforeTimestamp});
  replicateWALEntry(new WAL.Entry[]{hlogEntryOnTimestamp});
  replicateWALEntry(new WAL.Entry[]{hlogEntryAfterTimestamp});

  SepEvent expectedEventOnTimestamp = SepEvent.create(TABLE_NAME, rowKey,
      hlogEntryOnTimestamp.getEdit().getCells(), payloadDataOnTimestamp);
  SepEvent expectedEventAfterTimestamp = SepEvent.create(TABLE_NAME, rowKey,
      hlogEntryAfterTimestamp.getEdit().getCells(), payloadDataAfterTimestamp);

  // Event should be published for data on or after the subscription timestamp, but not before
  verify(eventListener, times(1)).processEvents(Lists.newArrayList(expectedEventOnTimestamp));
  verify(eventListener, times(1)).processEvents(Lists.newArrayList(expectedEventAfterTimestamp));
  verifyNoMoreInteractions(eventListener);
}
Example 11
Source File: TestDurability.java From hbase with Apache License 2.0 | 5 votes |
private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
  Path walPath = AbstractFSWALProvider.getCurrentFileName(log);
  WAL.Reader reader = wals.createReader(FS, walPath);
  int count = 0;
  WAL.Entry entry = new WAL.Entry();
  while (reader.next(entry) != null) {
    count++;
  }
  reader.close();
  assertEquals(expected, count);
}
Example 12
Source File: ProtobufLogTestHelper.java From hbase with Apache License 2.0 | 5 votes |
public static void doWrite(WAL wal, RegionInfo hri, TableName tableName, int columnCount,
    int recordCount, byte[] row, long timestamp, MultiVersionConcurrencyControl mvcc)
    throws IOException {
  for (int i = 0; i < recordCount; i++) {
    WAL.Entry entry = generateEdit(i, hri, tableName, row, columnCount, timestamp, mvcc);
    wal.appendData(hri, entry.getKey(), entry.getEdit());
  }
  wal.sync();
}
Example 13
Source File: AbstractTestDLS.java From hbase with Apache License 2.0 | 5 votes |
private int countWAL(Path log, FileSystem fs, Configuration conf) throws IOException {
  int count = 0;
  try (WAL.Reader in = WALFactory.createReader(fs, log, conf)) {
    WAL.Entry e;
    while ((e = in.next()) != null) {
      if (!WALEdit.isMetaEditFamily(e.getEdit().getCells().get(0))) {
        count++;
      }
    }
  }
  return count;
}
Example 14
Source File: TestReplicationSource.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 */
@Test
public void testLogMoving() throws Exception {
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) FS.mkdirs(logDir);
  if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
  WALProvider.Writer writer = WALFactory.createWALWriter(FS, logPath, TEST_UTIL.getConfiguration());
  for (int i = 0; i < 3; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
    writer.append(new WAL.Entry(key, edit));
    writer.sync(false);
  }
  writer.close();

  WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration());
  WAL.Entry entry = reader.next();
  assertNotNull(entry);

  Path oldLogPath = new Path(oldLogDir, "log");
  FS.rename(logPath, oldLogPath);

  entry = reader.next();
  assertNotNull(entry);

  entry = reader.next();
  entry = reader.next();

  assertNull(entry);
  reader.close();
}
Example 15
Source File: SepConsumerTest.java From hbase-indexer with Apache License 2.0 | 4 votes |
private void replicateWALEntry(WAL.Entry[] entries) throws IOException {
  ReplicationProtbufUtil.replicateWALEntry(sepConsumer, entries);
}
Example 16
Source File: SepConsumerTest.java From hbase-indexer with Apache License 2.0 | 4 votes |
@Test
public void testReplicateLogEntries() throws IOException {
  byte[] rowKey = Bytes.toBytes("rowkey");
  byte[] payloadData = Bytes.toBytes("payload");

  WAL.Entry hlogEntry = createHlogEntry(TABLE_NAME,
      new KeyValue(rowKey, DATA_COLFAM, PAYLOAD_QUALIFIER, payloadData));

  replicateWALEntry(new WAL.Entry[]{hlogEntry});

  SepEvent expectedSepEvent = SepEvent.create(TABLE_NAME, rowKey,
      hlogEntry.getEdit().getCells(), payloadData);

  verify(eventListener).processEvents(Lists.newArrayList(expectedSepEvent));
}
Example 17
Source File: TestHRegionReplayEvents.java From hbase with Apache License 2.0 | 4 votes |
@Test
public void testReplayFlushSeqIds() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);

  // now replay the flush marker
  reader = createWALReaderForPrimary();

  long flushSeqId = -1;
  LOG.info("-- Replaying flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        secondaryRegion.replayWALFlushStartMarker(flushDesc);
        flushSeqId = flushDesc.getFlushSequenceNumber();
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- Replaying flush commit in secondary");
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);
        assertEquals(flushSeqId, flushDesc.getFlushSequenceNumber());
      }
    }
    // else do not replay
  }

  // TODO: what to do with this?
  // assert that the newly picked up flush file is visible
  long readPoint = secondaryRegion.getMVCC().getReadPoint();
  assertEquals(flushSeqId, readPoint);

  // after replay verify that everything is still visible
  verifyData(secondaryRegion, 0, 100, cq, families);
}
Example 18
Source File: TestWALEntryStream.java From hbase with Apache License 2.0 | 4 votes |
private String getRow(WAL.Entry entry) {
  Cell cell = entry.getEdit().getCells().get(0);
  return Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
}
Example 19
Source File: SepConsumerTest.java From hbase-indexer with Apache License 2.0 | 4 votes |
private WAL.Entry createHlogEntry(byte[] tableName, Cell... keyValues) {
  return createHlogEntry(tableName, SUBSCRIPTION_TIMESTAMP + 1, keyValues);
}
Example 20
Source File: WALProcedurePrettyPrinter.java From hbase with Apache License 2.0 | 4 votes |
@Override
protected int doWork() throws Exception {
  Path path = new Path(file);
  FileSystem fs = path.getFileSystem(conf);
  try (WAL.Reader reader = WALFactory.createReader(fs, path, conf)) {
    for (;;) {
      WAL.Entry entry = reader.next();
      if (entry == null) {
        return 0;
      }
      WALKey key = entry.getKey();
      WALEdit edit = entry.getEdit();
      long sequenceId = key.getSequenceId();
      long writeTime = key.getWriteTime();
      out.println(
        String.format(KEY_TMPL, sequenceId, FORMATTER.format(Instant.ofEpochMilli(writeTime))));
      for (Cell cell : edit.getCells()) {
        Map<String, Object> op = WALPrettyPrinter.toStringMap(cell);
        if (!Bytes.equals(PROC_FAMILY, 0, PROC_FAMILY.length, cell.getFamilyArray(),
          cell.getFamilyOffset(), cell.getFamilyLength())) {
          // We could have cells other than procedure edits, for example, a flush marker
          WALPrettyPrinter.printCell(out, op, false);
          continue;
        }
        long procId = Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
        out.println("pid=" + procId + ", type=" + op.get("type") + ", column=" +
          op.get("family") + ":" + op.get("qualifier"));
        if (cell.getType() == Cell.Type.Put) {
          if (cell.getValueLength() > 0) {
            // should be a normal put
            Procedure<?> proc = ProcedureUtil.convertToProcedure(ProcedureProtos.Procedure.parser()
              .parseFrom(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
            out.println("\t" + proc.toStringDetails());
          } else {
            // should be a 'delete' put
            out.println("\tmark deleted");
          }
        }
        out.println("cell total size sum: " + cell.heapSize());
      }
      out.println("edit heap size: " + edit.heapSize());
      out.println("position: " + reader.getPosition());
    }
  }
}