Java Code Examples for org.apache.hadoop.hbase.wal.WALSplitter#split()
The following examples show how to use org.apache.hadoop.hbase.wal.WALSplitter#split().
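All of the examples share one call shape. The sketch below is not taken from any of the projects; the class, method, and variable names are placeholders, and the exact visibility and signature of WALSplitter.split() differ between HBase versions, so treat it only as a summary of the usage the examples illustrate.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class WalSplitSketch {
  /**
   * Split all WAL files found under walDir. WALSplitter.split(...) writes the recovered
   * edits under the matching region directories below rootDir, archives the processed
   * WAL files to oldWalDir, and returns the paths of the files it produced.
   */
  static List<Path> splitWals(Configuration conf, Path rootDir, Path walDir, Path oldWalDir)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    // The factory id is arbitrary in this sketch; the tests below reuse the server/log name.
    WALFactory walFactory = new WALFactory(conf, "wal-split-sketch");
    return WALSplitter.split(rootDir, walDir, oldWalDir, fs, conf, walFactory);
  }
}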
Example 1
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java (from Apache Phoenix, Apache License 2.0)
private Path runWALSplit(final Configuration c, WALFactory walFactory) throws IOException {
  FileSystem fs = FileSystem.get(c);
  List<Path> splits = WALSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
    this.oldLogDir, fs, c, walFactory);
  // Split should generate only 1 file since there's only 1 region
  assertEquals("splits=" + splits, 1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
Example 2
Source File: TestWALObserver.java (from Apache HBase, Apache License 2.0)
private Path runWALSplit(final Configuration c) throws IOException {
  List<Path> splits = WALSplitter.split(
    hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals);
  // Split should generate only 1 file since there's only 1 region
  assertEquals(1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
Example 3
Source File: AbstractTestWALReplay.java (from Apache HBase, Apache License 2.0)
/**
 * Run the split. Verify only single split file made.
 * @return The single split file made
 */
private Path runWALSplit(final Configuration c) throws IOException {
  List<Path> splits = WALSplitter.split(
    hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals);
  // Split should generate only 1 file since there's only 1 region
  assertEquals("splits=" + splits, 1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOG.info("Split file=" + splits.get(0));
  return splits.get(0);
}
Example 4
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java (from Apache Phoenix, Apache License 2.0)
private Path runWALSplit(final Configuration c, WALFactory walFactory) throws IOException {
  FileSystem fs = FileSystem.get(c);
  List<Path> splits = WALSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
    this.oldLogDir, fs, c, walFactory);
  // Split should generate only 1 file since there's only 1 region
  assertEquals("splits=" + splits, 1, splits.size());
  // Make sure the file exists
  assertTrue(fs.exists(splits.get(0)));
  LOGGER.info("Split file=" + splits.get(0));
  return splits.get(0);
}
Example 5
Source File: TestLogRollAbort.java (from Apache HBase, Apache License 2.0)
/**
 * Tests the case where a RegionServer enters a GC pause,
 * comes back online after the master declared it dead and started to split.
 * Want log rolling after a master split to fail. See HBASE-2312.
 */
@Test
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = ServerName.valueOf("testLogRollAfterSplitStart",
      16010, System.currentTimeMillis()).toString();
  Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, logName);

  try {
    // put some entries in a WAL
    TableName tableName = TableName.valueOf(this.getClass().getName());
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    WAL log = wals.getWAL(regionInfo);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      scopes.put(Bytes.toBytes("column"), 0);
      log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis(), mvcc, scopes), kvs);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog) log).getOldPath(), null, null);

    // code taken from MasterFileSystem.getLogDirs(), which is called from
    // MasterFileSystem.splitLog() handles RS shutdowns (as observed by the splitting process)
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
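The part of Example 5 that concerns WALSplitter#split() is the directory handshake done before the split: the server's WAL directory is renamed with the "-splitting" suffix so the (possibly still alive) region server cannot add new files, and only then is the split run. The condensed sketch below restates just that sequence; it assumes fs, conf, walFactory and the directory paths are set up as in the test above, and the variable names are placeholders.

// Condensed from Example 5. The rename must succeed before splitting starts, otherwise a
// server waking up from a GC pause could still write to the directory being split.
Path serverWalDir = new Path(hbaseLogDir, AbstractFSWALProvider.getWALDirectoryName(serverName));
Path splittingDir = serverWalDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); // "<dir>-splitting"
if (!fs.rename(serverWalDir, splittingDir)) {
  throw new IOException("Failed to rename " + serverWalDir + " for splitting");
}
WALSplitter.split(hbaseLogDir, splittingDir, oldWalDir, fs, conf, walFactory);
// Any later WAL roll by the original server now fails; the test above asserts that the
// resulting IOException is caused by a FileNotFoundException.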