org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.
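Before the project examples, here is a minimal sketch of the most common local-file usage pattern (not taken from any of the projects below): open a finalized edits segment, iterate its operations with readOp(), and close the stream. The file path is hypothetical, the transaction-id arguments mirror Example #2 below, and, like that test code, the snippet assumes it runs inside the org.apache.hadoop.hdfs.server.namenode package, since several of these types are internal NameNode APIs.

File edits = new File("/data/name/current/edits_0000000000000000001-0000000000000000012"); // hypothetical path
EditLogFileInputStream in = new EditLogFileInputStream(edits,
    HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false); // txids unknown, segment not in progress
try {
  FSEditLogOp op;
  while ((op = in.readOp()) != null) { // readOp() returns null at the end of the segment
    System.out.println(op);
  }
} finally {
  in.close();
}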
Example #1
Source File: QuorumJournalManager.java From hadoop with Apache License 2.0
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk) throws IOException {

  // Ask every JournalNode for its manifest of edit log segments and wait
  // for a write quorum of responses.
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, inProgressOk);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");

  LOG.debug("selectInputStream manifests:\n" +
      Joiner.on("\n").withKeyValueSeparator(": ").join(resps));

  // Collect one stream per reported segment, ordered by start transaction id.
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());

      // Each remote segment is exposed as an EditLogFileInputStream that
      // streams the log over HTTP from the JournalNode.
      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          connectionFactory, url, remoteLog.getStartTxId(),
          remoteLog.getEndTxId(), remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
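This override gathers edit log segments from the JournalNodes: it waits for a write quorum of manifest responses, turns every reported segment into a stream with EditLogFileInputStream.fromUrl (so the segment is fetched over HTTP from the JournalNode that reported it), orders the streams by start transaction id in the priority queue, and finally lets JournalSet.chainAndMakeRedundantStreams collapse overlapping copies of the same segment into redundant, chained streams.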
Example #2
Source File: TestDFSUpgrade.java From hadoop with Apache License 2.0
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      // Move each edits file aside, then rewrite it under its original name
      // with a header carrying an older layout version (layout versions are
      // negative, so CURRENT_LAYOUT_VERSION + 1 is the previous one).
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int) tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);

      // Copy every op from the old file into the rewritten one.
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();
  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(events[0].getEventType(), Event.EventType.CREATE);
  assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
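This test pairs EditLogFileInputStream with EditLogFileOutputStream to rewrite every edits file before an upgrade: each file is moved to a .tmp copy, its ops are read back one by one and written into a freshly created file at an older layout version, and the inotify assertions at the end confirm that the pre-upgrade transactions are still readable after the NameNode starts.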
Example #3
Source File: TestEditLog.java From RDFS with Apache License 2.0
/**
 * Tests transaction logging in dfs.
 */
public void testEditLog() throws IOException {
  // start a cluster
  Collection<File> namedirs = null;
  Collection<File> editsdirs = null;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(0, conf, numDatanodes,
                                              true, true, null, null);
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();

  int numdirs = 0;
  try {
    namedirs = cluster.getNameDirs();
    editsdirs = cluster.getNameEditsDirs();
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  for (Iterator it = namedirs.iterator(); it.hasNext(); ) {
    File dir = (File) it.next();
    System.out.println(dir);
    numdirs++;
  }

  FSImage fsimage = namesystem.getFSImage();
  FSEditLog editLog = fsimage.getEditLog();

  // set small size of flush buffer
  editLog.setBufferCapacity(2048);
  editLog.close();
  editLog.open();

  // Create threads and make them run transactions concurrently.
  Thread threadId[] = new Thread[numThreads];
  for (int i = 0; i < numThreads; i++) {
    Transactions trans = new Transactions(namesystem, numberTransactions);
    threadId[i] = new Thread(trans, "TransactionThread-" + i);
    threadId[i].start();
  }

  // wait for all transactions to get over
  for (int i = 0; i < numThreads; i++) {
    try {
      threadId[i].join();
    } catch (InterruptedException e) {
      i--; // retry
    }
  }

  editLog.close();

  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  //
  for (Iterator<StorageDirectory> it =
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
    // Start from 0 when loading edit logs.
    editLog.setStartTransactionId(0);
    System.out.println("Verifying file: " + editFile);
    EditLogInputStream is = new EditLogFileInputStream(editFile);
    FSEditLogLoader loader = new FSEditLogLoader(namesystem);
    int numEdits = loader.loadFSEdits(is,
        namesystem.getEditLog().getCurrentTxId());
    int numLeases = namesystem.leaseManager.countLease();
    System.out.println("Number of outstanding leases " + numLeases);
    assertEquals(0, numLeases);
    assertTrue("Verification for " + editFile + " failed. " +
               "Expected " + (numThreads * 3 * numberTransactions) + " transactions. " +
               "Found " + numEdits + " transactions.",
               numEdits == numThreads * 3 * numberTransactions);
  }
}
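After the writer threads finish, the test re-opens each edits file with EditLogFileInputStream and replays it through FSEditLogLoader, checking that no leases are left outstanding and that the number of loaded transactions matches what the threads wrote.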