org.apache.hadoop.hdfs.DFSInputStream Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.DFSInputStream.
Each example notes its source file, the project it was taken from, and that project's license.
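Before the project-specific examples, here is a minimal sketch of the pattern most of them share: obtain a DFSInputStream from a DFSClient, read the file contents, and clean up. It is illustrative only; the path "/tmp/example.txt" and the plain new Configuration() are assumptions rather than something taken from the projects below, and the DFSClient(Configuration) constructor mirrors the RDFS examples (newer Hadoop releases favor the URI-based constructors and FileSystem.open).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.io.IOUtils;

public class DFSInputStreamSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();  // picks up core-site.xml / hdfs-site.xml from the classpath
    DFSClient client = new DFSClient(conf);    // connects to the default namenode
    DFSInputStream in = null;
    try {
      in = client.open("/tmp/example.txt");    // assumed path, for illustration only
      byte[] content = new byte[(int) in.getFileLength()];
      IOUtils.readFully(in, content, 0, content.length);
    } finally {
      IOUtils.cleanup(null, in);               // closes the stream, ignoring secondary errors
      client.close();
    }
  }
}

Most production code does not hold the raw DFSInputStream directly; as Examples #5, #13, #18, and #19 below show, it is usually wrapped via DFSClient.createWrappedInputStream or obtained indirectly through FileSystem.open.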
Example #1
Source File: DFSByteBufferReader.java From indexr with Apache License 2.0
private void tryGetLocalFile() {
  if (tryGetLocalFileTimes >= TRY_GET_LOCAL_FILE_LIMIT) {
    return;
  }
  if (isSingleBlock && HDFS_READ_HACK_ENABLE) {
    try {
      InputStream is = input.getWrappedStream();
      if (is instanceof DFSInputStream) {
        BlockReader blockReader = MemoryUtil.getDFSInputStream_blockReader(is);
        if (blockReader != null && blockReader.isShortCircuit()) {
          localFile = MemoryUtil.getBlockReaderLocal_dataIn(blockReader);
        }
      }
    } catch (Throwable e) {
      logger.debug("HDFS READ HACK failed.", e);
    }
  }
  tryGetLocalFileTimes++;
}
Example #2
Source File: WaitingRoom.java From RDFS with Apache License 2.0
private void addDirToMaps(Path dir, DFSClient client) throws IOException {
  FileStatus[] children = dfs.listStatus(dir);

  if (children == null) return;

  for (FileStatus child: children) {
    if (!child.isDir()) { // get block ids for file
      Path path = child.getPath(); // paths will be unique
      fileMap.put(path, new ArrayList<Long>());

      DFSInputStream stm = client.open(child.getPath().toUri().getPath());
      LocatedBlocks blocks = stm.fetchLocatedBlocks();
      stm.close();

      for (int i = 0; i < blocks.locatedBlockCount(); i++) {
        Long blockId = blocks.get(i).getBlock().getBlockId();
        fileMap.get(path).add(blockId); // add to file block list
        blockRefMap.put(blockId, null); // mark as unreferenced
      }
    } else {
      // If child is a directory, recurse on it
      addDirToMaps(child.getPath(), client);
    }
  }
}
Example #3
Source File: TestShortCircuitCache.java From big-c with Apache License 2.0
private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(), testName).getAbsolutePath());
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  return conf;
}
Example #4
Source File: TestShortCircuitCache.java From hadoop with Apache License 2.0
private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(), testName).getAbsolutePath());
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  return conf;
}
Example #5
Source File: DFSClientCache.java From big-c with Apache License 2.0
private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
  return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {

    @Override
    public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
      DFSClient client = getDfsClient(key.userId);
      DFSInputStream dis = client.open(key.inodePath);
      return client.createWrappedInputStream(dis);
    }
  };
}
Example #6
Source File: DFSByteBufferReader.java From indexr with Apache License 2.0
public static ByteBufferReader open(org.apache.hadoop.fs.FileSystem fileSystem,
                                    org.apache.hadoop.fs.Path path,
                                    long size,
                                    int blockCount,
                                    long readBase) throws IOException {
  FSDataInputStream stream = fileSystem.open(path);

  if (HDFS_READ_HACK_ENABLE) {
    if (IS_SHORT_CIRCUIT_LOCAL_READ_ENABLE == null) {
      IS_SHORT_CIRCUIT_LOCAL_READ_ENABLE = Boolean.parseBoolean(
          fileSystem.getConf().get("dfs.client.read.shortcircuit", "false"));
    }
    if (IS_SHORT_CIRCUIT_LOCAL_READ_ENABLE) {
      InputStream is = stream.getWrappedStream();
      if (is instanceof DFSInputStream) {
        // Disable checksum verification when short-circuit local read is enabled.
        MemoryUtil.setDFSInputStream_verifyChecksum(is, false);
        logger.debug("disable read check sum for: {}", path);
      }
    }
  }

  return new DFSByteBufferReader(
      path.toString(),
      stream,
      size,
      readBase,
      stream,
      blockCount);
}
Example #7
Source File: TestStreamFile.java From big-c with Apache License 2.0
@Test
public void testDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException()
    throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
      .build();
  try {
    Path testFile = createFile();
    setUpForDoGetTest(cluster, testFile);
    Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
        .getOutputStream();
    DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);
    Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());
    Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();
    try {
      sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
      fail("Not throwing the IOException");
    } catch (IOException e) {
      Mockito.verify(clientMock, Mockito.atLeastOnce()).close();
    }
  } finally {
    cluster.shutdown();
  }
}
Example #8
Source File: TestFsck.java From big-c with Apache License 2.0
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];
  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
              "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
Example #9
Source File: TestFsck.java From big-c with Apache License 2.0
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int)status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    in.close();
  }
  return content;
}
Example #10
Source File: TestReadSlowDataNode.java From RDFS with Apache License 2.0
public static DFSInputStream findDFSClientInputStream(FSDataInputStream in)
    throws SecurityException, NoSuchFieldException, IllegalArgumentException,
    IllegalAccessException {
  Field inField = FilterInputStream.class.getDeclaredField("in");
  inField.setAccessible(true);
  return (DFSInputStream) inField.get(in);
}
Example #11
Source File: ChunkRecordReader.java From pxf with Apache License 2.0
/**
 * Translates the FSDataInputStream into a DFSInputStream.
 */
private DFSInputStream getInputStream() throws IncompatibleInputStreamException {
  InputStream inputStream = fileIn.getWrappedStream();
  if (inputStream instanceof DFSInputStream) {
    return (DFSInputStream) inputStream;
  } else {
    IOUtils.closeStream(fileIn);
    throw new IncompatibleInputStreamException(inputStream.getClass());
  }
}
Example #12
Source File: TestReadSlowDataNode.java From RDFS with Apache License 2.0
public static ConcurrentHashMap<DatanodeInfo, DatanodeInfo> getDeadNodes(
    DFSInputStream in) throws SecurityException, IllegalArgumentException,
    NoSuchFieldException, IllegalAccessException {
  Field deadNodesField = DFSInputStream.class.getDeclaredField("deadNodes");
  deadNodesField.setAccessible(true);
  return (ConcurrentHashMap<DatanodeInfo, DatanodeInfo>) deadNodesField.get(in);
}
Example #13
Source File: Hdfs.java From big-c with Apache License 2.0
@SuppressWarnings("deprecation") @Override public HdfsDataInputStream open(Path f, int bufferSize) throws IOException, UnresolvedLinkException { final DFSInputStream dfsis = dfs.open(getUriPath(f), bufferSize, verifyChecksum); return dfs.createWrappedInputStream(dfsis); }
Example #14
Source File: TestStreamFile.java From hadoop with Apache License 2.0
@Test
public void testDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException()
    throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
      .build();
  try {
    Path testFile = createFile();
    setUpForDoGetTest(cluster, testFile);
    Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
        .getOutputStream();
    DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);
    Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());
    Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();
    try {
      sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
      fail("Not throwing the IOException");
    } catch (IOException e) {
      Mockito.verify(clientMock, Mockito.atLeastOnce()).close();
    }
  } finally {
    cluster.shutdown();
  }
}
Example #15
Source File: TestFsck.java From hadoop with Apache License 2.0
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];
  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
              "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
Example #16
Source File: TestFsck.java From hadoop with Apache License 2.0
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int)status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    in.close();
  }
  return content;
}
Example #17
Source File: SnapshotClient.java From RDFS with Apache License 2.0
public DFSInputStream open(String snapshotId, String src) throws IOException {
  LocatedBlocksWithMetaInfo blocks[] = getLocatedBlocks(snapshotId, src);

  // Not strictly correct. block.length = 1 could mean directory with
  // one file. Might want to add a file specific API.
  if (blocks == null || blocks.length != 1) {
    throw new IOException("File at " + src + " doesn't exist in snapshot");
  }

  return client.open(blocks[0]);
}
Example #18
Source File: Hdfs.java From hadoop with Apache License 2.0
@SuppressWarnings("deprecation") @Override public HdfsDataInputStream open(Path f, int bufferSize) throws IOException, UnresolvedLinkException { final DFSInputStream dfsis = dfs.open(getUriPath(f), bufferSize, verifyChecksum); return dfs.createWrappedInputStream(dfsis); }
Example #19
Source File: DFSClientCache.java From hadoop with Apache License 2.0
private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
  return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {

    @Override
    public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
      DFSClient client = getDfsClient(key.userId);
      DFSInputStream dis = client.open(key.inodePath);
      return client.createWrappedInputStream(dis);
    }
  };
}
Example #20
Source File: HdfsDataInputStream.java From big-c with Apache License 2.0
private DFSInputStream getDFSInputStream() {
  if (in instanceof CryptoInputStream) {
    return (DFSInputStream) ((CryptoInputStream) in).getWrappedStream();
  }
  return (DFSInputStream) in;
}
Example #21
Source File: TestReadSlowDataNode.java From RDFS with Apache License 2.0
/**
 * Test that a datanode serving reads slower than dfs.min.read.speed.bps
 * is added to the client's dead node list.
 *
 * @throws NoSuchFieldException
 * @throws SecurityException
 * @throws IllegalAccessException
 * @throws IllegalArgumentException
 */
public void testSlowDn() throws IOException, SecurityException,
    NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.min.read.speed.bps", 1024 * 200);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  FSDataInputStream in = null;
  try {
    // create a new file, write to it and close it.
    //
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = createFile(fs, file1, 2);
    writeFile(stm);
    stm.close();

    in = fs.open(file1);
    in.readByte();

    DFSInputStream dfsClientIn = findDFSClientInputStream(in);
    Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
    blockReaderField.setAccessible(true);
    BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

    blockReader.setArtificialSlowdown(1000);
    blockReader.isReadLocal = false;
    blockReader.isReadRackLocal = false;
    blockReader.ENABLE_THROW_FOR_SLOW = true;
    for (int i = 0; i < 1024; i++) {
      in.readByte();
    }

    blockReader.setArtificialSlowdown(0);
    for (int i = 1024; i < fileSize - 1; i++) {
      in.readByte();
    }

    ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
    TestCase.assertEquals(1, deadNodes.size());
  } finally {
    if (in != null) {
      in.close();
    }
    fs.close();
    cluster.shutdown();
  }
}
Example #22
Source File: TestDFSClientUpdateNameNodeSignature.java From RDFS with Apache License 2.0
/**
 * Test that when the name-node's fingerprint changes, the client
 * re-fetches the name-node proxy.
 */
public void testClientUpdateMethodList() throws IOException {
  InetSocketAddress addr = cluster.getNameNode().getNameNodeDNAddress();
  DFSClient client = new DFSClient(addr, cluster.getNameNode().getConf());
  ClientProtocol oldNamenode = client.namenode;

  // Client's name-node proxy should keep the same if the same namenode
  // sends the same fingerprint
  //
  OutputStream os = client.create("/testClientUpdateMethodList.txt", true);
  os.write(66);
  os.close();
  TestCase.assertSame(oldNamenode, client.namenode);

  int oldFingerprint = cluster.getNameNode().getClientProtocolMethodsFingerprint();
  TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
      .getMethodsFingerprint());

  // Namenode's fingerprint will be different to client. Client is supposed
  // to get a new proxy.
  //
  cluster.getNameNode().setClientProtocolMethodsFingerprint(666);
  os = client.create("/testClientUpdateMethodList1.txt", true);
  os.write(88);
  os.close();
  TestCase.assertNotSame(oldNamenode, client.namenode);

  // Since we didn't change method list of name-node, the fingerprint
  // got from the new proxy should be the same as the previous one.
  TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
      .getMethodsFingerprint());

  // Client's name-node proxy should keep the same if the same namenode
  // sends the same fingerprint
  //
  ClientProtocol namenode1 = client.namenode;
  cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
  DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
  int val = dis.read();
  TestCase.assertEquals(66, val);
  dis.close();
  TestCase.assertSame(namenode1, client.namenode);

  // Namenode's fingerprint will be different to client. Client is supposed
  // to get a new proxy.
  //
  cluster.getNameNode().setClientProtocolMethodsFingerprint(888);
  dis = client.open("/testClientUpdateMethodList1.txt");
  val = dis.read();
  TestCase.assertEquals(88, val);
  dis.close();

  // Since we didn't change method list of name-node, the fingerprint
  // got from the new proxy should be the same as the previous one.
  TestCase.assertNotSame(namenode1, client.namenode);
}
Example #23
Source File: TestDFSClientRetries.java From RDFS with Apache License 2.0
/**
 * This tests that DFSInputStream failures are counted for a given read
 * operation, and not over the lifetime of the stream. It is a regression
 * test for HDFS-127.
 */
public void testFailuresArePerOperation() throws Exception {
  long fileSize = 4096;
  Path file = new Path("/testFile");

  Configuration conf = new Configuration();
  int maxBlockAcquires = DFSClient.getMaxBlockAcquireFailures(conf);
  assertTrue(maxBlockAcquires > 0);

  int[] numDataNodes = new int[] {1, maxBlockAcquires, maxBlockAcquires + 1};
  for (int numDataNode : numDataNodes) {
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster(conf, numDataNode, true, null);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      NameNode preSpyNN = cluster.getNameNode();
      NameNode spyNN = spy(preSpyNN);
      DFSClient client = new DFSClient(null, spyNN, conf, null);
      DFSTestUtil.createFile(fs, file, fileSize, (short)numDataNode, 12345L /*seed*/);

      // If the client will retry maxBlockAcquires times, then if we fail
      // any more than that number of times, the operation should entirely
      // fail.
      doAnswer(new FailNTimesAnswer(preSpyNN, numDataNode,
          Math.min(maxBlockAcquires, numDataNode) + 1))
          .when(spyNN).openAndFetchMetaInfo(anyString(), anyLong(), anyLong());
      try {
        IOUtils.copyBytes(client.open(file.toString()),
            new IOUtils.NullOutputStream(), conf, true);
        fail("Didn't get exception");
      } catch (IOException ioe) {
        DFSClient.LOG.info("Got expected exception", ioe);
      }

      // If we fail exactly that many times, then it should succeed.
      doAnswer(new FailNTimesAnswer(preSpyNN, numDataNode,
          Math.min(maxBlockAcquires, numDataNode)))
          .when(spyNN).openAndFetchMetaInfo(anyString(), anyLong(), anyLong());
      IOUtils.copyBytes(client.open(file.toString()),
          new IOUtils.NullOutputStream(), conf, true);

      DFSClient.LOG.info("Starting test case for failure reset");

      // Now the tricky case - if we fail a few times on one read, then succeed,
      // then fail some more on another read, it shouldn't fail.
      doAnswer(new FailNTimesAnswer(preSpyNN, numDataNode,
          Math.min(maxBlockAcquires, numDataNode)))
          .when(spyNN).openAndFetchMetaInfo(anyString(), anyLong(), anyLong());
      DFSInputStream is = client.open(file.toString());
      byte buf[] = new byte[10];
      IOUtils.readFully(is, buf, 0, buf.length);

      DFSClient.LOG.info("First read successful after some failures.");

      // Further reads at this point will succeed since it has the good block locations.
      // So, force the block locations on this stream to be refreshed from bad info.
      // When reading again, it should start from a fresh failure count, since
      // we're starting a new operation on the user level.
      doAnswer(new FailNTimesAnswer(preSpyNN, numDataNode,
          Math.min(maxBlockAcquires, numDataNode)))
          .when(spyNN).openAndFetchMetaInfo(anyString(), anyLong(), anyLong());
      is.openInfo();

      // Seek to beginning forces a reopen of the BlockReader - otherwise it'll
      // just keep reading on the existing stream and the fact that we've poisoned
      // the block info won't do anything.
      is.seek(0);
      IOUtils.readFully(is, buf, 0, buf.length);
    } finally {
      if (null != cluster) {
        cluster.shutdown();
      }
    }
  }
}
Example #24
Source File: TestSnapshotFileRead.java From RDFS with Apache License 2.0
/**
 * Only need to check rename. File can never be deleted
 * because it should always be moved to WaitingRoom which
 * is equivalent to a rename.
 */
public void testFileRead() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 4, true, null);
  SnapshotNode ssNode = new SnapshotNode(conf);
  WaitingRoom wr = new WaitingRoom(conf);
  DFSClient client = new DFSClient(conf);
  SnapshotClient ssClient = new SnapshotClient(conf);

  FileSystem dfs = cluster.getFileSystem();
  String ssDir = conf.get("fs.snapshot.dir", "/.SNAPSHOT");

  Path foo = new Path("/foo");
  Path bar = new Path("/bar");

  FSDataOutputStream out;

  out = dfs.create(foo);
  out.write(0);
  out.close();

  out = dfs.create(bar);
  out.write(1);
  out.sync();

  ssNode.createSnapshot("test", true);

  out.write(2);
  out.close();

  wr.moveToWaitingRoom(foo); // delete

  // Current system has foo deleted and bar with length 2
  // test snapshot has foo with length 1 and bar with length 1

  // Checking current file system
  assertTrue(!dfs.exists(foo));
  DFSInputStream in = client.open("/bar");
  assertTrue(in.getFileLength() == 2);
  assertTrue(in.read() == 1);
  assertTrue(in.read() == 2);
  assertTrue(in.read() == -1); // eof

  // Checking test snapshot
  in = ssClient.open("test", "/foo");
  assertTrue(in.getFileLength() == 1);
  assertTrue(in.read() == 0);
  assertTrue(in.read() == -1); // eof

  in = ssClient.open("test", "/bar");
  assertTrue(in.getFileLength() == 1);
  assertTrue(in.read() == 1);
  assertTrue(in.read() == -1); // eof
}
Example #25
Source File: HdfsDataInputStream.java From big-c with Apache License 2.0
public HdfsDataInputStream(CryptoInputStream in) throws IOException {
  super(in);
  Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream,
      "CryptoInputStream should wrap a DFSInputStream");
}
Example #26
Source File: HdfsDataInputStream.java From big-c with Apache License 2.0
public HdfsDataInputStream(DFSInputStream in) throws IOException {
  super(in);
}
Example #27
Source File: HdfsDataInputStream.java From hadoop with Apache License 2.0
private DFSInputStream getDFSInputStream() {
  if (in instanceof CryptoInputStream) {
    return (DFSInputStream) ((CryptoInputStream) in).getWrappedStream();
  }
  return (DFSInputStream) in;
}
Example #28
Source File: HdfsDataInputStream.java From hadoop with Apache License 2.0
public HdfsDataInputStream(CryptoInputStream in) throws IOException {
  super(in);
  Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream,
      "CryptoInputStream should wrap a DFSInputStream");
}
Example #29
Source File: HdfsDataInputStream.java From hadoop with Apache License 2.0
public HdfsDataInputStream(DFSInputStream in) throws IOException {
  super(in);
}
Example #30
Source File: ChunkReaderTest.java From pxf with Apache License 2.0
@Before
public void setUp() throws Exception {
  mockStream = mock(DFSInputStream.class);
}