Java Code Examples for org.apache.hadoop.fs.FileSystem#Statistics
The following examples show how to use org.apache.hadoop.fs.FileSystem#Statistics.
You can go to the original project or source file by following the reference above each example.
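Before the examples, here is a minimal, self-contained sketch of the typical FileSystem.Statistics life cycle: a shared per-scheme instance is fetched with FileSystem.getStatistics, stream implementations bump its counters as they perform I/O, and callers read the totals back. The class name StatisticsDemo is illustrative only; the FileSystem and Statistics calls are the standard Hadoop API.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Illustrative class name; not taken from any of the projects below.
public class StatisticsDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("file:///"), conf);

    // Statistics instances are shared per URI scheme; this fetches (or
    // creates) the one for the local file system.
    FileSystem.Statistics stats =
        FileSystem.getStatistics(fs.getUri().getScheme(), fs.getClass());

    // Stream implementations call the increment* methods as they do I/O,
    // which is what the constructors in the examples below set up.
    stats.incrementBytesRead(1024); // e.g. after a successful read()
    stats.incrementReadOps(1);      // one read operation completed

    System.out.println("bytes read: " + stats.getBytesRead());
    System.out.println("read ops:   " + stats.getReadOps());
  }
}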
Example 1
Source File: GoogleHadoopFSInputStream.java From hadoop-connectors with Apache License 2.0
/**
 * Constructs an instance of GoogleHadoopFSInputStream object.
 *
 * @param ghfs Instance of GoogleHadoopFileSystemBase.
 * @param gcsPath Path of the file to read from.
 * @param readOptions Options to use when reading from GCS.
 * @param statistics File system statistics object.
 * @throws IOException if an IO error occurs.
 */
GoogleHadoopFSInputStream(
    GoogleHadoopFileSystemBase ghfs,
    URI gcsPath,
    GoogleCloudStorageReadOptions readOptions,
    FileSystem.Statistics statistics)
    throws IOException {
  logger.atFine().log(
      "GoogleHadoopFSInputStream(gcsPath: %s, readOptions: %s)", gcsPath, readOptions);
  this.ghfs = ghfs;
  this.gcsPath = gcsPath;
  this.statistics = statistics;
  this.initTime = System.nanoTime();
  this.totalBytesRead = 0;
  this.channel = ghfs.getGcsFs().open(gcsPath, readOptions);
}
Example 2
Source File: GoogleHadoopOutputStream.java From hadoop-connectors with Apache License 2.0
/**
 * Constructs an instance of GoogleHadoopOutputStream object.
 *
 * @param ghfs Instance of GoogleHadoopFileSystemBase.
 * @param gcsPath Path of the file to write to.
 * @param statistics File system statistics object.
 * @param createFileOptions Options to use when creating the file.
 * @throws IOException if an IO error occurs.
 */
GoogleHadoopOutputStream(
    GoogleHadoopFileSystemBase ghfs,
    URI gcsPath,
    FileSystem.Statistics statistics,
    CreateFileOptions createFileOptions)
    throws IOException {
  logger.atFine().log(
      "GoogleHadoopOutputStream(gcsPath: %s, createFileOptions: %s)", gcsPath, createFileOptions);
  this.ghfs = ghfs;
  this.gcsPath = gcsPath;
  this.statistics = statistics;
  this.initTime = System.nanoTime();
  this.channel = createChannel(ghfs, gcsPath, createFileOptions);
  this.out =
      createOutputStream(this.channel, ghfs.getGcsFs().getOptions().getCloudStorageOptions());
}
Example 3
Source File: FTPInputStream.java From big-c with Apache License 2.0
public FTPInputStream(InputStream stream, FTPClient client,
    FileSystem.Statistics stats) {
  if (stream == null) {
    throw new IllegalArgumentException("Null InputStream");
  }
  if (client == null || !client.isConnected()) {
    throw new IllegalArgumentException("FTP client null or not connected");
  }
  this.wrappedStream = stream;
  this.client = client;
  this.stats = stats;
  this.pos = 0;
  this.closed = false;
}
Example 4
Source File: S3AOutputStream.java From big-c with Apache License 2.0
public ProgressableProgressListener(Upload upload, Progressable progress,
    FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
Example 5
Source File: GoogleHadoopFSInputStreamIntegrationTest.java From hadoop-connectors with Apache License 2.0
private static GoogleHadoopFSInputStream createGhfsInputStream(
    GoogleHadoopFileSystem ghfs, URI path) throws IOException {
  GoogleCloudStorageReadOptions options =
      ghfs.getGcsFs().getOptions().getCloudStorageOptions().getReadChannelOptions();
  return new GoogleHadoopFSInputStream(
      ghfs, path, options, new FileSystem.Statistics(ghfs.getScheme()));
}
Example 6
Source File: FileSystemStatisticUpdater.java From incubator-tez with Apache License 2.0
void updateCounters() {
  if (readBytesCounter == null) {
    readBytesCounter = counters.findCounter(scheme, FileSystemCounter.BYTES_READ);
  }
  if (writeBytesCounter == null) {
    writeBytesCounter = counters.findCounter(scheme, FileSystemCounter.BYTES_WRITTEN);
  }
  if (readOpsCounter == null) {
    readOpsCounter = counters.findCounter(scheme, FileSystemCounter.READ_OPS);
  }
  if (largeReadOpsCounter == null) {
    largeReadOpsCounter = counters.findCounter(scheme, FileSystemCounter.LARGE_READ_OPS);
  }
  if (writeOpsCounter == null) {
    writeOpsCounter = counters.findCounter(scheme, FileSystemCounter.WRITE_OPS);
  }
  long readBytes = 0;
  long writeBytes = 0;
  long readOps = 0;
  long largeReadOps = 0;
  long writeOps = 0;
  for (FileSystem.Statistics stat : stats) {
    readBytes = readBytes + stat.getBytesRead();
    writeBytes = writeBytes + stat.getBytesWritten();
    readOps = readOps + stat.getReadOps();
    largeReadOps = largeReadOps + stat.getLargeReadOps();
    writeOps = writeOps + stat.getWriteOps();
  }
  readBytesCounter.setValue(readBytes);
  writeBytesCounter.setValue(writeBytes);
  readOpsCounter.setValue(readOps);
  largeReadOpsCounter.setValue(largeReadOps);
  writeOpsCounter.setValue(writeOps);
}
Example 7
Source File: Task.java From big-c with Apache License 2.0
/**
 * Gets a handle to the Statistics instances for the scheme associated
 * with path.
 *
 * @param path the path.
 * @param conf the configuration to extract the scheme from if not part of
 *     the path.
 * @return the list of matching Statistics instances (empty if none is
 *     found for the scheme).
 */
protected static List<Statistics> getFsStatistics(Path path, Configuration conf)
    throws IOException {
  List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
  path = path.getFileSystem(conf).makeQualified(path);
  String scheme = path.toUri().getScheme();
  for (Statistics stats : FileSystem.getAllStatistics()) {
    if (stats.getScheme().equals(scheme)) {
      matchedStats.add(stats);
    }
  }
  return matchedStats;
}
Example 8
Source File: S3AInputStream.java From hadoop with Apache License 2.0
public S3AInputStream(String bucket, String key, long contentLength,
    AmazonS3Client client, FileSystem.Statistics stats) {
  this.bucket = bucket;
  this.key = key;
  this.contentLength = contentLength;
  this.client = client;
  this.stats = stats;
  this.pos = 0;
  this.closed = false;
  this.wrappedStream = null;
}
Example 9
Source File: SwiftNativeInputStream.java From big-c with Apache License 2.0
public SwiftNativeInputStream(SwiftNativeFileSystemStore storeNative,
    FileSystem.Statistics statistics, Path path, long bufferSize)
    throws IOException {
  this.nativeStore = storeNative;
  this.statistics = statistics;
  this.path = path;
  if (bufferSize <= 0) {
    throw new IllegalArgumentException("Invalid buffer size");
  }
  this.bufferSize = bufferSize;
  // initial buffer fill
  this.httpStream = storeNative.getObject(path).getInputStream();
  // fillBuffer(0);
}
Example 10
Source File: GoogleHadoopSyncableOutputStreamTest.java From hadoop-connectors with Apache License 2.0
@Test
public void testExceptionOnDelete() throws IOException {
  Path objectPath = new Path(ghfs.getFileSystemRoot(), "dir/object2.txt");
  GoogleHadoopSyncableOutputStream fout =
      new GoogleHadoopSyncableOutputStream(
          ghfs,
          ghfs.getGcsPath(objectPath),
          new FileSystem.Statistics(ghfs.getScheme()),
          CreateFileOptions.DEFAULT_OVERWRITE,
          SyncableOutputStreamOptions.DEFAULT,
          mockExecutorService);

  IOException fakeIoException = new IOException("fake io exception");
  when(mockExecutorService.submit(any(Callable.class)))
      .thenReturn(Futures.immediateFailedFuture(new ExecutionException(fakeIoException)));

  byte[] data1 = new byte[] {0x0f, 0x0e, 0x0e, 0x0d};
  byte[] data2 = new byte[] {0x0b, 0x0e, 0x0e, 0x0f};

  fout.write(data1, 0, data1.length);
  fout.sync(); // This one commits straight into destination.

  fout.write(data2, 0, data2.length);
  fout.sync(); // This one enqueues the delete, but doesn't propagate exception yet.
  verify(mockExecutorService).submit(any(Callable.class));

  IOException thrown = assertThrows(IOException.class, fout::close);
  assertThat(thrown).hasCauseThat().hasMessageThat().contains(fakeIoException.getMessage());

  verify(mockExecutorService, times(2)).submit(any(Callable.class));
}
Example 11
Source File: Utils.java From tez with Apache License 2.0
/**
 * Gets a handle to the Statistics instances for the scheme associated
 * with path.
 *
 * @param path the path.
 * @param conf the configuration to extract the scheme from if not part of
 *     the path.
 * @return the list of matching Statistics instances (empty if none is
 *     found for the scheme).
 */
@Private
public static List<Statistics> getFsStatistics(Path path, Configuration conf)
    throws IOException {
  List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
  path = path.getFileSystem(conf).makeQualified(path);
  String scheme = path.toUri().getScheme();
  for (Statistics stats : FileSystem.getAllStatistics()) {
    if (stats.getScheme().equals(scheme)) {
      matchedStats.add(stats);
    }
  }
  return matchedStats;
}
Example 12
Source File: Task.java From RDFS with Apache License 2.0
FileSystemStatisticUpdater(String uriScheme, FileSystem.Statistics stats) {
  this.stats = stats;
  this.counterNames = getFileSystemCounterNames(uriScheme);
}
Example 13
Source File: HadoopFileSystemIntegrationHelper.java From hadoop-connectors with Apache License 2.0
/**
 * Writes a file with the given buffer repeated numWrites times.
 *
 * @param hadoopPath Path of the file to create.
 * @param buffer Data to write.
 * @param numWrites Number of times to repeat the data.
 * @param overwrite If true, overwrite any existing file.
 * @return Number of bytes written.
 */
public int writeFile(Path hadoopPath, byte[] buffer, int numWrites, boolean overwrite)
    throws IOException {
  int numBytesWritten = -1;
  int totalBytesWritten = 0;

  long fileSystemBytesWritten = 0;
  FileSystem.Statistics stats =
      FileSystem.getStatistics(ghfsFileSystemDescriptor.getScheme(), ghfs.getClass());
  if (stats != null) {
    // Let it be null in case no stats have been added for our scheme yet.
    fileSystemBytesWritten = stats.getBytesWritten();
  }
  try (FSDataOutputStream writeStream =
      ghfs.create(
          hadoopPath,
          FsPermission.getDefault(),
          overwrite,
          GoogleHadoopFileSystemConfiguration.GCS_OUTPUT_STREAM_BUFFER_SIZE.getDefault(),
          GoogleHadoopFileSystemBase.REPLICATION_FACTOR_DEFAULT,
          GoogleHadoopFileSystemConfiguration.BLOCK_SIZE.getDefault(),
          /* progress= */ null)) {
    for (int i = 0; i < numWrites; i++) {
      writeStream.write(buffer, 0, buffer.length);
      numBytesWritten = buffer.length;
      totalBytesWritten += numBytesWritten;
    }
  }

  // After the write, the stats better be non-null for our ghfs scheme.
  stats = FileSystem.getStatistics(ghfsFileSystemDescriptor.getScheme(), ghfs.getClass());
  assertThat(stats).isNotNull();
  long endFileSystemBytesWritten = stats.getBytesWritten();
  int bytesWrittenStats = (int) (endFileSystemBytesWritten - fileSystemBytesWritten);
  if (statistics == FileSystemStatistics.EXACT) {
    assertWithMessage("FS statistics mismatch fetched from class '%s'", ghfs.getClass())
        .that(bytesWrittenStats)
        .isEqualTo(totalBytesWritten);
  } else if (statistics == FileSystemStatistics.GREATER_OR_EQUAL) {
    assertWithMessage("Expected %d <= %d", totalBytesWritten, bytesWrittenStats)
        .that(totalBytesWritten <= bytesWrittenStats)
        .isTrue();
  } else if (statistics == FileSystemStatistics.NONE) {
    // Do not perform any check because stats are either not maintained or are erratic.
  } else if (statistics == FileSystemStatistics.IGNORE) {
    // NO-OP
  }

  return totalBytesWritten;
}
Example 14
Source File: HdfsDataOutputStream.java From big-c with Apache License 2.0
public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(out, stats, startPosition);
}
Example 15
Source File: HdfsDataOutputStream.java From hadoop with Apache License 2.0
public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
    long startPosition) throws IOException {
  super(out, stats, startPosition);
  Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
      "CryptoOutputStream should wrap a DFSOutputStream");
}
Example 16
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Same as this(nameNodeUri, null, conf, stats);
 * @see #DFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics)
 */
public DFSClient(URI nameNodeUri, Configuration conf,
    FileSystem.Statistics stats) throws IOException {
  this(nameNodeUri, null, conf, stats);
}
Example 17
Source File: Task.java From hadoop-gpu with Apache License 2.0
FileSystemStatisticUpdater(String uriScheme, FileSystem.Statistics stats) {
  this.stats = stats;
  this.counterNames = getFileSystemCounterNames(uriScheme);
}
Example 18
Source File: FileSystemStatisticUpdater.java From tez with Apache License 2.0
FileSystemStatisticUpdater(TezCounters counters, List<FileSystem.Statistics> stats,
    String scheme) {
  this.stats = stats;
  this.scheme = scheme;
  this.counters = counters;
}
Example 19
Source File: KFSImpl.java From hadoop-gpu with Apache License 2.0
public KFSImpl(String metaServerHost, int metaServerPort,
    FileSystem.Statistics stats) throws IOException {
  kfsAccess = new KfsAccess(metaServerHost, metaServerPort);
  statistics = stats;
}
Example 20
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats) throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();

  this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }
  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null, "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
      conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}