com.amazonaws.event.ProgressEvent Java Examples
The following examples show how to use com.amazonaws.event.ProgressEvent. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
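Before the project examples, here is a minimal, self-contained sketch of the pattern they all follow: a ProgressListener is registered on an S3 request (or on the Transfer returned by the TransferManager), and the SDK invokes progressChanged with a ProgressEvent for every state change and byte-count update. The sketch assumes the AWS SDK for Java v1 with the S3 TransferManager module on the classpath; the bucket name, key, and local file are placeholders, not values taken from any of the projects below.

import java.io.File;

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class ProgressEventSketch {

  public static void main(String[] args) throws InterruptedException {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();

    // Hypothetical bucket, key, and file, used purely for illustration.
    PutObjectRequest request =
        new PutObjectRequest("example-bucket", "example-key", new File("example.dat"));

    // The listener receives a ProgressEvent for each state change and byte-count update.
    request.setGeneralProgressListener(new ProgressListener() {
      private long bytesTransferred;

      @Override
      public void progressChanged(ProgressEvent progressEvent) {
        bytesTransferred += progressEvent.getBytesTransferred();
        if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
          System.out.println("Upload complete, " + bytesTransferred + " bytes transferred");
        } else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_FAILED_EVENT) {
          System.err.println("Upload failed after " + bytesTransferred + " bytes");
        }
      }
    });

    Upload upload = transferManager.upload(request);
    upload.waitForCompletion();
    transferManager.shutdownNow();
  }
}

The project examples below vary mainly in where the listener is attached and in whether they inspect the newer getEventType() enum or the older getEventCode() constants.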
Example #1
Source File: S3Accessor.java, from datacollector (Apache License 2.0, 6 votes)
@Override
public void progressChanged(ProgressEvent progressEvent) {
  switch (progressEvent.getEventType()) {
    case TRANSFER_STARTED_EVENT:
      LOG.debug("Started uploading object {} into Amazon S3", object);
      break;
    case TRANSFER_COMPLETED_EVENT:
      LOG.debug("Completed uploading object {} into Amazon S3", object);
      break;
    case TRANSFER_FAILED_EVENT:
      LOG.warn("Failed uploading object {} into Amazon S3", object);
      break;
    default:
      break;
  }
}
Example #2
Source File: S3Backuper.java, from cassandra-backup (Apache License 2.0, 6 votes)
@Override
public void progressChanged(final ProgressEvent progressEvent) {
  final ProgressEventType progressEventType = progressEvent.getEventType();

  if (progressEventType == ProgressEventType.TRANSFER_PART_COMPLETED_EVENT) {
    logger.debug("Successfully uploaded part for {}.", s3RemoteObjectReference.canonicalPath);
  }

  if (progressEventType == ProgressEventType.TRANSFER_PART_FAILED_EVENT) {
    logger.debug("Failed to upload part for {}.", s3RemoteObjectReference.canonicalPath);
  }

  if (progressEventType == TRANSFER_FAILED_EVENT) {
    logger.debug("Failed to upload {}.", s3RemoteObjectReference.canonicalPath);
  }

  if (progressEventType == TRANSFER_COMPLETED_EVENT) {
    logger.debug("Successfully uploaded {}.", s3RemoteObjectReference.canonicalPath);
  }
}
Example #3
Source File: RetriableFileCopyCommand.java, from circus-train (Apache License 2.0, 6 votes)
@Override
public void progressChanged(ProgressEvent progressEvent) {
  StringBuilder message = new StringBuilder();
  switch (progressEvent.getEventType()) {
    case TRANSFER_STARTED_EVENT:
      message.append("Starting: ").append(description);
      break;
    case TRANSFER_COMPLETED_EVENT:
      message.append("Completed: ").append(description);
      break;
    case TRANSFER_FAILED_EVENT:
      message.append("Failed: ").append(description);
      break;
    default:
      break;
  }
  context.setStatus(message.toString());
}
Example #4
Source File: S3AOutputStream.java, from hadoop (Apache License 2.0, 6 votes)
public void progressChanged(ProgressEvent progressEvent) {
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now
  if (progressEvent.getEventCode() == ProgressEvent.PART_STARTED_EVENT_CODE ||
      progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE) {
    statistics.incrementWriteOps(1);
  }

  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }
  lastBytesTransferred = transferred;
}
Example #5
Source File: AmazonS3SinkMockTests.java, from spring-cloud-stream-app-starters (Apache License 2.0, 6 votes)
@Bean
public S3ProgressListener s3ProgressListener() {
  return new S3ProgressListener() {

    @Override
    public void onPersistableTransfer(PersistableTransfer persistableTransfer) {
    }

    @Override
    public void progressChanged(ProgressEvent progressEvent) {
      if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(progressEvent.getEventType())) {
        transferCompletedLatch().countDown();
      }
    }

  };
}
Example #6
Source File: S3AOutputStream.java, from big-c (Apache License 2.0, 6 votes)
public void progressChanged(ProgressEvent progressEvent) {
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now
  if (progressEvent.getEventCode() == ProgressEvent.PART_STARTED_EVENT_CODE ||
      progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE) {
    statistics.incrementWriteOps(1);
  }

  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }
  lastBytesTransferred = transferred;
}
Example #7
Source File: PrestoS3FileSystem.java, from presto (Apache License 2.0, 6 votes)
private ProgressListener createProgressListener(Transfer transfer) {
  return new ProgressListener() {
    private ProgressEventType previousType;
    private double previousTransferred;

    @Override
    public synchronized void progressChanged(ProgressEvent progressEvent) {
      ProgressEventType eventType = progressEvent.getEventType();
      if (previousType != eventType) {
        log.debug("Upload progress event (%s/%s): %s", bucket, key, eventType);
        previousType = eventType;
      }

      double transferred = transfer.getProgress().getPercentTransferred();
      if (transferred >= (previousTransferred + 10.0)) {
        log.debug("Upload percentage (%s/%s): %.0f%%", bucket, key, transferred);
        previousTransferred = transferred;
      }
    }
  };
}
Example #8
Source File: S3UploadTask.java, from aws-mobile-self-paced-labs-samples (Apache License 2.0, 5 votes)
@Override
public void progressChanged(ProgressEvent pe) {
  // Running total of bytes reported by progress events, published for UI updates.
  total += pe.getBytesTransferred();
  publishProgress(total);
  Log.i("bytestransferred:", total + " bytes");
}
Example #9
Source File: S3Manager.java, from datacollector (Apache License 2.0, 5 votes)
String uploadToS3(String name, File file) throws IOException {
  long start = System.currentTimeMillis();
  long fileLength = file.length() / (1000 * 1000);
  String bucket = getBucket(pipelineEmrConfigs.getS3StagingUri());
  String path = getPath(pipelineEmrConfigs.getS3StagingUri()) + "/" + pipelineId + "/" + uniquePrefix;
  String s3Uri = "s3://" + bucket + "/" + path + "/" + name;
  try {
    // Upload
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, path + "/" + name, file);
    putObjectRequest.setGeneralProgressListener(new ProgressListener() {
      long counter;
      long tick = -1;
      @Override
      public void progressChanged(ProgressEvent progressEvent) {
        counter += progressEvent.getBytesTransferred();
        if (counter / (100 * 1000000) > tick) {
          tick++;
          LOG.debug(
              "Uploading '{}' {}/{} MB, {} secs",
              s3Uri,
              counter / (1000 * 1000),
              fileLength,
              (System.currentTimeMillis() - start) / 1000
          );
        }
      }
    });

    getS3TransferManager().upload(putObjectRequest).waitForCompletion();
    LOG.info("Uploaded file at: {}", s3Uri);
    return s3Uri;
  } catch (SdkBaseException|InterruptedException ex) {
    throw new IOException(ex);
  }
}
Example #10
Source File: S3ScanWriter.java, from emodb (Apache License 2.0, 5 votes)
/**
 * Starts an asynchronous upload and returns a ListenableFuture for handling the result.
 */
synchronized ListenableFuture<String> upload() {
  // Reset values from possible prior attempt
  _attempts += 1;
  _bytesTransferred = 0;

  // Separate the future returned to the caller from the future generated by submitting the
  // putObject request. If the writer is closed then uploadFuture may be canceled before it executes,
  // in which case it may not trigger any callbacks. To ensure there is always a callback resultFuture is
  // tracked independently and, in the event that the upload is aborted, gets set on abort().
  _resultFuture = SettableFuture.create();

  _uploadFuture = _uploadService.submit(new Runnable() {
    @Override
    public void run() {
      try {
        ProgressListener progressListener = new ProgressListener() {
          @Override
          public void progressChanged(ProgressEvent progressEvent) {
            // getBytesTransferred() returns zero for all events not pertaining to the file transfer
            _bytesTransferred += progressEvent.getBytesTransferred();
          }
        };

        PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, _key, _file);
        putObjectRequest.setGeneralProgressListener(progressListener);

        PutObjectResult result = _amazonS3.putObject(putObjectRequest);
        _resultFuture.set(result.getETag());
      } catch (Throwable t) {
        _resultFuture.setException(t);
      }
    }
  });

  return _resultFuture;
}
Example #11
Source File: S3AFileSystem.java, from big-c (Apache License 2.0, 5 votes)
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
Example #12
Source File: S3BroadcastManager.java, from kickflip-android-sdk (Apache License 2.0, 5 votes)
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
  if (VERBOSE) Log.i(TAG, "Queueing upload " + key);

  final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
  por.setGeneralProgressListener(new ProgressListener() {
    final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
    private long uploadStartTime;

    @Override
    public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
      try {
        if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
          uploadStartTime = System.currentTimeMillis();
        } else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
          long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
          int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
          if (VERBOSE) {
            Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis)
                + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
          }
          mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
        } else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
          Log.w(TAG, "Upload failed for " + url);
        }
      } catch (Exception excp) {
        Log.e(TAG, "ProgressListener error");
        excp.printStackTrace();
      }
    }
  });
  por.setCannedAcl(CannedAccessControlList.PublicRead);

  for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
    S3RequestInterceptor interceptor = ref.get();
    if (interceptor != null) {
      interceptor.interceptRequest(por);
    }
  }
  mQueue.add(new Pair<>(por, lastUpload));
}
Example #13
Source File: S3AFileSystem.java, from hadoop (Apache License 2.0, 5 votes)
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
Example #14
Source File: S3DownloadTask.java, from aws-mobile-self-paced-labs-samples (Apache License 2.0, 5 votes)
@Override
public void progressChanged(ProgressEvent pe) {
  // Running total of bytes reported by progress events, published for UI updates.
  total += pe.getBytesTransferred();
  publishProgress(total);
  Log.i("bytestransferred:", total + " bytes");
}
Example #15
Source File: S3AFileSystem.java, from big-c (Apache License 2.0, 4 votes)
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
Example #16
Source File: S3KeyCopyingTOCPayloadHandler.java, from s3-bucket-loader (Apache License 2.0, 4 votes)
@Override
public void progressChanged(ProgressEvent progressEvent) {
  logger.debug("progressChanged() " + progressEvent.getEventType()
      + " bytes:" + progressEvent.getBytes()
      + " bytesTransferred: " + progressEvent.getBytesTransferred());
}
Example #17
Source File: S3BucketObjectLister.java, from s3-bucket-loader (Apache License 2.0, 4 votes)
@Override
public void progressChanged(ProgressEvent progressEvent) {
  logger.debug("progressChanged() " + progressEvent.getEventType()
      + " bytes:" + progressEvent.getBytes()
      + " bytesTransferred: " + progressEvent.getBytesTransferred());
}
Example #18
Source File: AwsGlacier.java, from core (GNU General Public License v3.0, 4 votes)
/**
 * Called when there is a ProgressEvent during a download.
 *
 * @see com.amazonaws.event.ProgressListener#progressChanged(com.amazonaws.event.ProgressEvent)
 */
@Override
public void progressChanged(ProgressEvent progressEvent) {
  logger.info("Download progress event: {}", progressEvent);
}
Example #19
Source File: S3AFastOutputStream.java, from big-c (Apache License 2.0, 4 votes)
public void progressChanged(ProgressEvent progressEvent) {
  if (progress != null) {
    progress.progress();
  }
}
Example #20
Source File: S3OutputStream.java, from nifi-minifi (Apache License 2.0, 4 votes)
@Override
public void progressChanged(ProgressEvent progressEvent) {
  log.debug("Progress event: " + progressEvent);
}
Example #21
Source File: S3AFileSystem.java, from hadoop (Apache License 2.0, 4 votes)
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
Example #22
Source File: S3AFastOutputStream.java, from hadoop (Apache License 2.0, 4 votes)
public void progressChanged(ProgressEvent progressEvent) {
  if (progress != null) {
    progress.progress();
  }
}
Example #23
Source File: S3Restorer.java, from cassandra-backup (Apache License 2.0, 4 votes)
@Override
public void progressChanged(final ProgressEvent progressEvent) {
  if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
    logger.debug("Successfully downloaded {}.", objectReference.canonicalPath);
  }
}
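Several of the examples above attach the listener to the request with setGeneralProgressListener, while the Hadoop-derived ones attach it to the Transfer object returned by the TransferManager with addProgressListener. Below is a minimal sketch of the latter style; it assumes the same AWS SDK for Java v1 classes as the opening sketch, a caller-supplied TransferManager and PutObjectRequest (both hypothetical here), and Java 8 or later so the single-method ProgressListener can be written as a lambda.

import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;

class TransferListenerSketch {

  // Hypothetical helper: the listener is registered on the Transfer rather than on the request.
  static void uploadWithListener(TransferManager transferManager, PutObjectRequest request)
      throws InterruptedException {
    Upload upload = transferManager.upload(request);
    upload.addProgressListener(progressEvent ->
        System.out.println(progressEvent.getEventType() + ": "
            + progressEvent.getBytesTransferred() + " bytes transferred"));
    upload.waitForCompletion();
  }
}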