Java Code Examples for com.amazonaws.services.s3.AmazonS3#initiateMultipartUpload()
The following examples show how to use
com.amazonaws.services.s3.AmazonS3#initiateMultipartUpload() .
You can vote up the examples you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: S3WritableByteChannel.java From beam with Apache License 2.0 | 5 votes |
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options) throws IOException { this.amazonS3 = checkNotNull(amazonS3, "amazonS3"); this.options = checkNotNull(options); this.path = checkNotNull(path, "path"); checkArgument( atMostOne( options.getSSECustomerKey() != null, options.getSSEAlgorithm() != null, options.getSSEAwsKeyManagementParams() != null), "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)" + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time."); // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part. checkArgument( options.getS3UploadBufferSizeBytes() >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES, "S3UploadBufferSizeBytes must be at least %s bytes", S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES); this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes()); eTags = new ArrayList<>(); ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setContentType(contentType); if (options.getSSEAlgorithm() != null) { objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm()); } InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(path.getBucket(), path.getKey()) .withStorageClass(options.getS3StorageClass()) .withObjectMetadata(objectMetadata); request.setSSECustomerKey(options.getSSECustomerKey()); request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams()); InitiateMultipartUploadResult result; try { result = amazonS3.initiateMultipartUpload(request); } catch (AmazonClientException e) { throw new IOException(e); } uploadId = result.getUploadId(); }
Example 2
Source File: PublisherTools.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0 | 5 votes |
public static void uploadFile( final File file, final Artifact artifact, final CompressionType compressionType, final EncryptionKey encryptionKey, final AmazonS3 amazonS3, final BuildListener listener) throws IOException { LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file); final String bucketName = artifact.getLocation().getS3Location().getBucketName(); final String objectKey = artifact.getLocation().getS3Location().getObjectKey(); final List<PartETag> partETags = new ArrayList<>(); final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest( bucketName, objectKey, createObjectMetadata(compressionType)) .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey)); final InitiateMultipartUploadResult initiateMultipartUploadResult = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest); final long contentLength = file.length(); long filePosition = 0; long partSize = 5 * 1024 * 1024; // Set part size to 5 MB for (int i = 1; filePosition < contentLength; i++) { partSize = Math.min(partSize, (contentLength - filePosition)); final UploadPartRequest uploadPartRequest = new UploadPartRequest() .withBucketName(bucketName) .withKey(objectKey) .withUploadId(initiateMultipartUploadResult.getUploadId()) .withPartNumber(i) .withFileOffset(filePosition) .withFile(file) .withPartSize(partSize); partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag()); filePosition += partSize; } final CompleteMultipartUploadRequest completeMultipartUpload = new CompleteMultipartUploadRequest( bucketName, objectKey, initiateMultipartUploadResult.getUploadId(), partETags); amazonS3.completeMultipartUpload(completeMultipartUpload); LoggingHelper.log(listener, "Upload successful"); }
Example 3
Source File: S3Util.java From s3committer with Apache License 2.0 | 4 votes |
/**
 * Uploads {@code localFile} to {@code bucket}/{@code key} via the S3 multipart API and returns a
 * {@link PendingUpload} describing the not-yet-committed upload. If anything fails before the
 * pending upload is built, the multipart upload is aborted (best-effort) before the exception
 * propagates.
 */
public static PendingUpload multipartUpload(
    AmazonS3 client, File localFile, String partition,
    String bucket, String key, long uploadPartSize) {

  InitiateMultipartUploadResult initResult = client.initiateMultipartUpload(
      new InitiateMultipartUploadRequest(bucket, key));
  String uploadId = initResult.getUploadId();

  boolean succeeded = false;
  try {
    Map<Integer, String> etagsByPartNumber = Maps.newLinkedHashMap();

    // Number of fixed-size parts; a trailing remainder adds one partial part.
    long numParts = (localFile.length() / uploadPartSize +
        ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));
    Preconditions.checkArgument(numParts > 0,
        "Cannot upload 0 byte file: " + localFile);

    long offset = 0;
    for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
      long size = Math.min(localFile.length() - offset, uploadPartSize);
      UploadPartRequest partRequest = new UploadPartRequest()
          .withBucketName(bucket)
          .withKey(key)
          .withPartNumber(partNumber)
          .withUploadId(uploadId)
          .withFile(localFile)
          .withFileOffset(offset)
          .withPartSize(size)
          .withLastPart(partNumber == numParts);
      PartETag etag = client.uploadPart(partRequest).getPartETag();
      etagsByPartNumber.put(etag.getPartNumber(), etag.getETag());
      offset += uploadPartSize;
    }

    PendingUpload pending = new PendingUpload(
        partition, bucket, key, uploadId, etagsByPartNumber);
    succeeded = true;
    return pending;

  } finally {
    if (!succeeded) {
      try {
        client.abortMultipartUpload(
            new AbortMultipartUploadRequest(bucket, key, uploadId));
      } catch (AmazonClientException e) {
        LOG.error("Failed to abort multi-part upload", e);
      }
    }
  }
}
Example 4
Source File: AwsUtils.java From studio with GNU General Public License v3.0 | 4 votes |
public static void uploadStream(String inputBucket, String inputKey, AmazonS3 s3Client, int partSize, String filename, InputStream content) throws AwsException { List<PartETag> etags = new LinkedList<>(); InitiateMultipartUploadResult initResult = null; try { int partNumber = 1; long totalBytes = 0; MimetypesFileTypeMap mimeMap = new MimetypesFileTypeMap(); ObjectMetadata meta = new ObjectMetadata(); meta.setContentType(mimeMap.getContentType(filename)); InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(inputBucket, inputKey, meta); initResult = s3Client.initiateMultipartUpload(initRequest); byte[] buffer = new byte[partSize]; int read; logger.debug("Starting upload for file '{}'", filename); while (0 < (read = IOUtils.read(content, buffer))) { totalBytes += read; if (logger.isTraceEnabled()) { logger.trace("Uploading part {} with size {} - total: {}", partNumber, read, totalBytes); } ByteArrayInputStream bais = new ByteArrayInputStream(buffer, 0, read); UploadPartRequest uploadRequest = new UploadPartRequest() .withUploadId(initResult.getUploadId()) .withBucketName(inputBucket) .withKey(inputKey) .withInputStream(bais) .withPartNumber(partNumber) .withPartSize(read) .withLastPart(read < partSize); etags.add(s3Client.uploadPart(uploadRequest).getPartETag()); partNumber++; } if (totalBytes == 0) { // If the file is empty, use the simple upload instead of the multipart s3Client.abortMultipartUpload( new AbortMultipartUploadRequest(inputBucket, inputKey, initResult.getUploadId())); s3Client.putObject(inputBucket, inputKey, new ByteArrayInputStream(new byte[0]), meta); } else { CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(inputBucket, inputKey, initResult.getUploadId(), etags); s3Client.completeMultipartUpload(completeRequest); } logger.debug("Upload completed for file '{}'", filename); } catch (Exception e) { if (initResult != null) { s3Client.abortMultipartUpload(new 
AbortMultipartUploadRequest(inputBucket, inputKey, initResult.getUploadId())); } throw new AwsException("Upload of file '" + filename + "' failed", e); } }