com.amazonaws.services.s3.model.InitiateMultipartUploadRequest Java Examples
The following examples show how to use com.amazonaws.services.s3.model.InitiateMultipartUploadRequest. Each example notes the project, source file, and license it was taken from, so you can consult the original source for full context.
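Before turning to the project-specific examples, here is a minimal end-to-end sketch of the lifecycle that InitiateMultipartUploadRequest begins: initiate the upload to obtain an upload ID, upload each part against that ID, then complete (or abort) the upload. The bucket, key, and file path below are illustrative placeholders rather than values from any example, and the client is built with the SDK's default builder; production code would add configuration, retries, and logging.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {

    public static void upload(File file) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "example-bucket"; // placeholder
        String key = "example-key";       // placeholder

        // Step 1: initiate the upload and keep the returned upload ID;
        // every subsequent part, complete, and abort call must carry it.
        InitiateMultipartUploadResult init =
            s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
        String uploadId = init.getUploadId();

        List<PartETag> partETags = new ArrayList<>();
        long partSize = 5L * 1024 * 1024; // parts must be at least 5 MB, except the last
        long filePosition = 0;
        try {
            // Step 2: upload the file in parts, collecting a PartETag for each.
            for (int partNumber = 1; filePosition < file.length(); partNumber++) {
                long size = Math.min(partSize, file.length() - filePosition);
                UploadPartRequest partRequest = new UploadPartRequest()
                    .withBucketName(bucket)
                    .withKey(key)
                    .withUploadId(uploadId)
                    .withPartNumber(partNumber)
                    .withFileOffset(filePosition)
                    .withFile(file)
                    .withPartSize(size);
                partETags.add(s3.uploadPart(partRequest).getPartETag());
                filePosition += size;
            }
            // Step 3: complete the upload; S3 assembles the parts into one object.
            s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
        } catch (AmazonClientException e) {
            // Abort on failure so S3 does not retain (and bill for) orphaned parts.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            throw e;
        }
    }
}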
Example #1
Source File: S3InitiateFileUploadOperatorTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testInitiateUpload() {
    InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
    result.setUploadId(uploadId);

    MockitoAnnotations.initMocks(this);
    when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
    when(fileMetadata.getFilePath()).thenReturn("/tmp/file1.txt");
    when(fileMetadata.getNumberOfBlocks()).thenReturn(4);

    S3InitiateFileUploadTest operator = new S3InitiateFileUploadTest();
    operator.setBucketName("testbucket");
    operator.setup(context);

    CollectorTestSink<S3InitiateFileUploadOperator.UploadFileMetadata> fileSink = new CollectorTestSink<>();
    CollectorTestSink<Object> tmp = (CollectorTestSink)fileSink;
    operator.fileMetadataOutput.setSink(tmp);

    operator.beginWindow(0);
    operator.processTuple(fileMetadata);
    operator.endWindow();

    S3InitiateFileUploadOperator.UploadFileMetadata emitted =
        (S3InitiateFileUploadOperator.UploadFileMetadata)tmp.collectedTuples.get(0);
    Assert.assertEquals("Upload ID :", uploadId, emitted.getUploadId());
}
Example #2
Source File: COSAPIClient.java From stocator with Apache License 2.0
/**
 * Start the multipart upload process.
 * @return the upload result containing the ID
 * @throws IOException IO problem
 */
String initiateMultiPartUpload(Boolean atomicWrite, String etag) throws IOException {
    LOG.debug("Initiating Multipart upload");
    ObjectMetadata om = newObjectMetadata(-1);
    // if atomic write is enabled use the etag to ensure put request is atomic
    if (atomicWrite) {
        if (etag != null) {
            LOG.debug("Atomic write - setting If-Match header");
            om.setHeader("If-Match", etag);
        } else {
            LOG.debug("Atomic write - setting If-None-Match header");
            om.setHeader("If-None-Match", "*");
        }
    }
    final InitiateMultipartUploadRequest initiateMPURequest =
        new InitiateMultipartUploadRequest(mBucket, key, om);
    try {
        return mClient.initiateMultipartUpload(initiateMPURequest).getUploadId();
    } catch (AmazonClientException ace) {
        throw translateException("initiate MultiPartUpload", key, ace);
    }
}
Example #3
Source File: PublisherCallableTest.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void canUploadMultipleOutputArtifacts() throws IOException {
    // given
    jenkinsOutputs.clear();
    jenkinsOutputs.add(new OutputArtifact(TEST_FILE, "dummyArtifact"));
    jenkinsOutputs.add(new OutputArtifact("Dir1", "dummyArtifact1"));
    jenkinsOutputs.add(new OutputArtifact("Dir2", "dummyArtifact2"));

    outputArtifacts.clear();
    outputArtifacts.add(outputArtifact);
    outputArtifacts.add(outputArtifact1);
    outputArtifacts.add(outputArtifact2);

    // when
    publisher.invoke(workspace, null);

    // then
    verify(s3Client, times(3)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
}
Example #4
Source File: S3AFastOutputStream.java From big-c with Apache License 2.0
private MultiPartUpload initiateMultiPartUpload() throws IOException {
    final ObjectMetadata om = createDefaultMetadata();
    final InitiateMultipartUploadRequest initiateMPURequest =
        new InitiateMultipartUploadRequest(bucket, key, om);
    initiateMPURequest.setCannedACL(cannedACL);
    try {
        return new MultiPartUpload(
            client.initiateMultipartUpload(initiateMPURequest).getUploadId());
    } catch (AmazonServiceException ase) {
        throw new IOException("Unable to initiate MultiPartUpload (server side)" +
            ": " + ase, ase);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to initiate MultiPartUpload (client side)" +
            ": " + ace, ace);
    }
}
Example #5
Source File: S3InitiateFileUploadOperator.java From attic-apex-malhar with Apache License 2.0
/**
 * For the input file, initiate the upload and emit the UploadFileMetadata through the fileMetadataOutput,
 * uploadMetadataOutput ports.
 * @param tuple given tuple
 */
protected void processTuple(AbstractFileSplitter.FileMetadata tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    String keyName = getKeyName(tuple.getFilePath());
    String uploadId = "";
    if (tuple.getNumberOfBlocks() > 1) {
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
        initRequest.setObjectMetadata(createObjectMetadata());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }
    UploadFileMetadata uploadFileMetadata = new UploadFileMetadata(tuple, uploadId, keyName);
    fileMetadataOutput.emit(uploadFileMetadata);
    uploadMetadataOutput.emit(uploadFileMetadata);
    currentWindowRecoveryState.add(uploadFileMetadata);
}
Example #6
Source File: AwsSdkTest.java From s3proxy with Apache License 2.0
@Test
public void testAtomicMpuAbort() throws Exception {
    String key = "testAtomicMpuAbort";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, key, BYTE_SOURCE.openStream(), metadata);

    InitiateMultipartUploadRequest initRequest =
        new InitiateMultipartUploadRequest(containerName, key);
    InitiateMultipartUploadResult initResponse =
        client.initiateMultipartUpload(initRequest);
    String uploadId = initResponse.getUploadId();

    client.abortMultipartUpload(new AbortMultipartUploadRequest(
        containerName, key, uploadId));

    S3Object object = client.getObject(containerName, key);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
        BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
         InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
Example #7
Source File: AwsSdkTest.java From s3proxy with Apache License 2.0
@Test
public void testPartNumberMarker() throws Exception {
    String blobName = "foo";
    InitiateMultipartUploadResult result = client.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(containerName, blobName));
    ListPartsRequest request = new ListPartsRequest(containerName, blobName,
        result.getUploadId());

    client.listParts(request.withPartNumberMarker(0));

    try {
        client.listParts(request.withPartNumberMarker(1));
        Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
    } catch (AmazonS3Exception e) {
        assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
    }
}
Example #8
Source File: TestStandardS3EncryptionService.java From nifi with Apache License 2.0
@Test
public void testRequests() {
    final ObjectMetadata metadata = new ObjectMetadata();
    final GetObjectRequest getObjectRequest = new GetObjectRequest("", "");
    final InitiateMultipartUploadRequest initUploadRequest = new InitiateMultipartUploadRequest("", "");
    final PutObjectRequest putObjectRequest = new PutObjectRequest("", "", "");
    final UploadPartRequest uploadPartRequest = new UploadPartRequest();

    service.configureGetObjectRequest(getObjectRequest, metadata);
    Assert.assertNull(getObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureUploadPartRequest(uploadPartRequest, metadata);
    Assert.assertNull(uploadPartRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configurePutObjectRequest(putObjectRequest, metadata);
    Assert.assertNull(putObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureInitiateMultipartUploadRequest(initUploadRequest, metadata);
    Assert.assertNull(initUploadRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());
}
Example #9
Source File: S3AFastOutputStream.java From hadoop with Apache License 2.0
private MultiPartUpload initiateMultiPartUpload() throws IOException {
    final ObjectMetadata om = createDefaultMetadata();
    final InitiateMultipartUploadRequest initiateMPURequest =
        new InitiateMultipartUploadRequest(bucket, key, om);
    initiateMPURequest.setCannedACL(cannedACL);
    try {
        return new MultiPartUpload(
            client.initiateMultipartUpload(initiateMPURequest).getUploadId());
    } catch (AmazonServiceException ase) {
        throw new IOException("Unable to initiate MultiPartUpload (server side)" +
            ": " + ase, ase);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to initiate MultiPartUpload (client side)" +
            ": " + ace, ace);
    }
}
Example #10
Source File: S3DataOutputStream.java From stratosphere with Apache License 2.0
private String initiateMultipartUpload() throws IOException {
    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request =
        new InitiateMultipartUploadRequest(this.bucket, this.object);

    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }

    try {
        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
Example #11
Source File: S3MultipartUploadTest.java From data-highway with Apache License 2.0
@Test
public void complete() {
    InitiateMultipartUploadResult response = mock(InitiateMultipartUploadResult.class);
    when(response.getUploadId()).thenReturn(UPLOAD_ID);
    when(s3.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(response);

    ArgumentCaptor<CompleteMultipartUploadRequest> request = ArgumentCaptor
        .forClass(CompleteMultipartUploadRequest.class);
    @SuppressWarnings("unchecked")
    List<PartETag> partETags = mock(List.class);
    when(s3.completeMultipartUpload(request.capture())).thenReturn(null);

    underTest.start();
    underTest.complete(UPLOAD_ID, partETags);

    assertThat(request.getValue().getBucketName(), is(BUCKET));
    assertThat(request.getValue().getKey(), is(KEY));
    assertThat(request.getValue().getUploadId(), is(UPLOAD_ID));
    assertThat(request.getValue().getPartETags(), is(partETags));
}
Example #12
Source File: S3TransporterTest.java From bender with Apache License 2.0
private AmazonS3Client getMockClient() {
    AmazonS3Client mockClient = spy(AmazonS3Client.class);

    UploadPartResult uploadResult = new UploadPartResult();
    uploadResult.setETag("foo");
    doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

    InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
    initUploadResult.setUploadId("123");
    doReturn(initUploadResult).when(mockClient)
        .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

    return mockClient;
}
Example #13
Source File: OutputS3.java From crate with Apache License 2.0
private S3OutputStream(Executor executor, URI uri, S3ClientHelper s3ClientHelper) throws IOException {
    this.executor = executor;
    bucketName = uri.getHost();
    key = uri.getPath().substring(1);
    outputStream = new ByteArrayOutputStream();
    client = s3ClientHelper.client(uri);
    multipartUpload = client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, key));
}
Example #14
Source File: S3WritableByteChannel.java From beam with Apache License 2.0
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
    throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    this.options = checkNotNull(options);
    this.path = checkNotNull(path, "path");
    checkArgument(
        atMostOne(
            options.getSSECustomerKey() != null,
            options.getSSEAlgorithm() != null,
            options.getSSEAwsKeyManagementParams() != null),
        "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
            + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
    // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
    checkArgument(
        options.getS3UploadBufferSizeBytes()
            >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
        "S3UploadBufferSizeBytes must be at least %s bytes",
        S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
    this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
    eTags = new ArrayList<>();

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    if (options.getSSEAlgorithm() != null) {
        objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
    }
    InitiateMultipartUploadRequest request =
        new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
            .withStorageClass(options.getS3StorageClass())
            .withObjectMetadata(objectMetadata);
    request.setSSECustomerKey(options.getSSECustomerKey());
    request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
    InitiateMultipartUploadResult result;
    try {
        result = amazonS3.initiateMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadId = result.getUploadId();
}
Example #15
Source File: S3OutputStream.java From nifi-minifi with Apache License 2.0
private MultipartUpload newMultipartUpload() throws IOException {
    InitiateMultipartUploadRequest initRequest =
        new InitiateMultipartUploadRequest(bucket, key, new ObjectMetadata());
    try {
        return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
    } catch (AmazonClientException e) {
        throw new IOException("Unable to initiate MultipartUpload: " + e, e);
    }
}
Example #16
Source File: TestS3PartitionedTaskCommit.java From s3committer with Apache License 2.0
@Test
public void testReplace() throws Exception {
    // TODO: this committer needs to delete the data that already exists
    // This test should assert that the delete was done
    FileSystem mockS3 = getMockS3();

    getTAC().getConfiguration()
        .set(S3Committer.CONFLICT_MODE, "replace");

    S3PartitionedOutputCommitter committer = newTaskCommitter();

    committer.setupTask(getTAC());
    TestUtil.createTestOutputFiles(relativeFiles,
        committer.getTaskAttemptPath(getTAC()), getTAC().getConfiguration());

    // test success when one partition already exists
    reset(mockS3);
    when(mockS3
        .exists(new Path(OUTPUT_PATH, relativeFiles.get(3)).getParent()))
        .thenReturn(true);

    committer.commitTask(getTAC());

    Set<String> files = Sets.newHashSet();
    for (InitiateMultipartUploadRequest request :
        getMockResults().getRequests().values()) {
        Assert.assertEquals(MockS3FileSystem.BUCKET, request.getBucketName());
        files.add(request.getKey());
    }
    Assert.assertEquals("Should have the right number of uploads",
        relativeFiles.size(), files.size());

    Set<String> expected = Sets.newHashSet();
    for (String relative : relativeFiles) {
        expected.add(OUTPUT_PREFIX + "/" +
            Paths.addUUID(relative, committer.getUUID()));
    }
    Assert.assertEquals("Should have correct paths", expected, files);
}
Example #17
Source File: TestS3PartitionedTaskCommit.java From s3committer with Apache License 2.0
@Test
public void testAppend() throws Exception {
    FileSystem mockS3 = getMockS3();

    getTAC().getConfiguration()
        .set(S3Committer.CONFLICT_MODE, "append");

    S3PartitionedOutputCommitter committer = newTaskCommitter();

    committer.setupTask(getTAC());
    TestUtil.createTestOutputFiles(relativeFiles,
        committer.getTaskAttemptPath(getTAC()), getTAC().getConfiguration());

    // test success when one partition already exists
    reset(mockS3);
    when(mockS3
        .exists(new Path(OUTPUT_PATH, relativeFiles.get(2)).getParent()))
        .thenReturn(true);

    committer.commitTask(getTAC());

    Set<String> files = Sets.newHashSet();
    for (InitiateMultipartUploadRequest request :
        getMockResults().getRequests().values()) {
        Assert.assertEquals(MockS3FileSystem.BUCKET, request.getBucketName());
        files.add(request.getKey());
    }
    Assert.assertEquals("Should have the right number of uploads",
        relativeFiles.size(), files.size());

    Set<String> expected = Sets.newHashSet();
    for (String relative : relativeFiles) {
        expected.add(OUTPUT_PREFIX + "/" +
            Paths.addUUID(relative, committer.getUUID()));
    }
    Assert.assertEquals("Should have correct paths", expected, files);
}
Example #18
Source File: S3MultipartUploadTest.java From data-highway with Apache License 2.0
@Test
public void start() {
    ArgumentCaptor<InitiateMultipartUploadRequest> request = ArgumentCaptor
        .forClass(InitiateMultipartUploadRequest.class);
    InitiateMultipartUploadResult response = mock(InitiateMultipartUploadResult.class);
    when(response.getUploadId()).thenReturn(UPLOAD_ID);
    when(s3.initiateMultipartUpload(request.capture())).thenReturn(response);

    String result = underTest.start();

    assertThat(result, is(UPLOAD_ID));
    assertThat(request.getValue().getBucketName(), is(BUCKET));
    assertThat(request.getValue().getKey(), is(KEY));
}
Example #19
Source File: ParallelRequester.java From nexus-public with Eclipse Public License 1.0
protected void parallelRequests(final AmazonS3 s3,
                                final String bucket,
                                final String key,
                                final Supplier<IOFunction<String, List<PartETag>>> operations) {
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
    String uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();

    CompletionService<List<PartETag>> completionService = new ExecutorCompletionService<>(executorService);
    try {
        for (int i = 0; i < parallelism; i++) {
            completionService.submit(() -> operations.get().apply(uploadId));
        }

        List<PartETag> partETags = new ArrayList<>();
        for (int i = 0; i < parallelism; i++) {
            partETags.addAll(completionService.take().get());
        }

        s3.completeMultipartUpload(new CompleteMultipartUploadRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartETags(partETags));
    }
    catch (InterruptedException interrupted) {
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
        Thread.currentThread().interrupt();
    }
    catch (CancellationException | ExecutionException ex) {
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
        throw new BlobStoreException(
            format("Error executing parallel requests for bucket:%s key:%s with uploadId:%s",
                bucket, key, uploadId), ex, null);
    }
}
Example #20
Source File: S3Service.java From modeldb with Apache License 2.0
@Override
public Optional<String> initiateMultipart(String s3Key) throws ModelDBException {
    // Validate bucket
    Boolean exist = doesBucketExist(bucketName);
    if (!exist) {
        throw new ModelDBException("Bucket does not exists", io.grpc.Status.Code.INTERNAL);
    }
    InitiateMultipartUploadRequest initiateMultipartUploadRequest =
        new InitiateMultipartUploadRequest(bucketName, s3Key);
    InitiateMultipartUploadResult result =
        s3Client.initiateMultipartUpload(initiateMultipartUploadRequest);
    return Optional.ofNullable(result.getUploadId());
}
Example #21
Source File: PublisherTools.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {
    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName, objectKey, createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult =
        amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(objectKey)
            .withUploadId(initiateMultipartUploadResult.getUploadId())
            .withPartNumber(i)
            .withFileOffset(filePosition)
            .withFile(file)
            .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());
        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload = new CompleteMultipartUploadRequest(
        bucketName,
        objectKey,
        initiateMultipartUploadResult.getUploadId(),
        partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
Example #22
Source File: S3MultipartUpload.java From data-highway with Apache License 2.0
String start() {
    bytes = 0;
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucket, key);
    if (enableServerSideEncryption) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(objectMetadata);
    }
    String uploadId = s3.initiateMultipartUpload(request).getUploadId();
    stopwatch.start();
    log.info("Starting upload to s3://{}/{}.", bucket, key);
    return uploadId;
}
Example #23
Source File: PublisherToolsTest.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Before
public void setUp() {
    MockitoAnnotations.initMocks(this);
    when(mockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
        .thenReturn(mockUploadResult);
    when(mockS3Client.uploadPart(any(UploadPartRequest.class))).thenReturn(mockPartRequest);
    when(mockUploadResult.getUploadId()).thenReturn("123");
    when(mockArtifact.getLocation()).thenReturn(mockLocation);
    when(mockLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    when(s3ArtifactLocation.getBucketName()).thenReturn("Bucket");
    when(s3ArtifactLocation.getObjectKey()).thenReturn("Key");

    outContent = TestUtils.setOutputStream();
}
Example #24
Source File: PublisherToolsTest.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
        "ZipProject",
        PATH_TO_COMPRESS,
        CompressionType.Zip,
        null);

    PublisherTools.uploadFile(
        compressedFile,
        mockArtifact,
        CompressionType.Zip,
        null, // No custom encryption key
        mockS3Client,
        null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5MB, should only be one upload
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Example #25
Source File: PublisherToolsTest.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void uploadWithCustomKmsEncryptionKey() throws IOException {
    TestUtils.initializeTestingFolders();

    when(mockEncryptionKey.getId()).thenReturn("KMS-KEY-ARN");
    when(mockEncryptionKey.getType()).thenReturn(EncryptionKeyType.KMS.toString());

    final File compressedFile = CompressionTools.compressFile(
        "ZipProject",
        PATH_TO_COMPRESS,
        CompressionType.Zip,
        null);

    PublisherTools.uploadFile(
        compressedFile,
        mockArtifact,
        CompressionType.Zip,
        mockEncryptionKey,
        mockS3Client,
        null); // Listener

    verify(mockS3Client).initiateMultipartUpload(initiateCaptor.capture());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertEquals("KMS-KEY-ARN", encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Example #26
Source File: PublisherToolsTest.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void uploadWithUnknownEncryptionKeyType() throws IOException {
    TestUtils.initializeTestingFolders();

    when(mockEncryptionKey.getId()).thenReturn("KMS-KEY-ARN");
    when(mockEncryptionKey.getType()).thenReturn("Custom");

    final File compressedFile = CompressionTools.compressFile(
        "ZipProject",
        PATH_TO_COMPRESS,
        CompressionType.Zip,
        null);

    PublisherTools.uploadFile(
        compressedFile,
        mockArtifact,
        CompressionType.Zip,
        mockEncryptionKey,
        mockS3Client,
        null); // Listener

    verify(mockS3Client).initiateMultipartUpload(initiateCaptor.capture());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Example #27
Source File: SimpleStorageResource.java From spring-cloud-aws with Apache License 2.0
private void initiateMultiPartIfNeeded() {
    if (this.multiPartUploadResult == null) {
        this.multiPartUploadResult = SimpleStorageResource.this.amazonS3
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(
                SimpleStorageResource.this.bucketName,
                SimpleStorageResource.this.objectName));
    }
}
Example #28
Source File: TestS3EncryptionStrategies.java From nifi with Apache License 2.0
@Before
public void setup() {
    byte[] keyRawBytes = new byte[32];
    SecureRandom secureRandom = new SecureRandom();
    secureRandom.nextBytes(keyRawBytes);

    randomKeyMaterial = Base64.encodeBase64String(keyRawBytes);

    metadata = new ObjectMetadata();
    putObjectRequest = new PutObjectRequest("", "", "");
    initUploadRequest = new InitiateMultipartUploadRequest("", "");
    getObjectRequest = new GetObjectRequest("", "");
    uploadPartRequest = new UploadPartRequest();
}
Example #29
Source File: PublisherCallableTest.java From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void uploadsArtifactToS3() throws IOException {
    // when
    publisher.invoke(workspace, null);

    // then
    final InOrder inOrder = inOrder(clientFactory, awsClients, s3Client);
    inOrder.verify(clientFactory).getAwsClient(ACCESS_KEY, SECRET_KEY, PROXY_HOST, PROXY_PORT, REGION, PLUGIN_VERSION);
    inOrder.verify(awsClients).getCodePipelineClient();
    inOrder.verify(awsClients).getS3Client(credentialsProviderCaptor.capture());
    inOrder.verify(s3Client).initiateMultipartUpload(initiateMultipartUploadRequestCaptor.capture());
    inOrder.verify(s3Client).uploadPart(uploadPartRequestCaptor.capture());

    final com.amazonaws.auth.AWSSessionCredentials credentials =
        (com.amazonaws.auth.AWSSessionCredentials) credentialsProviderCaptor.getValue().getCredentials();
    assertEquals(JOB_ACCESS_KEY, credentials.getAWSAccessKeyId());
    assertEquals(JOB_SECRET_KEY, credentials.getAWSSecretKey());
    assertEquals(JOB_SESSION_TOKEN, credentials.getSessionToken());

    verify(codePipelineClient).getJobDetails(getJobDetailsRequestCaptor.capture());
    assertEquals(JOB_ID, getJobDetailsRequestCaptor.getValue().getJobId());

    final InitiateMultipartUploadRequest initRequest = initiateMultipartUploadRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, initRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, initRequest.getKey());

    final UploadPartRequest uploadRequest = uploadPartRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, uploadRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, uploadRequest.getKey());
    assertEquals(UPLOAD_ID, uploadRequest.getUploadId());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());
}
Example #30
Source File: S3FeaturesDemoTest.java From Scribengin with GNU Affero General Public License v3.0
public void multipartUpload(String bucketName, String keyName, String filePath) throws IOException {
    // Create a list of PartETag objects. You get one of these for each part upload.
    List<PartETag> partETags = new ArrayList<PartETag>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

    File file = new File(filePath);
    long contentLength = file.length();
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB.

    try {
        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Last part can be less than 5 MB. Adjust part size.
            partSize = Math.min(partSize, (contentLength - filePosition));

            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName(bucketName).withKey(keyName)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

            // Upload part and add response to our list.
            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }

        // Step 3: Complete.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName,
            keyName,
            initResponse.getUploadId(),
            partETags);

        s3Client.completeMultipartUpload(compRequest);
    } catch (Exception e) {
        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, keyName,
            initResponse.getUploadId()));
        e.printStackTrace();
    }
}