com.amazonaws.services.s3.model.ObjectMetadata Java Examples
The following examples show how to use com.amazonaws.services.s3.model.ObjectMetadata, drawn from a range of open-source projects. The source file, project, and license are noted above each example.
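Most of the examples follow one of two patterns: populating an ObjectMetadata before uploading a stream (S3 cannot determine the length of a raw InputStream, so setContentLength is set explicitly), or reading metadata back after an upload. As a minimal sketch of that round trip (the bucket and key names are placeholders, and client construction is simplified):

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

byte[] content = "hello".getBytes(StandardCharsets.UTF_8);
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(content.length);     // without this, the SDK must buffer the stream to compute its length
metadata.setContentType("text/plain");
metadata.addUserMetadata("origin", "example"); // sent as the x-amz-meta-origin header

s3.putObject("example-bucket", "example-key", new ByteArrayInputStream(content), metadata);

// A HEAD request returns the stored metadata without downloading the object body.
ObjectMetadata stored = s3.getObjectMetadata("example-bucket", "example-key");
System.out.println(stored.getUserMetadata().get("origin")); // prints "example"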
Example #1
Source File: SimpleStorageResource.java From spring-cloud-aws with Apache License 2.0
private ObjectMetadata getRequiredObjectMetadata() throws FileNotFoundException {
    ObjectMetadata metadata = getObjectMetadata();
    if (metadata == null) {
        StringBuilder builder = new StringBuilder().append("Resource with bucket='")
                .append(this.bucketName).append("' and objectName='")
                .append(this.objectName);
        if (this.versionId != null) {
            builder.append("' and versionId='");
            builder.append(this.versionId);
        }
        builder.append("' not found!");
        throw new FileNotFoundException(builder.toString());
    }
    return metadata;
}
Example #2
Source File: AwsSdkTest.java From s3proxy with Apache License 2.0
@Test
public void testAwsV4SignatureBadCredential() throws Exception {
    client = AmazonS3ClientBuilder.standard()
            .withCredentials(new AWSStaticCredentialsProvider(
                    new BasicAWSCredentials(
                            awsCreds.getAWSAccessKeyId(), "bad-secret-key")))
            .withEndpointConfiguration(s3EndpointConfig)
            .build();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());

    try {
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(), metadata);
        Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
    } catch (AmazonS3Exception e) {
        assertThat(e.getErrorCode()).isEqualTo("SignatureDoesNotMatch");
    }
}
Example #3
Source File: S3Archiver.java From chancery with Apache License 2.0
private void upload(@NotNull File src, @NotNull String key, @NotNull CallbackPayload payload) {
    log.info("Uploading {} to {} in {}", src, key, bucketName);
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, src);
    final ObjectMetadata metadata = request.getMetadata();

    final String commitId = payload.getAfter();
    if (commitId != null) {
        metadata.addUserMetadata("commit-id", commitId);
    }
    final DateTime timestamp = payload.getTimestamp();
    if (timestamp != null) {
        metadata.addUserMetadata("hook-timestamp",
                ISODateTimeFormat.basicTime().print(timestamp));
    }

    final TimerContext time = uploadTimer.time();
    try {
        s3Client.putObject(request);
    } catch (Exception e) {
        log.error("Couldn't upload to {} in {}", key, bucketName, e);
        throw e;
    } finally {
        time.stop();
    }
    log.info("Uploaded to {} in {}", key, bucketName);
}
Example #4
Source File: FileClient.java From file-service with Apache License 2.0
public String putObject(String bucketName, String originFileName, MultipartFile multipartFile) {
    String uuid = UUID.randomUUID().toString().replaceAll("-", "");
    String fileName = FILE + "_" + uuid + "_" + originFileName;
    try {
        InputStream inputStream = multipartFile.getInputStream();
        if (isAwsS3) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            // Stored as user-defined metadata (x-amz-meta-content-type), not the object's Content-Type header.
            objectMetadata.addUserMetadata("Content-Type", "application/octet-stream");
            amazonS3.putObject(bucketName, fileName, inputStream, objectMetadata);
        } else {
            minioClient.putObject(bucketName, fileName, inputStream, "application/octet-stream");
        }
    } catch (Exception e) {
        throw new FileUploadException("error.file.upload", e);
    }
    return fileName;
}
Example #5
Source File: SimpleStorageProtocolResolverTest.java From spring-cloud-aws with Apache License 2.0
@Test
void testGetResourceWithVersionId() {
    AmazonS3 amazonS3 = mock(AmazonS3.class);
    SimpleStorageProtocolResolver resourceLoader = new SimpleStorageProtocolResolver(amazonS3);

    ObjectMetadata metadata = new ObjectMetadata();
    when(amazonS3.getObjectMetadata(any(GetObjectMetadataRequest.class)))
            .thenReturn(metadata);

    String resourceName = "s3://bucket/object^versionIdValue";
    Resource resource = resourceLoader.resolve(resourceName, new DefaultResourceLoader());
    assertThat(resource).isNotNull();
}
Example #6
Source File: SimpleStorageResourceTest.java From spring-cloud-aws with Apache License 2.0
@Test
void lastModified_withExistingResource_returnsLastModifiedDateOfResource() throws Exception {
    // Arrange
    AmazonS3 amazonS3 = mock(AmazonS3.class);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Date lastModified = new Date();
    objectMetadata.setLastModified(lastModified);
    when(amazonS3.getObjectMetadata(any(GetObjectMetadataRequest.class)))
            .thenReturn(objectMetadata);

    // Act
    SimpleStorageResource simpleStorageResource = new SimpleStorageResource(amazonS3,
            "bucket", "object", new SyncTaskExecutor());

    // Assert
    assertThat(simpleStorageResource.lastModified())
            .isEqualTo(lastModified.getTime());
}
Example #7
Source File: S3Operations.java From ats-framework with Apache License 2.0
/**
 * Get MD5, size, owner, storage class and last modification time for a desired file in the pointed bucket
 *
 * @param fileName the file name
 */
@PublicAtsApi
public S3ObjectInfo getFileMetadata( String fileName ) {
    try {
        S3Object element = s3Client.getObject(bucketName, fileName);
        if (element != null) {
            ObjectMetadata metaData = element.getObjectMetadata();
            S3ObjectInfo s3Info = new S3ObjectInfo();
            s3Info.setBucketName(bucketName);
            s3Info.setLastModified(metaData.getLastModified());
            s3Info.setMd5(metaData.getETag());
            s3Info.setName(element.getKey());
            s3Info.setSize(metaData.getContentLength());

            return s3Info;
        } else {
            throw new NoSuchElementException("File with name '" + fileName + "' does not exist!");
        }
    } catch (Exception e) {
        handleExeption(e, "Could not retrieve metadata for S3 object with key '" + fileName + "'");
    }
    return null;
}
Example #8
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testTagObjects() {
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);

    // Create an S3 object summary.
    S3ObjectSummary s3ObjectSummary = new S3ObjectSummary();
    s3ObjectSummary.setKey(TARGET_S3_KEY);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Put a file in S3.
    s3Operations.putObject(
            new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), new ObjectMetadata()), null);

    // Tag the file with an S3 object tag.
    s3Dao.tagObjects(params, new S3FileTransferRequestParamsDto(), Collections.singletonList(s3ObjectSummary), tag);

    // Validate that the object got tagged.
    GetObjectTaggingResult getObjectTaggingResult =
            s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, TARGET_S3_KEY), null);
    assertEquals(Collections.singletonList(tag), getObjectTaggingResult.getTagSet());
}
Example #9
Source File: ITFetchS3Object.java From localization_nifi with Apache License 2.0
@Test
public void testSimpleGetEncrypted() throws IOException {
    putTestFileEncrypted("test-file", getFileFromResourceName(SAMPLE_FILE_RESOURCE_NAME));

    final TestRunner runner = TestRunners.newTestRunner(new FetchS3Object());
    runner.setProperty(FetchS3Object.CREDENTIALS_FILE, CREDENTIALS_FILE);
    runner.setProperty(FetchS3Object.REGION, REGION);
    runner.setProperty(FetchS3Object.BUCKET, BUCKET_NAME);

    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "test-file");
    runner.enqueue(new byte[0], attrs);

    runner.run(1);

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals(PutS3Object.S3_SSE_ALGORITHM, ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    ff.assertContentEquals(getFileFromResourceName(SAMPLE_FILE_RESOURCE_NAME));
}
Example #10
Source File: S3AFileSystem.java From hadoop with Apache License 2.0
private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };

    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
    putObjectRequest.setCannedAcl(cannedACL);
    s3.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
}
Example #11
Source File: FileUploadCleanupServiceTest.java From herd with Apache License 2.0
@Test
public void testDeleteBusinessObjectDataS3FileExists() throws Exception {
    // Prepare database entries required for testing.
    StorageEntity storageEntity = createTestStorageEntity(STORAGE_NAME, s3BucketName);
    BusinessObjectDataKey testBusinessObjectKey =
            new BusinessObjectDataKey(NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION,
                    PARTITION_VALUE, SUBPARTITION_VALUES, DATA_VERSION);
    createTestDatabaseEntities(testBusinessObjectKey, BusinessObjectDataStatusEntity.UPLOADING, storageEntity, TARGET_S3_KEY, 15);

    // Put a file in S3.
    PutObjectRequest putObjectRequest =
            new PutObjectRequest(s3BucketName, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), new ObjectMetadata());
    s3Operations.putObject(putObjectRequest, null);

    // Delete the business object data.
    List<BusinessObjectDataKey> resultBusinessObjectDataKeys = fileUploadCleanupService.deleteBusinessObjectData(STORAGE_NAME, 10);

    // Validate the results.
    assertNotNull(resultBusinessObjectDataKeys);
    assertTrue(resultBusinessObjectDataKeys.isEmpty());
    validateBusinessObjectDataStatus(testBusinessObjectKey, BusinessObjectDataStatusEntity.UPLOADING);
}
Example #12
Source File: S3FileSystem.java From beam with Apache License 2.0
@VisibleForTesting
CopyObjectResult atomicCopy(
        S3ResourceId sourcePath, S3ResourceId destinationPath, ObjectMetadata sourceObjectMetadata)
        throws AmazonClientException {
    CopyObjectRequest copyObjectRequest =
            new CopyObjectRequest(
                    sourcePath.getBucket(),
                    sourcePath.getKey(),
                    destinationPath.getBucket(),
                    destinationPath.getKey());
    copyObjectRequest.setNewObjectMetadata(sourceObjectMetadata);
    copyObjectRequest.setStorageClass(options.getS3StorageClass());
    copyObjectRequest.setSourceSSECustomerKey(options.getSSECustomerKey());
    copyObjectRequest.setDestinationSSECustomerKey(options.getSSECustomerKey());
    return amazonS3.get().copyObject(copyObjectRequest);
}
Example #13
Source File: S3BlobStore.java From nexus-blobstore-s3 with Eclipse Public License 1.0
@Override
@Guarded(by = STARTED)
public Blob create(final InputStream blobData, final Map<String, String> headers) {
    checkNotNull(blobData);

    return create(headers, destination -> {
        try (InputStream data = blobData) {
            MetricsInputStream input = new MetricsInputStream(data);
            TransferManager transferManager = new TransferManager(s3);
            transferManager.upload(getConfiguredBucket(), destination, input, new ObjectMetadata())
                    .waitForCompletion();
            return input.getMetrics();
        } catch (InterruptedException e) {
            throw new BlobStoreException("error uploading blob", e, null);
        }
    });
}
Example #14
Source File: SimpleStorageResource.java From spring-cloud-aws with Apache License 2.0
private ObjectMetadata getObjectMetadata() {
    if (this.objectMetadata == null) {
        try {
            GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest(
                    this.bucketName, this.objectName);
            if (this.versionId != null) {
                metadataRequest.setVersionId(this.versionId);
            }
            this.objectMetadata = this.amazonS3.getObjectMetadata(metadataRequest);
        } catch (AmazonS3Exception e) {
            // Catch 404 (object not found) and 301 (bucket not found, moved permanently)
            if (e.getStatusCode() == 404 || e.getStatusCode() == 301) {
                this.objectMetadata = null;
            } else {
                throw e;
            }
        }
    }
    return this.objectMetadata;
}
Example #15
Source File: S3AFastOutputStream.java From big-c with Apache License 2.0
private MultiPartUpload initiateMultiPartUpload() throws IOException {
    final ObjectMetadata om = createDefaultMetadata();
    final InitiateMultipartUploadRequest initiateMPURequest =
            new InitiateMultipartUploadRequest(bucket, key, om);
    initiateMPURequest.setCannedACL(cannedACL);
    try {
        return new MultiPartUpload(
                client.initiateMultipartUpload(initiateMPURequest).getUploadId());
    } catch (AmazonServiceException ase) {
        throw new IOException("Unable to initiate MultiPartUpload (server side)" +
                ": " + ase, ase);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to initiate MultiPartUpload (client side)" +
                ": " + ace, ace);
    }
}
Example #16
Source File: FileHelper.java From datacollector with Apache License 2.0
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
    final PutObjectRequest putObjectRequest = new PutObjectRequest(
            bucket,
            fileName,
            is,
            metadata
    );
    final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
    Upload upload = transferManager.upload(putObjectRequest);
    upload.addProgressListener((ProgressListener) progressEvent -> {
        switch (progressEvent.getEventType()) {
            case TRANSFER_STARTED_EVENT:
                LOG.debug("Started uploading object {} into Amazon S3", object);
                break;
            case TRANSFER_COMPLETED_EVENT:
                LOG.debug("Completed uploading object {} into Amazon S3", object);
                break;
            case TRANSFER_FAILED_EVENT:
                LOG.debug("Failed uploading object {} into Amazon S3", object);
                break;
            default:
                break;
        }
    });
    return upload;
}
Example #17
Source File: BusinessObjectDataServiceTestHelper.java From herd with Apache License 2.0
/**
 * Creates an object in S3 with the prefix constructed from the given parameters. The object's full path will be {prefix}/{UUID}
 *
 * @param businessObjectFormatEntity business object format
 * @param request request with partition values and storage
 * @param businessObjectDataVersion business object data version to put
 */
public void createS3Object(BusinessObjectFormatEntity businessObjectFormatEntity, BusinessObjectDataInvalidateUnregisteredRequest request,
        int businessObjectDataVersion) {
    StorageEntity storageEntity = storageDao.getStorageByName(request.getStorageName());
    String s3BucketName = storageHelper.getS3BucketAccessParams(storageEntity).getS3BucketName();

    BusinessObjectDataKey businessObjectDataKey = getBusinessObjectDataKey(request);
    businessObjectDataKey.setBusinessObjectDataVersion(businessObjectDataVersion);

    String s3KeyPrefix = s3KeyPrefixHelper
            .buildS3KeyPrefix(AbstractServiceTest.S3_KEY_PREFIX_VELOCITY_TEMPLATE, businessObjectFormatEntity, businessObjectDataKey,
                    storageEntity.getName());
    String s3ObjectKey = s3KeyPrefix + "/test";

    PutObjectRequest putObjectRequest =
            new PutObjectRequest(s3BucketName, s3ObjectKey, new ByteArrayInputStream(new byte[1]), new ObjectMetadata());
    s3Operations.putObject(putObjectRequest, null);
}
Example #18
Source File: S3DaoTest.java From herd with Apache License 2.0
/**
 * The method is successful when both bucket and key exist.
 */
@Test
public void testS3FileExists() {
    String expectedKey = "foo";
    String expectedValue = "bar";

    ByteArrayInputStream inputStream = new ByteArrayInputStream((expectedKey + "=" + expectedValue).getBytes());
    PutObjectRequest putObjectRequest = new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, inputStream, new ObjectMetadata());
    s3Operations.putObject(putObjectRequest, null);

    S3FileTransferRequestParamsDto params = s3DaoTestHelper.getTestS3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);
    params.setS3KeyPrefix(TARGET_S3_KEY);

    Assert.assertTrue(s3Dao.s3FileExists(params));
}
Example #19
Source File: S3DaoImpl.java From herd with Apache License 2.0
@Override
public ObjectMetadata getObjectMetadata(final S3FileTransferRequestParamsDto params) {
    AmazonS3Client s3Client = getAmazonS3(params);

    try {
        return s3Operations.getObjectMetadata(params.getS3BucketName(), params.getS3KeyPrefix(), s3Client);
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
            return null;
        }

        throw new IllegalStateException(String
                .format("Failed to get S3 metadata for object key \"%s\" from bucket \"%s\". Reason: %s", params.getS3KeyPrefix(),
                        params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        s3Client.shutdown();
    }
}
Example #20
Source File: ITPutS3Object.java From nifi with Apache License 2.0
private void testEncryptionServiceWithServerSideS3EncryptionStrategy(byte[] data) throws IOException, InitializationException {
    TestRunner runner = createPutEncryptionTestRunner(AmazonS3EncryptionService.STRATEGY_NAME_SSE_S3, null);

    Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "test.txt");
    runner.enqueue(data, attrs);
    runner.assertValid();
    runner.run();
    runner.assertAllFlowFilesTransferred(PutS3Object.REL_SUCCESS);

    List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(PutS3Object.REL_SUCCESS);
    Assert.assertEquals(1, flowFiles.size());
    Assert.assertEquals(0, runner.getFlowFilesForRelationship(PutS3Object.REL_FAILURE).size());
    MockFlowFile putSuccess = flowFiles.get(0);
    Assert.assertEquals(putSuccess.getAttribute(PutS3Object.S3_ENCRYPTION_STRATEGY), AmazonS3EncryptionService.STRATEGY_NAME_SSE_S3);

    MockFlowFile flowFile = fetchEncryptedFlowFile(attrs, AmazonS3EncryptionService.STRATEGY_NAME_SSE_S3, null);
    flowFile.assertContentEquals(data);
    flowFile.assertAttributeEquals(PutS3Object.S3_SSE_ALGORITHM, ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    flowFile.assertAttributeEquals(PutS3Object.S3_ENCRYPTION_STRATEGY, AmazonS3EncryptionService.STRATEGY_NAME_SSE_S3);
}
Example #21
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsNonGlacierNonDeepArchiveObject() {
    // Put a 1 byte non-Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(
            new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
                    new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a non-Glacier file.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when file has a non-Glacier storage class.");
    } catch (IllegalStateException e) {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                        "Reason: object is not in Glacier or DeepArchive (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example #22
Source File: S3PropertiesFile.java From nexus-blobstore-s3 with Eclipse Public License 1.0
public void store() throws IOException {
    log.debug("Storing: {}/{}", bucket, key);
    ByteArrayOutputStream bufferStream = new ByteArrayOutputStream();
    store(bufferStream, null);
    byte[] buffer = bufferStream.toByteArray();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(buffer.length);
    s3.putObject(bucket, key, new ByteArrayInputStream(buffer), metadata);
}
Example #23
Source File: ObjectsOnS3.java From cantor with BSD 3-Clause "New" or "Revised" License
private void doStore(final String namespace, final String key, final InputStream stream, final long length) {
    final String bucket = toBucketName(namespace);
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(length);
    logger.info("storing stream with length={} at '{}.{}'", length, bucket, key);
    // if no exception is thrown, the object was put successfully - ignore response value
    this.s3Client.putObject(bucket, key, stream, metadata);
}
Example #24
Source File: AwsSdkTest.java From s3proxy with Apache License 2.0
@Test
public void testBlobListRecursiveImplicitMarker() throws Exception {
    ObjectListing listing = client.listObjects(containerName);
    assertThat(listing.getObjectSummaries()).isEmpty();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, "blob1", BYTE_SOURCE.openStream(), metadata);
    client.putObject(containerName, "blob2", BYTE_SOURCE.openStream(), metadata);

    listing = client.listObjects(new ListObjectsRequest()
            .withBucketName(containerName)
            .withMaxKeys(1));
    assertThat(listing.getObjectSummaries()).hasSize(1);
    assertThat(listing.getObjectSummaries().iterator().next().getKey())
            .isEqualTo("blob1");

    listing = client.listObjects(new ListObjectsRequest()
            .withBucketName(containerName)
            .withMaxKeys(1)
            .withMarker("blob1"));
    assertThat(listing.getObjectSummaries()).hasSize(1);
    assertThat(listing.getObjectSummaries().iterator().next().getKey())
            .isEqualTo("blob2");
}
Example #25
Source File: PathMatchingResourceLoaderAwsTest.java From spring-cloud-aws with Apache License 2.0
@Override
public String call() {
    this.amazonS3.putObject(this.bucketName, this.fileName,
            new ByteArrayInputStream(this.fileName.getBytes()), new ObjectMetadata());
    return this.fileName;
}
Example #26
Source File: S3UploadAllCallable.java From jobcacher-plugin with MIT License
/**
 * Upload from slave
 */
@Override
public Integer invoke(final TransferManager transferManager, File base, VirtualChannel channel)
        throws IOException, InterruptedException {
    if (!base.exists()) {
        return 0;
    }

    final AtomicInteger count = new AtomicInteger(0);
    final Uploads uploads = new Uploads();

    final Map<String, S3ObjectSummary> summaries =
            lookupExistingCacheEntries(transferManager.getAmazonS3Client());

    // Find files to upload that match scan
    scanner.scan(base, new FileVisitor() {
        @Override
        public void visit(File f, String relativePath) throws IOException {
            if (f.isFile()) {
                String key = pathPrefix + "/" + relativePath;

                S3ObjectSummary summary = summaries.get(key);
                if (summary == null || f.lastModified() > summary.getLastModified().getTime()) {
                    final ObjectMetadata metadata = buildMetadata(f);

                    uploads.startUploading(transferManager, f,
                            IOUtils.toBufferedInputStream(FileUtils.openInputStream(f)),
                            new Destination(bucketName, key), metadata);

                    if (uploads.count() > 20) {
                        waitForUploads(count, uploads);
                    }
                }
            }
        }
    });

    // Wait for each file to complete before returning
    waitForUploads(count, uploads);

    return uploads.count();
}
Example #27
Source File: S3AFileSystem.java From hadoop with Apache License 2.0
private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
                case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                    statistics.incrementWriteOps(1);
                    break;
                default:
                    break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
}
Example #28
Source File: AmazonS3SinkMockTests.java From spring-cloud-stream-app-starters with Apache License 2.0
@Test
@Override
public void test() throws Exception {
    AmazonS3 amazonS3Client = TestUtils.getPropertyValue(this.s3MessageHandler, "transferManager.s3", AmazonS3.class);

    InputStream payload = new StringInputStream("a");
    Message<?> message = MessageBuilder.withPayload(payload)
            .setHeader("key", "myInputStream")
            .build();

    this.channels.input().send(message);

    ArgumentCaptor<PutObjectRequest> putObjectRequestArgumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class);
    verify(amazonS3Client, atLeastOnce()).putObject(putObjectRequestArgumentCaptor.capture());

    PutObjectRequest putObjectRequest = putObjectRequestArgumentCaptor.getValue();
    assertThat(putObjectRequest.getBucketName(), equalTo(S3_BUCKET));
    assertThat(putObjectRequest.getKey(), equalTo("myInputStream"));
    assertNull(putObjectRequest.getFile());
    assertNotNull(putObjectRequest.getInputStream());

    ObjectMetadata metadata = putObjectRequest.getMetadata();
    assertThat(metadata.getContentMD5(), equalTo(Md5Utils.md5AsBase64(payload)));
    assertThat(metadata.getContentLength(), equalTo(1L));
    assertThat(metadata.getContentType(), equalTo(MediaType.APPLICATION_JSON_VALUE));
    assertThat(metadata.getContentDisposition(), equalTo("test.json"));
}
Example #29
Source File: S3PropertiesFile.java From nexus-public with Eclipse Public License 1.0
public void store() throws IOException {
    log.debug("Storing: {}/{}", bucket, key);
    ByteArrayOutputStream bufferStream = new ByteArrayOutputStream();
    store(bufferStream, null);
    byte[] buffer = bufferStream.toByteArray();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(buffer.length);
    s3.putObject(bucket, key, new ByteArrayInputStream(buffer), metadata);
}
Example #30
Source File: S3LocationResolverTest.java From data-highway with Apache License 2.0
@Test
public void create() throws URISyntaxException {
    S3LocationResolver resolver = new S3LocationResolver(s3, "bucket", "prefix");
    resolver.resolveLocation("tableName", true);

    ArgumentCaptor<ByteArrayInputStream> contentCaptor = ArgumentCaptor.forClass(ByteArrayInputStream.class);
    ArgumentCaptor<ObjectMetadata> metadataCaptor = ArgumentCaptor.forClass(ObjectMetadata.class);
    verify(s3).putObject(eq("bucket"), eq("prefix/tableName/"), contentCaptor.capture(), metadataCaptor.capture());
    assertThat(contentCaptor.getValue().available(), is(0));
    assertThat(metadataCaptor.getValue().getContentLength(), is(0L));
}