Java Code Examples for com.amazonaws.services.s3.model.ObjectMetadata#setContentType()
The following examples show how to use com.amazonaws.services.s3.model.ObjectMetadata#setContentType(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
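Before the project examples, here is a minimal, self-contained sketch of the typical pattern: build an ObjectMetadata, call setContentType() (and usually setContentLength()), and pass it to putObject() together with the input stream. The bucket name, key, and payload below are illustrative assumptions, not taken from any of the projects that follow.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SetContentTypeSketch {
    public static void main(String[] args) {
        // Assumed bucket and key names, for illustration only.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] body = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");   // stored as the object's Content-Type header
        metadata.setContentLength(body.length);  // lets the SDK skip buffering the stream to compute the length

        s3.putObject("example-bucket", "example/key.txt", new ByteArrayInputStream(body), metadata);
    }
}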
Example 1
Source File: AwsS3BucketService.java From pacbot with Apache License 2.0
public boolean uploadFile(final AmazonS3 amazonS3, MultipartFile fileToUpload, String s3BucketName, String key) {
    try {
        File file = AdminUtils.convert(fileToUpload);
        long size = fileToUpload.getSize();
        String contentType = fileToUpload.getContentType();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(contentType);
        metadata.setContentLength(size);
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, key, file)
                .withMetadata(metadata) // attach the metadata so the content type and length set above are applied
                .withCannedAcl(CannedAccessControlList.PublicRead);
        amazonS3.putObject(putObjectRequest);
        return Boolean.TRUE;
    } catch (IOException exception) {
        log.error(UNEXPECTED_ERROR_OCCURRED, exception);
    }
    return Boolean.FALSE;
}
Example 2
Source File: S3CommonFileObject.java From hop with Apache License 2.0
@Override
protected void doCreateFolder() throws Exception {
    if (!isRootBucket()) {
        // create meta-data for your folder and set content-length to 0
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);
        metadata.setContentType("binary/octet-stream");
        // create empty content
        InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
        // create a PutObjectRequest passing the folder name suffixed by /
        PutObjectRequest putObjectRequest = createPutObjectRequest(bucketName, key + DELIMITER, emptyContent, metadata);
        // send request to S3 to create folder
        try {
            fileSystem.getS3Client().putObject(putObjectRequest);
        } catch (AmazonS3Exception e) {
            throw new FileSystemException("vfs.provider.local/create-folder.error", this, e);
        }
    } else {
        throw new FileSystemException("vfs.provider/create-folder-not-supported.error");
    }
}
Example 3
Source File: AwsFileSystem.java From judgels with GNU General Public License v2.0
@Override
public void uploadPublicFile(Path filePath, InputStream content) {
    String destFilePathString = filePath.toString();
    ObjectMetadata objectMetadata = new ObjectMetadata();
    String contentType = URLConnection.guessContentTypeFromName(destFilePathString);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
        if (contentType.startsWith("image/")) {
            objectMetadata.setCacheControl("no-transform,public,max-age=300,s-maxage=900");
        }
    }
    s3.putObject(bucketName, destFilePathString, content, objectMetadata);
}
Example 4
Source File: TestPrestoS3FileSystem.java From presto with Apache License 2.0
@Test
public void testEmptyDirectory() throws Exception {
    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        MockAmazonS3 s3 = new MockAmazonS3() {
            @Override
            public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest) {
                if (getObjectMetadataRequest.getKey().equals("empty-dir/")) {
                    ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentType(S3_DIRECTORY_OBJECT_CONTENT_TYPE);
                    return objectMetadata;
                }
                return super.getObjectMetadata(getObjectMetadataRequest);
            }
        };
        fs.initialize(new URI("s3n://test-bucket/"), new Configuration(false));
        fs.setS3Client(s3);

        FileStatus fileStatus = fs.getFileStatus(new Path("s3n://test-bucket/empty-dir/"));
        assertTrue(fileStatus.isDirectory());

        fileStatus = fs.getFileStatus(new Path("s3n://test-bucket/empty-dir"));
        assertTrue(fileStatus.isDirectory());
    }
}
Example 5
Source File: RepositoryS3.java From github-bucket with ISC License
private boolean walk(Iterator<S3ObjectSummary> iter, ObjectId file, String path) throws IOException {
    byte[] content;
    byte[] newHash;
    LOG.debug("Start processing file: {}", path);
    try (DigestInputStream is = new DigestInputStream(repository.open(file).openStream(), DigestUtils.getMd5Digest())) {
        // Get content
        content = IOUtils.toByteArray(is);
        // Get hash
        newHash = is.getMessageDigest().digest();
    }
    if (isUploadFile(iter, path, Hex.encodeHexString(newHash))) {
        LOG.info("Uploading file: {}", path);
        ObjectMetadata bucketMetadata = new ObjectMetadata();
        bucketMetadata.setContentMD5(Base64.encodeAsString(newHash));
        bucketMetadata.setContentLength(content.length);
        // Give Tika a few hints for the content detection
        Metadata tikaMetadata = new Metadata();
        tikaMetadata.set(Metadata.RESOURCE_NAME_KEY, FilenameUtils.getName(FilenameUtils.normalize(path)));
        // Fire!
        try (InputStream bis = TikaInputStream.get(content, tikaMetadata)) {
            bucketMetadata.setContentType(TIKA_DETECTOR.detect(bis, tikaMetadata).toString());
            s3.putObject(bucket.getName(), path, bis, bucketMetadata);
            return true;
        }
    }
    LOG.info("Skipping file (same checksum): {}", path);
    return false;
}
Example 6
Source File: S3Repository.java From hawkbit-extensions with Eclipse Public License 1.0
private ObjectMetadata createObjectMetadata(final String mdMD5Hash16, final String contentType, final File file) {
    final ObjectMetadata objectMetadata = new ObjectMetadata();
    final String mdMD5Hash64 = BaseEncoding.base64().encode(BaseEncoding.base16().lowerCase().decode(mdMD5Hash16));
    objectMetadata.setContentMD5(mdMD5Hash64);
    objectMetadata.setContentType(contentType);
    objectMetadata.setContentLength(file.length());
    if (s3Properties.isServerSideEncryption()) {
        objectMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION, s3Properties.getServerSideEncryptionAlgorithm());
    }
    return objectMetadata;
}
Example 7
Source File: TablonController.java From spring-cloud-aws-sample with Apache License 2.0
@RequestMapping("/anuncio/nuevo") public String nuevoAnuncio(Model model, @RequestParam String nombre, @RequestParam String asunto, @RequestParam String comentario, @RequestParam String filename, @RequestParam MultipartFile file) { if (!file.isEmpty()) { try { ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setContentType(file.getContentType()); TransferManager transferManager = TransferManagerBuilder.defaultTransferManager(); transferManager.upload(bucket, filename, file.getInputStream(), objectMetadata); } catch (Exception e) { model.addAttribute("message", "You failed to upload " + filename + " => " + e.getMessage()); return "error"; } } else { model.addAttribute("message", "You failed to upload " + filename + " because the file was empty."); return "error"; } Anuncio anuncio = new Anuncio(nombre, asunto, comentario); anuncio.setFoto(s3.getUrl(bucket, filename)); repository.save(anuncio); return "anuncio_guardado"; }
Example 8
Source File: S3SinkStreamWriter.java From Scribengin with GNU Affero General Public License v3.0
public S3SinkStreamWriter(S3Folder streamS3Folder) throws IOException {
    this.streamS3Folder = streamS3Folder;
    segmentName = "segment-" + UUID.randomUUID().toString();
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType("application/binary");
    metadata.addUserMetadata("transaction", "prepare");
    writer = streamS3Folder.createObjectWriter(segmentName, metadata);
}
Example 9
Source File: UploadThread.java From aws-photosharing-example with Apache License 2.0
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);
    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(putObjectRequest);
}
Example 10
Source File: AwsS3FileStorageService.java From pizzeria with MIT License
@Override
protected void saveFileContent(InputStream inputStream, String name, String contentType) throws IOException {
    byte[] bytes = IOUtils.toByteArray(inputStream);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    objectMetadata.setContentLength(bytes.length);
    getAmazonS3().putObject(bucketName, name, new ByteArrayInputStream(bytes), objectMetadata);
}
Example 11
Source File: S3MultiReportOutput.java From java-almanac with Creative Commons Attribution Share Alike 4.0 International
private void upload(String path, byte[] content) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(content.length);
    metadata.setContentType(getContentType(path));
    PutObjectRequest request = new PutObjectRequest(bucketName, basePath + path, new ByteArrayInputStream(content), metadata)
            .withCannedAcl(CannedAccessControlList.PublicRead);
    s3Client.putObject(request);
}
Example 12
Source File: S3ClientIntegrationTest.java From Scribengin with GNU Affero General Public License v3.0
@Test
public void testUpdateObjectMetadata() throws IOException, InterruptedException {
    String KEY = "test-update-objec-metadata";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType("text/plain");
    s3Client.createObject(BUCKET_NAME, KEY, new byte[0], metadata);

    metadata = s3Client.getObjectMetadata(BUCKET_NAME, KEY);
    metadata.addUserMetadata("transaction", "buffering");
    s3Client.updateObjectMetadata(BUCKET_NAME, KEY, metadata);

    metadata = s3Client.getObjectMetadata(BUCKET_NAME, KEY);
    Assert.assertEquals("buffering", metadata.getUserMetaDataOf("transaction"));
}
Example 13
Source File: AmazonS3ClientWrapper.java From fredbet with Creative Commons Attribution Share Alike 4.0 International
public void uploadFile(String key, byte[] fileContent, String contentType) {
    try (ByteArrayInputStream byteIn = new ByteArrayInputStream(fileContent)) {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(fileContent.length);
        meta.setContentType(contentType);
        this.amazonS3.putObject(bucketName, key, byteIn, meta);
    } catch (IOException e) {
        throw new AwsS3AccessException(e.getMessage(), e);
    }
}
Example 14
Source File: S3WaitOperatorFactoryTest.java From digdag with Apache License 2.0
@Test
public void testExponentialBackoff() throws Exception {
    Config config = newConfig();
    config.set("_command", BUCKET + "/" + KEY);
    when(taskRequest.getConfig()).thenReturn(config);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(CONTENT_TYPE);
    objectMetadata.setContentLength(CONTENT_LENGTH);
    objectMetadata.setUserMetadata(USER_METADATA);

    when(s3Client.getObjectMetadata(any(GetObjectMetadataRequest.class))).thenThrow(NOT_FOUND_EXCEPTION);

    Operator operator = factory.newOperator(newContext(projectPath, taskRequest));

    List<Integer> retryIntervals = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        try {
            operator.run();
            fail();
        } catch (TaskExecutionException e) {
            assertThat(e.isError(), is(false));
            assertThat(e.getRetryInterval().isPresent(), is(true));
            retryIntervals.add(e.getRetryInterval().get());
            Config lastStateParams = e.getStateParams(configFactory).get();
            when(taskRequest.getLastStateParams()).thenReturn(lastStateParams);
        }
    }

    for (int i = 1; i < retryIntervals.size(); i++) {
        int prevInterval = retryIntervals.get(i - 1);
        int interval = retryIntervals.get(i);
        assertThat(interval, is(Math.min(MAX_POLL_INTERVAL, prevInterval * 2)));
    }

    assertThat(retryIntervals.get(retryIntervals.size() - 1), is(MAX_POLL_INTERVAL));
}
Example 15
Source File: Write.java From openbd-core with GNU General Public License v3.0
private void writeData(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl,
        String aes256key, Map<String, String> customheaders) throws Exception {

    if (mimetype == null) {
        if (data.getDataType() == cfData.CFBINARYDATA)
            mimetype = "application/unknown";
        else if (cfData.isSimpleValue(data))
            mimetype = "text/plain";
        else
            mimetype = "application/json";

        // Check to see if the mime type is in the metadata
        if (metadata != null && metadata.containsKey("Content-Type"))
            mimetype = metadata.get("Content-Type");
    }

    InputStream ios = null;
    long size = 0;
    if (data.getDataType() == cfData.CFSTRINGDATA) {
        ios = new java.io.ByteArrayInputStream(data.getString().getBytes());
        size = data.getString().length();
    } else if (data.getDataType() == cfData.CFBINARYDATA) {
        ios = new java.io.ByteArrayInputStream(((cfBinaryData) data).getByteArray());
        size = ((cfBinaryData) data).getLength();
    } else {
        serializejson json = new serializejson();
        StringBuilder out = new StringBuilder();
        json.encodeJSON(out, data, false, CaseType.MAINTAIN, DateType.LONG);
        size = out.length();
        mimetype = "application/json";
        ios = new java.io.ByteArrayInputStream(out.toString().getBytes());
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);
    omd.setContentType(mimetype);
    omd.setContentLength(size);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Let us run around the number of attempts
    int attempts = 0;
    while (attempts < retry) {
        try {
            PutObjectRequest por = new PutObjectRequest(bucket, key, ios, omd);
            por.setStorageClass(storage);

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (customheaders != null && !customheaders.isEmpty()) {
                Iterator<String> it = customheaders.keySet().iterator();
                while (it.hasNext()) {
                    String k = it.next();
                    por.putCustomRequestHeader(k, customheaders.get(k));
                }
            }

            s3Client.putObject(por);
            break;
        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt=" + (attempts + 1)
                    + "; exception=" + e.getMessage() + ")");
            attempts++;
            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000);
        }
    }
}
Example 16
Source File: S3EventProcessorUnzip.java From aws-lambda-unzip with Apache License 2.0
@Override
public String handleRequest(S3Event s3Event, Context context) {
    byte[] buffer = new byte[1024];
    try {
        for (S3EventNotificationRecord record : s3Event.getRecords()) {
            String srcBucket = record.getS3().getBucket().getName();

            // Object key may have spaces or unicode non-ASCII characters.
            String srcKey = record.getS3().getObject().getKey().replace('+', ' ');
            srcKey = URLDecoder.decode(srcKey, "UTF-8");

            // Detect file type
            Matcher matcher = Pattern.compile(".*\\.([^\\.]*)").matcher(srcKey);
            if (!matcher.matches()) {
                System.out.println("Unable to detect file type for key " + srcKey);
                return "";
            }
            String extension = matcher.group(1).toLowerCase();
            if (!"zip".equals(extension)) {
                System.out.println("Skipping non-zip file " + srcKey + " with extension " + extension);
                return "";
            }
            System.out.println("Extracting zip file " + srcBucket + "/" + srcKey);

            // Download the zip from S3 into a stream
            AmazonS3 s3Client = new AmazonS3Client();
            S3Object s3Object = s3Client.getObject(new GetObjectRequest(srcBucket, srcKey));
            ZipInputStream zis = new ZipInputStream(s3Object.getObjectContent());
            ZipEntry entry = zis.getNextEntry();

            while (entry != null) {
                String fileName = entry.getName();
                String mimeType = FileMimeType.fromExtension(FilenameUtils.getExtension(fileName)).mimeType();
                System.out.println("Extracting " + fileName + ", compressed: " + entry.getCompressedSize()
                        + " bytes, extracted: " + entry.getSize() + " bytes, mimetype: " + mimeType);
                ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                int len;
                while ((len = zis.read(buffer)) > 0) {
                    outputStream.write(buffer, 0, len);
                }
                InputStream is = new ByteArrayInputStream(outputStream.toByteArray());
                ObjectMetadata meta = new ObjectMetadata();
                meta.setContentLength(outputStream.size());
                meta.setContentType(mimeType);
                s3Client.putObject(srcBucket, FilenameUtils.getFullPath(srcKey) + fileName, is, meta);
                is.close();
                outputStream.close();
                entry = zis.getNextEntry();
            }
            zis.closeEntry();
            zis.close();

            // delete zip file when done
            System.out.println("Deleting zip file " + srcBucket + "/" + srcKey + "...");
            s3Client.deleteObject(new DeleteObjectRequest(srcBucket, srcKey));
            System.out.println("Done deleting");
        }
        return "Ok";
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Example 17
Source File: TestFetchS3Object.java From nifi with Apache License 2.0
@Test
public void testGetObject() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);

    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);

    runner.run(1);

    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertFalse(request.isRequesterPays());
    assertNull(request.getVersionId());

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
Example 18
Source File: TestFetchS3Object.java From nifi with Apache License 2.0
@Test
public void testGetObjectWithRequesterPays() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    runner.setProperty(FetchS3Object.REQUESTER_PAYS, "true");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);

    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);

    runner.run(1);

    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertTrue(request.isRequesterPays());
    assertNull(request.getVersionId());

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
Example 19
Source File: S3Folder.java From Scribengin with GNU Affero General Public License v3.0
public void create(String name, InputStream is, String mimeType) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(mimeType);
    create(name, is, metadata);
}