Python boto.s3.key.Key() Examples

The following are 30 code examples of boto.s3.key.Key(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module boto.s3.key, or try the search function.
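Before the project examples, here is a minimal, self-contained sketch of the usual Key() workflow (create a key on a bucket, write, read, delete). The bucket name and the credential setup are illustrative assumptions, not taken from any of the projects below.

import boto
from boto.s3.key import Key

# Assumes credentials are available via environment variables or ~/.boto,
# and that 'example-bucket' already exists; both are illustrative.
conn = boto.connect_s3()
bucket = conn.get_bucket('example-bucket')

k = Key(bucket)                       # a Key is a handle to one object in the bucket
k.key = 'notes/hello.txt'             # the object name (S3 has no real folders)
k.set_contents_from_string('hello')   # upload
print(k.get_contents_as_string())     # download
bucket.delete_key(k.key)              # remove the object again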
Example #1
Source File: flask_admin_s3_upload.py    From flask-admin-s3-upload with Apache License 2.0
def _delete_thumbnail(self, filename, storage_type, bucket_name):
        if not (storage_type and bucket_name):
            self._delete_thumbnail_local(filename)
            return

        if storage_type != 's3':
            raise ValueError(
                'Storage type "%s" is invalid, the only supported storage type'
                ' (apart from default local storage) is s3.' % storage_type)

        conn = S3Connection(self.access_key_id, self.access_key_secret)
        bucket = conn.get_bucket(bucket_name)

        path = self._get_s3_path(self.thumbnail_fn(filename))
        k = Key(bucket)
        k.key = path

        try:
            bucket.delete_key(k)
        except S3ResponseError:
            pass

Example #2
Source File: compress_css_js_files.py    From django-webpacker with MIT License
def upload_to_s3(css_file):
    bucket_name = settings.AWS_BUCKET_NAME
    conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)

    folder = 'webpack_bundles/'
    bucket = conn.get_bucket(bucket_name=bucket_name)

    filename = css_file.split('/')[-1]
    file_obj = open(css_file, 'r')
    content = file_obj.read()

    key = folder + filename
    bucket = conn.get_bucket(bucket_name=bucket_name)
    mime = mimetypes.guess_type(filename)[0]
    k = Key(bucket)
    k.key = key  # folder + filename
    k.set_metadata("Content-Type", mime)
    k.set_contents_from_string(content)
    public_read = True
    if public_read:
        k.set_acl("public-read") 
Example #3
Source File: backup.py    From janeway with GNU Affero General Public License v3.0
def handle_s3(tmp_path, start_time):
    print("Sending to S3.")
    file_name = '{0}.zip'.format(start_time)
    file_path = os.path.join(settings.BASE_DIR, 'files', 'temp', file_name)
    f = open(file_path, 'rb')

    END_POINT = settings.END_POINT
    S3_HOST = settings.S3_HOST
    UPLOADED_FILENAME = 'backups/{0}.zip'.format(start_time)
    # include folders in file path. If it doesn't exist, it will be created

    s3 = boto.s3.connect_to_region(END_POINT,
                                   aws_access_key_id=settings.S3_ACCESS_KEY,
                                   aws_secret_access_key=settings.S3_SECRET_KEY,
                                   host=S3_HOST)

    bucket = s3.get_bucket(settings.S3_BUCKET_NAME)
    k = Key(bucket)
    k.key = UPLOADED_FILENAME
    k.set_contents_from_file(f, cb=mycb, num_cb=200) 
Example #4
Source File: distribution_point.py    From python-jss with GNU General Public License v3.0
def _copy(self, filename, id_=-1):   # type: (str, int) -> None
        """Copy a file or folder to the bucket.

        Does not yet support chunking.

        Args:
            filename: Path to copy.
            destination: Remote path to copy file to.
        """
        bucket_key = os.path.basename(filename)
        exists = self.bucket.get_key(bucket_key)
        if exists:
            print("Already exists")
        else:
            k = Key(self.bucket)
            k.key = bucket_key
            k.set_metadata('jamf-package-id', id_)
            k.set_contents_from_filename(filename) 
Example #5
Source File: test_s3.py    From drf-to-s3 with MIT License
def setUp(self):
        import boto
        from boto.exception import NoAuthHandlerFound
        from boto.s3.key import Key

        keys = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
        try:
            for k in keys:
                os.environ[k]
            self.bucket_name = os.environ.get('AWS_TEST_BUCKET', 'drf-to-s3-test')
        except KeyError:
            self.skipTest('To test s3, set %s in .env' % ' and '.join(keys))

        conn = boto.connect_s3()
        bucket = conn.get_bucket(self.bucket_name)
        k = Key(bucket)
        k.key = "%s%s.txt" % (str(uuid.uuid4()), self.prefix)
        k.set_contents_from_string('This is a test of S3')

        self.existing_key = k.key
        self.existing_key_etag = k.etag
        self.bucket = bucket

        self.nonexisting_key = "%s%s.txt" % (str(uuid.uuid4()), self.prefix)
        self.new_key = None 
Example #6
Source File: user.py    From mltshp with Mozilla Public License 2.0
def set_profile_image(self, file_path, file_name, content_type):
        """
        Takes a local path, name and content-type, which are parameters passed in by
        nginx upload module.  Converts to RGB, resizes to thumbnail and uploads to S3.
        Returns False if some conditions aren't met, such as error making thumbnail
        or content type is one we don't support.
        """
        valid_content_types = ('image/gif', 'image/jpeg', 'image/jpg', 'image/png',)
        if content_type not in valid_content_types:
            return False

        destination =  cStringIO.StringIO()
        if not transform_to_square_thumbnail(file_path, 100*2, destination):
            return False

        bucket = S3Bucket()
        k = Key(bucket)
        k.key = "account/%s/profile.jpg" % (self.id)
        k.set_metadata('Content-Type', 'image/jpeg')
        k.set_metadata('Cache-Control', 'max-age=86400')
        k.set_contents_from_string(destination.getvalue())
        k.set_acl('public-read')
        self.profile_image = 1
        self.save()
        return True 
Example #7
Source File: test_s3.py    From drf-to-s3 with MIT License
def test_copy_fails_with_mismatched_etag_after_subsequent_update(self):
        from boto.s3.key import Key
        from drf_to_s3 import s3

        k = Key(self.bucket)
        k.key = self.existing_key
        k.set_contents_from_string('Another test')

        with self.assertRaises(s3.ObjectNotFoundException):
            s3.copy(
                src_bucket=self.bucket_name,
                src_key=self.existing_key,
                dst_bucket=self.bucket_name,
                dst_key=self.nonexisting_key,
                src_etag=self.existing_key_etag,
                validate_src_etag=True
            ) 
Example #8
Source File: test_deeds.py    From cccatalog with MIT License
def get_records(id_, iterator):
    conn = boto.connect_s3(anon=True, host='s3.amazonaws.com')
    bucket = conn.get_bucket('commoncrawl')

    for uri in iterator:
        key_ = Key(bucket, uri)
        _file = warc.WARCFile(fileobj=GzipStreamFile(key_))

        for record in _file:
            if record['Content-Type'] == 'application/json':
                record = json.loads(record.payload.read())
                try:
                    def cc_filter(x):
                        return "creativecommons.org" in x['url']

                    cc_links = filter(cc_filter, list(record['Envelope']['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']))
                    if len(cc_links) > 0:
                        yield record
                except KeyError:
                    pass 
Example #9
Source File: pusher.py    From landsat_ingestor with Apache License 2.0
def push_file(src_path, s3_path, verbose=False, overwrite=False):
    key = _get_key(s3_path)
    if key is not None:
        if not overwrite:
            raise Exception('File already at %s' % s3_path)
        if verbose:
            print 'Overwriting existing %s.' % s3_path

    key = Key(_get_bucket(), s3_path)
    if s3_path.endswith('.TIF') or s3_path.endswith('.tif'):
        key.content_type = 'image/tiff'
    if s3_path.endswith('.jpg'):
        key.content_type = 'image/jpeg'
    if s3_path.endswith('.txt'):
        key.content_type = 'text/plain'

    bytes_uploaded = key.set_contents_from_filename(src_path, policy='public-read')
    if verbose:
        print 'Uploaded %d bytes from %s to %s.' % (
            bytes_uploaded, src_path, s3_path) 
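Example #2 above derives the same header automatically with mimetypes.guess_type; if the extension whitelist here grows, a hedged one-liner along those lines could replace the chain of endswith checks:

import mimetypes

# 'application/octet-stream' is a conservative fallback for unknown extensions.
key.content_type = mimetypes.guess_type(s3_path)[0] or 'application/octet-stream'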
Example #10
Source File: flask_admin_s3_upload.py    From flask-admin-s3-upload with Apache License 2.0
def _delete_file(self, filename, obj):
        storage_type = getattr(obj, self.storage_type_field, '')
        bucket_name = getattr(obj, self.bucket_name_field, '')

        if not (storage_type and bucket_name):
            return super(S3FileUploadField, self)._delete_file(filename)

        if storage_type != 's3':
            raise ValueError(
                'Storage type "%s" is invalid, the only supported storage type'
                ' (apart from default local storage) is s3.' % storage_type)

        conn = S3Connection(self.access_key_id, self.access_key_secret)
        bucket = conn.get_bucket(bucket_name)

        path = self._get_s3_path(filename)
        k = Key(bucket)
        k.key = path

        try:
            bucket.delete_key(k)
        except S3ResponseError:
            pass 
Example #11
Source File: S3UploadPool.py    From mongodb_consistent_backup with Apache License 2.0
def upload(self, file_name, key_name):
        if self.s3_exists(key_name):
            s3_md5hex   = self.s3_md5hex(key_name)
            file_md5hex = file_md5hash(file_name)
            if s3_md5hex and file_md5hex == s3_md5hex:
                logging.warning("Key %s already exists with same checksum (%s), skipping" % (key_name, s3_md5hex))
                return
            else:
                logging.debug("Key %s already exists but the local file checksum differs (local:%s, s3:%s). Re-uploading" % (
                    key_name,
                    file_md5hex,
                    s3_md5hex
                ))
        file_size = self.get_file_size(file_name)
        if file_size >= self.multipart_min_bytes and file_size >= self.chunk_bytes:
            self.upload_multipart(file_name, key_name, file_size)
        else:
            result = self.start(file_name, key_name, file_size)
            self._uploads[file_name] = {
                "complete":  False,
                "multipart": False,
                "result":    result
            } 
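The checksum comparison above relies on two helpers that are not shown in the excerpt (file_md5hash and s3_md5hex). A minimal sketch of what they might look like with boto 2, under the assumption that the remote key was uploaded in a single part (the S3 ETag only equals the MD5 for non-multipart uploads):

import hashlib

def file_md5hash(file_name, blocksize=1024 * 1024):
    # Hex MD5 of a local file, read in chunks so large backups fit in memory.
    md5 = hashlib.md5()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(blocksize), b''):
            md5.update(chunk)
    return md5.hexdigest()

def s3_md5hex(bucket, key_name):
    # The ETag of a single-part upload is the MD5 hex digest, quoted.
    key = bucket.get_key(key_name)
    return key.etag.strip('"') if key else None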
Example #12
Source File: s3.py    From teye_scanner_for_book with GNU General Public License v3.0
def insert(self, report):
        """
            create a json string from an NmapReport instance
            and push it to S3 bucket.

            :param NmapReport: obj to insert
            :rtype: string
            :return: str id
            :todo: Add tagging option
        """
        try:
            oid = ObjectId()
            mykey = Key(self.bucket)
            mykey.key = str(oid)
            strjsonnmapreport = json.dumps(report, cls=ReportEncoder)
            mykey.set_contents_from_string(strjsonnmapreport)
        except:
            raise Exception("Failed to add nmap object in s3 bucket")
        return str(oid) 
Example #13
Source File: s3.py    From teye_scanner_for_book with GNU General Public License v3.0
def get(self, str_report_id=None):
        """
            select a NmapReport by Id.

            :param str: id
            :rtype: NmapReport
            :return: NmapReport object
        """
        nmapreport = None
        if str_report_id is not None and isinstance(str_report_id, str):
            try:
                mykey = Key(self.bucket)
                mykey.key = str_report_id
                nmapreportjson = json.loads(mykey.get_contents_as_string())
                nmapreport = NmapParser.parse_fromdict(nmapreportjson)
            except S3ResponseError:
                pass
        return nmapreport 
Example #14
Source File: rnaseq_unc_pipeline.py    From toil-scripts with Apache License 2.0
def upload_output_to_s3(job, job_vars):
    """
    If s3_dir is specified in arguments, file will be uploaded to S3 using boto.
    WARNING: ~/.boto credentials are necessary for this to succeed!

    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    import boto
    from boto.s3.key import Key

    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # Parse s3_dir
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.split('/')[0]
    bucket_dir = '/'.join(s3_dir.split('/')[1:])
    # I/O
    uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz')
    # Upload to S3 via boto
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    k.key = os.path.join(bucket_dir, uuid + '.tar.gz')
    k.set_contents_from_filename(uuid_tar) 
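Example #11 above branches to a separate multipart path for large files, while this example uploads the whole archive in one set_contents_from_filename call. For very large .tar.gz results, boto 2's multipart API could be used instead; a rough sketch, where the chunk size and the helper name are assumptions:

import math
import os

def upload_multipart(bucket, key_name, file_path, chunk_bytes=50 * 1024 * 1024):
    # Split the file into fixed-size parts and upload them sequentially.
    file_size = os.path.getsize(file_path)
    num_parts = int(math.ceil(file_size / float(chunk_bytes)))
    mp = bucket.initiate_multipart_upload(key_name)
    try:
        with open(file_path, 'rb') as f:
            for part_num in range(1, num_parts + 1):
                remaining = file_size - (part_num - 1) * chunk_bytes
                mp.upload_part_from_file(f, part_num, size=min(chunk_bytes, remaining))
        mp.complete_upload()
    except Exception:
        mp.cancel_upload()
        raise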
Example #15
Source File: connection.py    From aws-extender with MIT License
def make_request(self, method, bucket='', key='', headers=None, data='',
                     query_args=None, sender=None, override_num_retries=None,
                     retry_handler=None):
        if isinstance(bucket, self.bucket_class):
            bucket = bucket.name
        if isinstance(key, Key):
            key = key.name
        path = self.calling_format.build_path_base(bucket, key)
        boto.log.debug('path=%s' % path)
        auth_path = self.calling_format.build_auth_path(bucket, key)
        boto.log.debug('auth_path=%s' % auth_path)
        host = self.calling_format.build_host(self.server_name(), bucket)
        if query_args:
            path += '?' + query_args
            boto.log.debug('path=%s' % path)
            auth_path += '?' + query_args
            boto.log.debug('auth_path=%s' % auth_path)
        return super(S3Connection, self).make_request(
            method, path, headers,
            data, host, auth_path, sender,
            override_num_retries=override_num_retries,
            retry_handler=retry_handler
        ) 
Example #16
Source File: rnaseq_unc_tcga_versions.py    From toil-scripts with Apache License 2.0
def upload_output_to_s3(job, job_vars):
    """
    If s3_dir is specified in arguments, file will be uploaded to S3 using boto.
    WARNING: ~/.boto credentials are necessary for this to succeed!

    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    import boto
    from boto.s3.key import Key

    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # Parse s3_dir
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.split('/')[0]
    bucket_dir = '/'.join(s3_dir.split('/')[1:])
    # I/O
    uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz')
    # Upload to S3 via boto
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    k.key = os.path.join(bucket_dir, uuid + '.tar.gz')
    k.set_contents_from_filename(uuid_tar) 
Example #17
Source File: s3.py    From FloodMapsWorkshop with Apache License 2.0
def CopyToS3( s3_bucket, s3_folder, file_list, force, verbose ):
	aws_access_key 			= os.environ.get('AWS_ACCESSKEYID')
	aws_secret_access_key 	= os.environ.get('AWS_SECRETACCESSKEY')
	
	conn 		= S3Connection(aws_access_key, aws_secret_access_key)
	
	mybucket 	= conn.get_bucket(s3_bucket)
	k 			= Key(mybucket)

	for f in file_list:
		fname	= os.path.basename(f)
		k.key 	= os.path.join(s3_folder, fname)
	
		# Check if it already exists
		possible_key = mybucket.get_key(k.key)
	
		if force or not possible_key:
			if verbose:
				print "storing to s3:", mybucket, k.key
	
			k.set_contents_from_filename(f)
			mybucket.set_acl('public-read', k.key ) 
Example #18
Source File: maxswe.py    From FloodMapsWorkshop with Apache License 2.0
def CopyToS3( s3_folder, file_list ):
	aws_access_key 			= os.environ.get('AWS_ACCESSKEYID')
	aws_secret_access_key 	= os.environ.get('AWS_SECRETACCESSKEY')
	
	conn 		= S3Connection(aws_access_key, aws_secret_access_key)
	
	mybucket 	= conn.get_bucket(config.BUCKET)
	k 			= Key(mybucket)

	for f in file_list:
		fname	= os.path.basename(f)
		k.key 	= os.path.join(s3_folder, fname)
	
		# Check if it already exists
		possible_key = mybucket.get_key(k.key)
	
		if force or not possible_key:
			if verbose:
				print "storing to s3:", mybucket, k.key
	
			k.set_contents_from_filename(f)
			mybucket.set_acl('public-read', k.key ) 
Example #19
Source File: postgres_aggregation.py    From aggregation with Apache License 2.0
def __write_to_s3__(self,bucket,path,fname,csv_contents):
        result_bucket = self.S3_conn.get_bucket(bucket)
        k = Key(result_bucket)
        t = datetime.datetime.now()
        #fname = str(t.year) + "-" + str(t.month) + "-" + str(t.day) + "_" + str(t.hour) + "_" + str(t.minute)
        k.key = path+fname+".csv"

        k.set_contents_from_string(csv_contents) 
Example #20
Source File: s3.py    From seafobj with Apache License 2.0
def obj_exists(self, repo_id, obj_id):
        if not self.s3_client.conn or not self.s3_client.bucket:
            self.s3_client.do_connect()

        bucket = self.s3_client.bucket
        s3_path = '%s/%s' % (repo_id, obj_id)
        key = Key(bucket=bucket, name=s3_path)
        exists = key.exists()
        self.dest_key = key

        return exists 
Example #21
Source File: s3.py    From seafobj with Apache License 2.0
def write_obj(self, data, repo_id, obj_id):
        if not self.s3_client.conn or not self.s3_client.bucket:
            self.s3_client.do_connect()

        bucket = self.s3_client.bucket
        s3_path = '%s/%s' % (repo_id, obj_id)
        key = Key(bucket=bucket, name=s3_path)
        key.set_contents_from_string(data) 
Example #22
Source File: s3.py    From seafobj with Apache License 2.0
def remove_obj(self, repo_id, obj_id):
        if not self.s3_client.conn or not self.s3_client.bucket:
            self.s3_client.do_connect()

        bucket = self.s3_client.bucket
        s3_path = '%s/%s' % (repo_id, obj_id)
        key = Key(bucket=bucket, name=s3_path)
        bucket.delete_key(key) 
Example #23
Source File: test_sources.py    From shavar with Mozilla Public License 2.0
def test_refresh(self):
        with mock_s3():
            conn = boto.connect_s3()
            b = conn.create_bucket(self.bucket_name)
            k = Key(b)
            k.name = self.key_name
            k.set_contents_from_string(self.add + b'\n' + self.sub)

            f = S3FileSource("s3+file://{0}/{1}".format(self.bucket_name,
                                                        self.key_name),
                             0.5)
            f.load()
            # Change the content of the file to change the MD5 reported
            k.set_contents_from_string("%s\n%s" % (self.sub, self.add))
            self.assertTrue(f.needs_refresh()) 
Example #24
Source File: s3.py    From seafobj with Apache License 2.0
def read_object_content(self, obj_id):
        if not self.conn:
            self.do_connect()

        k = Key(bucket=self.bucket, name=obj_id)

        return k.get_contents_as_string() 
Example #25
Source File: test_sources.py    From shavar with Mozilla Public License 2.0
def test_load(self):
        with mock_s3():
            conn = boto.connect_s3()
            b = conn.create_bucket(self.bucket_name)
            k = Key(b)
            k.name = self.key_name
            k.set_contents_from_string(self.add + b'\n' + self.sub)

            f = S3FileSource("s3+file://{0}/{1}".format(self.bucket_name,
                                                        self.key_name),
                             0.5)
            f.load()
            self.assertEqual(f.chunks, ChunkList(add_chunks=simple_adds,
                                                 sub_chunks=simple_subs)) 
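The mock_s3 context manager used in Examples #23 and #25 comes from the moto library; its import falls outside the excerpt. A stripped-down version of the same pattern, assuming an older moto release that still patches boto 2:

from moto import mock_s3
import boto
from boto.s3.key import Key

@mock_s3
def test_roundtrip():
    # No real AWS calls are made; moto intercepts boto's S3 requests.
    conn = boto.connect_s3()
    bucket = conn.create_bucket('example-bucket')
    k = Key(bucket)
    k.key = 'hello.txt'
    k.set_contents_from_string('hello')
    assert bucket.get_key('hello.txt').get_contents_as_string() == b'hello'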
Example #26
Source File: S3ObjectstoreSiteMover.py    From pilot with Apache License 2.0
def get_key(self, url, create=False):
        import boto
        import boto.s3.connection
        from boto.s3.key import Key

        parsed = urlparse.urlparse(url)
        scheme = parsed.scheme
        self.hostname = parsed.netloc.partition(':')[0]
        self.port = int(parsed.netloc.partition(':')[2])
        path = parsed.path.strip("/")

        pos = path.index("/")
        bucket_name = path[:pos]
        key_name = path[pos+1:]

        bucket_key = "%s_%s_%s" % (self.hostname, self.port, bucket_name)
        if bucket_key in self.buckets:
            bucket = self.buckets[bucket_key]
        else:
            self.__conn = boto.connect_s3(
                aws_access_key_id = self.access_key,
                aws_secret_access_key = self.secret_key,
                host = self.hostname,
                port = self.port,
                is_secure=self.is_secure, # False,               # uncomment if you are not using ssl
                calling_format = boto.s3.connection.OrdinaryCallingFormat(),
                )

            try:
                bucket = self.__conn.get_bucket(bucket_name)
            except boto.exception.S3ResponseError, e:
                tolog("Cannot get bucket: %s" % traceback.format_exc())

                bucket = self.__conn.create_bucket(bucket_name) 
Example #27
Source File: text_aggregation.py    From aggregation with Apache License 2.0
def __get_signed_url__(self):
        """
        from http://stackoverflow.com/questions/33549254/how-to-generate-url-from-boto3-in-amazon-web-services
        """
        # s3Client = boto3.client('s3')
        _,s3 = self.__s3_connect__()

        aws_tar = self.__get_aws_tar_name__()
        key = "panoptes-uploads.zooniverse.org/production/project_aggregations_export/"+aws_tar

        url = s3.generate_presigned_url('get_object', Params = {'Bucket': 'zooniverse-static', 'Key': key}, ExpiresIn = 604800)

        return url 
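Example #27 signs the download URL with the boto3 client; with boto 2 and Key(), key.generate_url gives the equivalent result. A short sketch, with an illustrative object name:

from boto.s3.connection import S3Connection
from boto.s3.key import Key

conn = S3Connection()  # credentials from the environment or boto config
bucket = conn.get_bucket('zooniverse-static', validate=False)

key = Key(bucket)
key.key = 'panoptes-uploads.zooniverse.org/production/project_aggregations_export/example.tar.gz'  # illustrative
url = key.generate_url(expires_in=604800)  # signed GET URL, valid for 7 days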
Example #28
Source File: text_aggregation.py    From aggregation with Apache License 2.0
def __s3_upload__(self):
        """
        upload the file to s3
        see http://boto.cloudhackers.com/en/latest/s3_tut.html
        :return:
        """
        # s3 = boto3.resource('s3')
        s3,_ = self.__s3_connect__()

        aws_tar = self.__get_aws_tar_name__()

        b = s3.get_bucket('zooniverse-static')

        key_str = "panoptes-uploads.zooniverse.org/production/project_aggregations_export/"+aws_tar

        s3_key = Key(b)
        s3_key.key = key_str

        if not os.path.exists("/tmp/"+aws_tar):
            print("warning the tar file does not exist - creating an temporary one.")
            panoptes_file = open("/app/config/aggregation.yml","rb")
            api_details = yaml.load(panoptes_file)

            rollbar_token = api_details[self.environment]["rollbar"]
            rollbar.init(rollbar_token,self.environment)
            rollbar.report_message('the tar file does not exist', 'warning')
            with open("/tmp/"+aws_tar,"w") as f:
                f.write("")

        s3_key.set_contents_from_filename("/tmp/"+aws_tar) 
Example #29
Source File: s3.py    From whisper-backup with Apache License 2.0
def delete(self, src):
        """Delete the object in S3 referenced by the key name src."""

        if self.noop:
            logger.info("No-Op Delete: %s" % src)
        else:
            k = Key(self.__b)
            k.key = src
            k.delete() 
Example #30
Source File: s3.py    From whisper-backup with Apache License 2.0
def put(self, dst, data):
        """Store the contents of the string data at a key named by dst
           in S3."""

        if self.noop:
            logger.info("No-Op Put: %s" % dst)
        else:
            k = Key(self.__b)
            k.key = dst
            k.set_contents_from_string(data)