Python botocore.UNSIGNED Examples
The following are 30 code examples of botocore.UNSIGNED, the sentinel value that configures a botocore or boto3 client to send unsigned (anonymous) requests. Each example notes the project, source file, and license it was taken from, so you can follow it back to the original code.
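Before the project-specific examples, here is a minimal sketch of the common pattern: passing botocore.UNSIGNED as the signature_version of a Config object so the resulting client skips request signing and can read public resources without credentials. The bucket name below is a hypothetical placeholder for any publicly readable bucket.

import boto3
from botocore import UNSIGNED
from botocore.config import Config

# Build an S3 client that skips request signing entirely, so it works
# even when no AWS credentials are configured.
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))

# 'some-public-bucket' is a hypothetical, publicly readable bucket.
response = s3.list_objects_v2(Bucket='some-public-bucket', MaxKeys=10)
for obj in response.get('Contents', []):
    print(obj['Key'], obj['Size'])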
Example #1
Source File: aws.py From indra with BSD 2-Clause "Simplified" License | 7 votes |
def get_s3_client(unsigned=True):
    """Return a boto3 S3 client with optional unsigned config.

    Parameters
    ----------
    unsigned : Optional[bool]
        If True, the client will be using unsigned mode in which public
        resources can be accessed without credentials. Default: True

    Returns
    -------
    botocore.client.S3
        A client object to AWS S3.
    """
    if unsigned:
        return boto3.client('s3', config=Config(signature_version=UNSIGNED))
    else:
        return boto3.client('s3')
Example #2
Source File: utils.py From armory with MIT License | 6 votes |
def download_file_from_s3(bucket_name: str, key: str, local_path: str) -> None:
    """
    Downloads file from S3 anonymously

    :param bucket_name: S3 Bucket name
    :param key: S3 File key name
    :param local_path: Local file path to download as
    """
    verify_ssl = get_verify_ssl()
    if not os.path.isfile(local_path):
        client = boto3.client(
            "s3", config=Config(signature_version=UNSIGNED), verify=verify_ssl
        )

        try:
            logger.info("Downloading S3 data file...")
            total = client.head_object(Bucket=bucket_name, Key=key)["ContentLength"]
            with ProgressPercentage(client, bucket_name, key, total) as Callback:
                client.download_file(bucket_name, key, local_path, Callback=Callback)
        except ClientError:
            raise KeyError(f"File {key} not available in {bucket_name} bucket.")
    else:
        logger.info(f"Reusing cached file {local_path}...")
Example #3
Source File: ls_public_bucket.py From cube-in-a-box with MIT License | 6 votes |
def iterate_datasets(bucket_name, config, prefix, suffix, start_date, end_date, func, unsafe, sources_policy):
    manager = Manager()
    queue = manager.Queue()
    s3 = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
    bucket = s3.Bucket(bucket_name)
    logging.info("Bucket : %s prefix: %s ", bucket_name, str(prefix))
    # safety = 'safe' if not unsafe else 'unsafe'
    worker_count = cpu_count() * 2

    processess = []
    for i in range(worker_count):
        proc = Process(target=worker, args=(config, bucket_name, prefix, suffix, start_date, end_date, func, unsafe, sources_policy, queue,))
        processess.append(proc)
        proc.start()

    for obj in bucket.objects.filter(Prefix=str(prefix)):
        if (obj.key.endswith(suffix)):
            queue.put(obj.key)

    for i in range(worker_count):
        queue.put(GUARDIAN)

    for proc in processess:
        proc.join()
Example #4
Source File: upload.py From zulip with Apache License 2.0 | 6 votes |
def upload_export_tarball(self, realm: Optional[Realm], tarball_path: str) -> str:
    def percent_callback(bytes_transferred: Any) -> None:
        sys.stdout.write('.')
        sys.stdout.flush()

    # We use the avatar bucket, because it's world-readable.
    key = self.avatar_bucket.Object(os.path.join("exports", generate_random_token(32),
                                                 os.path.basename(tarball_path)))

    key.upload_file(tarball_path, Callback=percent_callback)

    session = botocore.session.get_session()
    config = Config(signature_version=botocore.UNSIGNED)

    public_url = session.create_client('s3', config=config).generate_presigned_url(
        'get_object',
        Params={
            'Bucket': self.avatar_bucket.name,
            'Key': key.key,
        },
        ExpiresIn=0,
    )
    return public_url
Example #5
Source File: autoIndex.py From cube-in-a-box with MIT License | 6 votes |
def worker(parse_only, queue):
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    dc = datacube.Datacube()
    idx = dc.index
    while True:
        try:
            url = queue.get(timeout=60)
            if url == STOP_SIGN:
                break
            logging.info("Processing {} {}".format(url, current_process()))
            index_dataset(idx, s3, url, parse_only)
            queue.task_done()
        except Empty:
            break
        except EOFError:
            break
Example #6
Source File: test_integration.py From pipeline with BSD 3-Clause "New" or "Revised" License | 6 votes |
def fetch_autoclaved_bucket(dst_dir, bucket_date):
    print("Fetch bucket")
    dst_bucket_dir = os.path.join(dst_dir, bucket_date)
    if not os.path.exists(dst_bucket_dir):
        os.makedirs(dst_bucket_dir)

    client = boto3.client("s3", config=Config(signature_version=UNSIGNED))
    resource = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    prefix = "autoclaved/jsonl.tar.lz4/{}/".format(bucket_date)
    paginator = client.get_paginator("list_objects")
    for result in paginator.paginate(Bucket="ooni-data", Delimiter="/", Prefix=prefix):
        for f in result.get("Contents", []):
            fkey = f.get("Key")
            dst_pathname = os.path.join(dst_bucket_dir, os.path.basename(fkey))
            try:
                s = os.stat(dst_pathname)
                if s.st_size == f.get("Size"):
                    continue
            except Exception:
                # XXX maybe make this more strict. It's FileNotFoundError on py3 and OSError on py2
                pass
            print("[+] Downloading {}".format(dst_pathname))
            resource.meta.client.download_file("ooni-data", fkey, dst_pathname)
Example #7
Source File: utils.py From aws-extender with MIT License | 6 votes |
def fix_s3_host(request, signature_version, region_name,
                default_endpoint_url='s3.amazonaws.com', **kwargs):
    """
    This handler looks at S3 requests just before they are signed.  If there
    is a bucket name on the path (true for everything except ListAllBuckets)
    it checks to see if that bucket name conforms to the DNS naming
    conventions.  If it does, it alters the request to use ``virtual hosting``
    style addressing rather than ``path-style`` addressing.  This allows us to
    avoid 301 redirects for all bucket names that can be CNAME'd.
    """
    # By default we do not use virtual hosted style addressing when
    # signed with signature version 4.
    if signature_version is not botocore.UNSIGNED and \
            's3v4' in signature_version:
        return
    elif not _allowed_region(region_name):
        return
    try:
        switch_to_virtual_host_style(
            request, signature_version, default_endpoint_url)
    except InvalidDNSNameError as e:
        bucket_name = e.kwargs['bucket_name']
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     bucket_name)
Example #8
Source File: aws.py From shimit with GNU General Public License v3.0 | 6 votes |
def assume_role(cls, role_arn, principal_arn, saml_response, duration=3600):
    ''' Assumes the desired role using the saml_response given. The response should be b64 encoded.
    Duration is in seconds
    :param role_arn: role amazon resource name
    :param principal_arn: principal name
    :param saml_response: SAML object to assume role with
    :param duration: session duration (default: 3600)
    :return: AWS session token
    '''
    # Assume role with new SAML
    conn = boto3.client('sts', config=client.Config(signature_version=botocore.UNSIGNED,
                                                    user_agent=cls.USER_AGENT,
                                                    region_name=None))
    aws_session_token = conn.assume_role_with_saml(
        RoleArn=role_arn,
        PrincipalArn=principal_arn,
        SAMLAssertion=saml_response,
        DurationSeconds=duration,
    )
    return aws_session_token
Example #9
Source File: utils.py From faces with GNU General Public License v2.0 | 6 votes |
def fix_s3_host(request, signature_version, region_name,
                default_endpoint_url='s3.amazonaws.com', **kwargs):
    """
    This handler looks at S3 requests just before they are signed.  If there
    is a bucket name on the path (true for everything except ListAllBuckets)
    it checks to see if that bucket name conforms to the DNS naming
    conventions.  If it does, it alters the request to use ``virtual hosting``
    style addressing rather than ``path-style`` addressing.  This allows us to
    avoid 301 redirects for all bucket names that can be CNAME'd.
    """
    # By default we do not use virtual hosted style addressing when
    # signed with signature version 4.
    if signature_version is not botocore.UNSIGNED and \
            's3v4' in signature_version:
        return
    elif not _allowed_region(region_name):
        return
    try:
        switch_to_virtual_host_style(
            request, signature_version, default_endpoint_url)
    except InvalidDNSNameError as e:
        bucket_name = e.kwargs['bucket_name']
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     bucket_name)
Example #10
Source File: test_pynamodb.py From aws-xray-sdk-python with Apache License 2.0 | 6 votes |
def test_only_dynamodb_calls_are_traced():
    """Test only a single subsegment is created for other AWS services.

    As the pynamodb patch applies the botocore patch as well, we need
    to ensure that only one subsegment is created for all calls not
    made by PynamoDB. As PynamoDB calls botocore differently than the
    botocore patch expects we also just get a single subsegment per
    PynamoDB call.
    """
    session = botocore.session.get_session()
    s3 = session.create_client('s3', region_name='us-west-2',
                               config=Config(signature_version=UNSIGNED))
    try:
        s3.get_bucket_location(Bucket='mybucket')
    except ClientError:
        pass

    subsegments = xray_recorder.current_segment().subsegments
    assert len(subsegments) == 1
    assert subsegments[0].name == 's3'
    assert len(subsegments[0].subsegments) == 0
Example #11
Source File: utils.py From faces with GNU General Public License v2.0 | 6 votes |
def fix_s3_host(request, signature_version, region_name,
                default_endpoint_url='s3.amazonaws.com', **kwargs):
    """
    This handler looks at S3 requests just before they are signed.  If there
    is a bucket name on the path (true for everything except ListAllBuckets)
    it checks to see if that bucket name conforms to the DNS naming
    conventions.  If it does, it alters the request to use ``virtual hosting``
    style addressing rather than ``path-style`` addressing.  This allows us to
    avoid 301 redirects for all bucket names that can be CNAME'd.
    """
    # By default we do not use virtual hosted style addressing when
    # signed with signature version 4.
    if signature_version is not botocore.UNSIGNED and \
            's3v4' in signature_version:
        return
    elif not _allowed_region(region_name):
        return
    try:
        switch_to_virtual_host_style(
            request, signature_version, default_endpoint_url)
    except InvalidDNSNameError as e:
        bucket_name = e.kwargs['bucket_name']
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     bucket_name)
Example #12
Source File: s3.py From gigantum-client with MIT License | 5 votes |
def _get_client(self):
    return boto3.client('s3', config=Config(signature_version=UNSIGNED))
Example #13
Source File: signers.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 5 votes |
def _choose_signer(self, operation_name, signing_type, context):
    """
    Allow setting the signature version via the choose-signer event.
    A value of `botocore.UNSIGNED` means no signing will be performed.

    :param operation_name: The operation to sign.
    :param signing_type: The type of signing that the signer is to be used
        for.
    :return: The signature version to sign with.
    """
    signing_type_suffix_map = {
        'presign-post': '-presign-post',
        'presign-url': '-query'
    }
    suffix = signing_type_suffix_map.get(signing_type, '')

    signature_version = self._signature_version
    if signature_version is not botocore.UNSIGNED and not \
            signature_version.endswith(suffix):
        signature_version += suffix

    handler, response = self._event_emitter.emit_until_response(
        'choose-signer.{0}.{1}'.format(
            self._service_id.hyphenize(), operation_name),
        signing_name=self._signing_name, region_name=self._region_name,
        signature_version=signature_version, context=context)
    if response is not None:
        signature_version = response
        # The suffix needs to be checked again in case we get an improper
        # signature version from choose-signer.
        if signature_version is not botocore.UNSIGNED and not \
                signature_version.endswith(suffix):
            signature_version += suffix

    return signature_version
Example #14
Source File: allennlp_file_utils.py From tatk with Apache License 2.0 | 5 votes |
def get_s3_resource():
    session = boto3.session.Session()
    if session.get_credentials() is None:
        # Use unsigned requests.
        s3_resource = session.resource("s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED))
    else:
        s3_resource = session.resource("s3")
    return s3_resource
Example #15
Source File: connector.py From aws-xray-sdk-python with Apache License 2.0 | 5 votes |
def _create_xray_client(self, ip='127.0.0.1', port='2000'):
    session = botocore.session.get_session()
    url = 'http://%s:%s' % (ip, port)
    return session.create_client('xray', endpoint_url=url,
                                 region_name='us-west-2',
                                 config=Config(signature_version=UNSIGNED),
                                 aws_access_key_id='',
                                 aws_secret_access_key=''
                                 )
Example #16
Source File: handlers.py From aws-extender with MIT License | 5 votes |
def set_operation_specific_signer(context, signing_name, **kwargs):
    """ Choose the operation-specific signer.

    Individual operations may have a different auth type than the service as
    a whole. This will most often manifest as operations that should not be
    authenticated at all, but can include other auth modes such as sigv4
    without body signing.
    """
    auth_type = context.get('auth_type')

    # Auth type will be None if the operation doesn't have a configured auth
    # type.
    if not auth_type:
        return

    # Auth type will be the string value 'none' if the operation should not
    # be signed at all.
    if auth_type == 'none':
        return botocore.UNSIGNED

    if auth_type.startswith('v4'):
        signature_version = 'v4'
        if signing_name == 's3':
            signature_version = 's3v4'

        # If the operation needs an unsigned body, we set additional context
        # allowing the signer to be aware of this.
        if auth_type == 'v4-unsigned-body':
            context['payload_signing_enabled'] = False

        return signature_version
Example #17
Source File: signers.py From aws-extender with MIT License | 5 votes |
def _choose_signer(self, operation_name, signing_type, context):
    """
    Allow setting the signature version via the choose-signer event.
    A value of `botocore.UNSIGNED` means no signing will be performed.

    :param operation_name: The operation to sign.
    :param signing_type: The type of signing that the signer is to be used
        for.
    :return: The signature version to sign with.
    """
    signing_type_suffix_map = {
        'presign-post': '-presign-post',
        'presign-url': '-query'
    }
    suffix = signing_type_suffix_map.get(signing_type, '')

    signature_version = self._signature_version
    if signature_version is not botocore.UNSIGNED and not \
            signature_version.endswith(suffix):
        signature_version += suffix

    handler, response = self._event_emitter.emit_until_response(
        'choose-signer.{0}.{1}'.format(self._service_name, operation_name),
        signing_name=self._signing_name, region_name=self._region_name,
        signature_version=signature_version, context=context)
    if response is not None:
        signature_version = response
        # The suffix needs to be checked again in case we get an improper
        # signature version from choose-signer.
        if signature_version is not botocore.UNSIGNED and not \
                signature_version.endswith(suffix):
            signature_version += suffix

    return signature_version
Example #18
Source File: auth.py From hass-nabucasa with GNU General Public License v3.0 | 5 votes |
def _cognito(self, **kwargs):
    """Get the client credentials."""
    return pycognito.Cognito(
        user_pool_id=self.cloud.user_pool_id,
        client_id=self.cloud.cognito_client_id,
        user_pool_region=self.cloud.region,
        botocore_config=botocore.config.Config(signature_version=botocore.UNSIGNED),
        session=self._session,
        **kwargs,
    )
Example #19
Source File: handlers.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 5 votes |
def set_operation_specific_signer(context, signing_name, **kwargs):
    """ Choose the operation-specific signer.

    Individual operations may have a different auth type than the service as
    a whole. This will most often manifest as operations that should not be
    authenticated at all, but can include other auth modes such as sigv4
    without body signing.
    """
    auth_type = context.get('auth_type')

    # Auth type will be None if the operation doesn't have a configured auth
    # type.
    if not auth_type:
        return

    # Auth type will be the string value 'none' if the operation should not
    # be signed at all.
    if auth_type == 'none':
        return botocore.UNSIGNED

    if auth_type.startswith('v4'):
        signature_version = 'v4'
        if signing_name == 's3':
            signature_version = 's3v4'

        # If the operation needs an unsigned body, we set additional context
        # allowing the signer to be aware of this.
        if auth_type == 'v4-unsigned-body':
            context['payload_signing_enabled'] = False

        return signature_version
Example #20
Source File: ls_public_bucket.py From cube-in-a-box with MIT License | 5 votes |
def worker(config, bucket_name, prefix, suffix, start_date, end_date, func, unsafe, sources_policy, queue):
    dc = datacube.Datacube(config=config)
    index = dc.index
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    safety = 'safe' if not unsafe else 'unsafe'

    while True:
        try:
            key = queue.get(timeout=60)
            if key == GUARDIAN:
                break
            logging.info("Processing %s %s", key, current_process())
            obj = s3.Object(bucket_name, key).get()

            raw = obj['Body'].read()
            if suffix == AWS_PDS_TXT_SUFFIX:
                # Attempt to process text document
                raw_string = raw.decode('utf8')
                txt_doc = _parse_group(iter(raw_string.split("\n")))['L1_METADATA_FILE']
                data = make_metadata_doc(txt_doc, bucket_name, key)
            else:
                yaml = YAML(typ=safety, pure=False)
                yaml.default_flow_style = False
                data = yaml.load(raw)

            uri = get_s3_url(bucket_name, key)
            cdt = data['creation_dt']

            # Use the fact lexicographical ordering matches the chronological ordering
            if cdt >= start_date and cdt < end_date:
                logging.info("calling %s", func)
                func(data, uri, index, sources_policy)
            queue.task_done()
        except Empty:
            break
        except EOFError:
            break
Example #21
Source File: autoIndex.py From cube-in-a-box with MIT License | 5 votes |
def index_datasets(items, parse_only=False):
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    dc = datacube.Datacube()
    idx = dc.index

    for item in items:
        if "MTL" in item.assets:
            index_dataset(idx, s3, item.assets["MTL"]["href"], parse_only)
        else:
            logger.info("Item {} does not have an MTL asset (Sentinel2?) - skipping".format(item))
Example #22
Source File: credentials.py From runway with Apache License 2.0 | 5 votes |
def _get_credentials(self):
    """Get credentials by calling SSO get role credentials."""
    config = Config(
        signature_version=UNSIGNED,
        region_name=self._sso_region,
    )
    client = self._client_creator('sso', config=config)

    kwargs = {
        'roleName': self._role_name,
        'accountId': self._account_id,
        'accessToken': self._token_loader(self._start_url),
    }
    try:
        response = client.get_role_credentials(**kwargs)
    except client.exceptions.UnauthorizedException:
        raise UnauthorizedSSOTokenError()
    credentials = response['roleCredentials']

    credentials = {
        'ProviderType': 'sso',
        'Credentials': {
            'AccessKeyId': credentials['accessKeyId'],
            'SecretAccessKey': credentials['secretAccessKey'],
            'SessionToken': credentials['sessionToken'],
            'Expiration': self._parse_timestamp(credentials['expiration']),
        }
    }
    return credentials
Example #23
Source File: resource_client.py From cloudformation-cli with Apache License 2.0 | 5 votes |
def __init__(
    self,
    function_name,
    endpoint,
    region,
    schema,
    overrides,
    inputs=None,
    role_arn=None,
):  # pylint: disable=too-many-arguments
    self._schema = schema
    self._session = create_sdk_session(region)
    self._creds = get_temporary_credentials(
        self._session, LOWER_CAMEL_CRED_KEYS, role_arn
    )
    self._function_name = function_name
    if endpoint.startswith("http://"):
        self._client = self._session.client(
            "lambda",
            endpoint_url=endpoint,
            use_ssl=False,
            verify=False,
            config=Config(
                signature_version=UNSIGNED,
                # needs to be long if docker is running on a slow machine
                read_timeout=5 * 60,
                retries={"max_attempts": 0},
                region_name=self._session.region_name,
            ),
        )
    else:
        self._client = self._session.client("lambda", endpoint_url=endpoint)

    self._schema = None
    self._strategy = None
    self._update_strategy = None
    self._invalid_strategy = None
    self._overrides = overrides
    self._update_schema(schema)
    self._inputs = inputs
Example #24
Source File: resolver.py From cakechat with Apache License 2.0 | 5 votes |
def get_s3_resource():
    return boto3.resource('s3', config=Config(signature_version=UNSIGNED))
Example #25
Source File: data.py From ATM with MIT License | 5 votes |
def download_demo(datasets, path=None):
    if not isinstance(datasets, list):
        datasets = [datasets]

    if path is None:
        path = os.path.join(os.getcwd(), 'demos')

    if not os.path.exists(path):
        os.makedirs(path)

    client = boto3.client('s3', config=Config(signature_version=UNSIGNED))

    paths = list()
    for dataset in datasets:
        save_path = os.path.join(path, dataset)
        try:
            LOGGER.info('Downloading {}'.format(dataset))
            client.download_file('atm-data', dataset, save_path)
            paths.append(save_path)
        except ClientError as e:
            LOGGER.error('An error occurred trying to download from AWS3.'
                         'The following error has been returned: {}'.format(e))

    return paths[0] if len(paths) == 1 else paths
Example #26
Source File: data.py From ATM with MIT License | 5 votes |
def get_demos(args=None):
    client = boto3.client('s3', config=Config(signature_version=UNSIGNED))
    available_datasets = [obj['Key'] for obj in client.list_objects(Bucket='atm-data')['Contents']]
    return available_datasets
Example #27
Source File: sparkcc.py From cc-pyspark with MIT License | 5 votes |
def fetch_process_warc_records(self, rows):
    no_sign_request = botocore.client.Config(
        signature_version=botocore.UNSIGNED)
    s3client = boto3.client('s3', config=no_sign_request)
    bucketname = "commoncrawl"
    no_parse = (not self.warc_parse_http_header)

    for row in rows:
        url = row[0]
        warc_path = row[1]
        offset = int(row[2])
        length = int(row[3])
        self.get_logger().debug("Fetching WARC record for {}".format(url))
        rangereq = 'bytes={}-{}'.format(offset, (offset+length-1))
        try:
            response = s3client.get_object(Bucket=bucketname,
                                           Key=warc_path,
                                           Range=rangereq)
        except botocore.client.ClientError as exception:
            self.get_logger().error(
                'Failed to download: {} ({}, offset: {}, length: {}) - {}'
                .format(url, warc_path, offset, length, exception))
            self.warc_input_failed.add(1)
            continue
        record_stream = BytesIO(response["Body"].read())
        try:
            for record in ArchiveIterator(record_stream,
                                          no_record_parse=no_parse):
                for res in self.process_record(record):
                    yield res
                self.records_processed.add(1)
        except ArchiveLoadFailed as exception:
            self.warc_input_failed.add(1)
            self.get_logger().error(
                'Invalid WARC record: {} ({}, offset: {}, length: {}) - {}'
                .format(url, warc_path, offset, length, exception))
Example #28
Source File: credentials.py From aws-builders-fair-projects with Apache License 2.0 | 5 votes |
def _get_credentials(self):
    """Get credentials by calling assume role."""
    kwargs = self._assume_role_kwargs()
    # Assume role with web identity does not require credentials other than
    # the token, explicitly configure the client to not sign requests.
    config = Config(signature_version=UNSIGNED)
    client = self._client_creator('sts', config=config)
    return client.assume_role_with_web_identity(**kwargs)
Example #29
Source File: handlers.py From aws-builders-fair-projects with Apache License 2.0 | 5 votes |
def set_operation_specific_signer(context, signing_name, **kwargs):
    """ Choose the operation-specific signer.

    Individual operations may have a different auth type than the service as
    a whole. This will most often manifest as operations that should not be
    authenticated at all, but can include other auth modes such as sigv4
    without body signing.
    """
    auth_type = context.get('auth_type')

    # Auth type will be None if the operation doesn't have a configured auth
    # type.
    if not auth_type:
        return

    # Auth type will be the string value 'none' if the operation should not
    # be signed at all.
    if auth_type == 'none':
        return botocore.UNSIGNED

    if auth_type.startswith('v4'):
        signature_version = 'v4'
        if signing_name == 's3':
            signature_version = 's3v4'

        # If the operation needs an unsigned body, we set additional context
        # allowing the signer to be aware of this.
        if auth_type == 'v4-unsigned-body':
            context['payload_signing_enabled'] = False

        return signature_version
Example #30
Source File: signers.py From aws-builders-fair-projects with Apache License 2.0 | 5 votes |
def _choose_signer(self, operation_name, signing_type, context):
    """
    Allow setting the signature version via the choose-signer event.
    A value of `botocore.UNSIGNED` means no signing will be performed.

    :param operation_name: The operation to sign.
    :param signing_type: The type of signing that the signer is to be used
        for.
    :return: The signature version to sign with.
    """
    signing_type_suffix_map = {
        'presign-post': '-presign-post',
        'presign-url': '-query'
    }
    suffix = signing_type_suffix_map.get(signing_type, '')

    signature_version = self._signature_version
    if signature_version is not botocore.UNSIGNED and not \
            signature_version.endswith(suffix):
        signature_version += suffix

    handler, response = self._event_emitter.emit_until_response(
        'choose-signer.{0}.{1}'.format(
            self._service_id.hyphenize(), operation_name),
        signing_name=self._signing_name, region_name=self._region_name,
        signature_version=signature_version, context=context)
    if response is not None:
        signature_version = response
        # The suffix needs to be checked again in case we get an improper
        # signature version from choose-signer.
        if signature_version is not botocore.UNSIGNED and not \
                signature_version.endswith(suffix):
            signature_version += suffix

    return signature_version