Python botocore.client.Config() Examples
The following are 30 code examples of botocore.client.Config(), drawn from the open source projects and source files noted above each example.
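At its core, Config lets you tune client behavior in one place: timeouts, retries, signing, and more. A minimal sketch of the common parameters (the values here are illustrative, not defaults):

import boto3
from botocore import UNSIGNED
from botocore.client import Config

# Tune timeouts, retries, and signing on a single client.
config = Config(
    connect_timeout=5,            # seconds to wait while establishing a connection
    read_timeout=60,              # seconds to wait for a response
    retries={'max_attempts': 3},  # retry failed requests up to 3 times
    signature_version=UNSIGNED,   # send unsigned (anonymous) requests
)
s3 = boto3.client('s3', config=config)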
Example #1
Source File: aws.py From indra with BSD 2-Clause "Simplified" License
def get_s3_client(unsigned=True):
    """Return a boto3 S3 client with optional unsigned config.

    Parameters
    ----------
    unsigned : Optional[bool]
        If True, the client will be using unsigned mode in which public
        resources can be accessed without credentials. Default: True

    Returns
    -------
    botocore.client.S3
        A client object to AWS S3.
    """
    if unsigned:
        return boto3.client('s3', config=Config(signature_version=UNSIGNED))
    else:
        return boto3.client('s3')
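A client built this way can read from public buckets even when no AWS credentials are configured. A minimal usage sketch (the bucket and key below are hypothetical placeholders):

# Hypothetical usage: anonymous download from a public bucket.
s3 = get_s3_client(unsigned=True)
s3.download_file('some-public-bucket', 'path/to/object.txt', '/tmp/object.txt')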
Example #2
Source File: amis.py From startup-kit-templates with Apache License 2.0
def main():
    config = Config(connect_timeout=60, read_timeout=60)
    session = boto3.Session(profile_name=None if len(sys.argv) < 2 else sys.argv[1])
    amis = {}
    f = open("ami.yaml", "w")
    for region in get_regions(session.client('ec2', region_name='us-east-1', config=config)):
        amis[region] = {"AMI": bastion_ami(session, config, region)}
    yaml = YAML()
    yaml.default_flow_style = False
    # Print AMI list, in yaml format, to terminal
    yaml.dump(amis, sys.stdout)
    # Dump AMI list in yaml format to a file
    yaml.dump(amis, f)
Example #3
Source File: utils.py From mass with Apache License 2.0
def register_domain(domain=None, region=None):
    client = boto3.client(
        'swf',
        region_name=region or config.REGION,
        config=Config(connect_timeout=config.CONNECT_TIMEOUT,
                      read_timeout=config.READ_TIMEOUT))

    # register domain for Mass
    try:
        res = client.register_domain(
            name=domain or config.DOMAIN,
            description='The SWF domain for Mass',
            workflowExecutionRetentionPeriodInDays=str(
                int(math.ceil(float(config.WORKFLOW_EXECUTION_START_TO_CLOSE_TIMEOUT) / 60 / 60 / 24)))
        )
    except ClientError:
        # DomainAlreadyExists
        pass
Example #4
Source File: aws.py From shimit with GNU General Public License v3.0
def assume_role(cls, role_arn, principal_arn, saml_response, duration=3600):
    ''' Assumes the desired role using the saml_response given. The response should be b64 encoded.
        Duration is in seconds
    :param role_arn: role amazon resource name
    :param principal_arn: principal name
    :param saml_response: SAML object to assume role with
    :param duration: session duration (default: 3600)
    :return: AWS session token
    '''
    # Assume role with new SAML
    conn = boto3.client('sts', config=client.Config(signature_version=botocore.UNSIGNED,
                                                    user_agent=cls.USER_AGENT,
                                                    region_name=None))
    aws_session_token = conn.assume_role_with_saml(
        RoleArn=role_arn,
        PrincipalArn=principal_arn,
        SAMLAssertion=saml_response,
        DurationSeconds=duration,
    )
    return aws_session_token
Example #5
Source File: utils.py From armory with MIT License
def download_file_from_s3(bucket_name: str, key: str, local_path: str) -> None:
    """
    Downloads file from S3 anonymously
    :param bucket_name: S3 Bucket name
    :param key: S3 File key name
    :param local_path: Local file path to download as
    """
    verify_ssl = get_verify_ssl()
    if not os.path.isfile(local_path):
        client = boto3.client(
            "s3", config=Config(signature_version=UNSIGNED), verify=verify_ssl
        )
        try:
            logger.info("Downloading S3 data file...")
            total = client.head_object(Bucket=bucket_name, Key=key)["ContentLength"]
            with ProgressPercentage(client, bucket_name, key, total) as Callback:
                client.download_file(bucket_name, key, local_path, Callback=Callback)
        except ClientError:
            raise KeyError(f"File {key} not available in {bucket_name} bucket.")
    else:
        logger.info(f"Reusing cached file {local_path}...")
Example #6
Source File: test_pynamodb.py From aws-xray-sdk-python with Apache License 2.0
def test_only_dynamodb_calls_are_traced():
    """Test only a single subsegment is created for other AWS services.

    As the pynamodb patch applies the botocore patch as well, we need
    to ensure that only one subsegment is created for all calls not
    made by PynamoDB. As PynamoDB calls botocore differently than the
    botocore patch expects we also just get a single subsegment per
    PynamoDB call.
    """
    session = botocore.session.get_session()
    s3 = session.create_client('s3', region_name='us-west-2',
                               config=Config(signature_version=UNSIGNED))
    try:
        s3.get_bucket_location(Bucket='mybucket')
    except ClientError:
        pass

    subsegments = xray_recorder.current_segment().subsegments
    assert len(subsegments) == 1
    assert subsegments[0].name == 's3'
    assert len(subsegments[0].subsegments) == 0
Example #7
Source File: utils.py From mass with Apache License 2.0
def register_activity_type(domain=None, region=None):
    client = boto3.client(
        'swf',
        region_name=region or config.REGION,
        config=Config(connect_timeout=config.CONNECT_TIMEOUT,
                      read_timeout=config.READ_TIMEOUT))

    # register activity type for Cmd
    try:
        res = client.register_activity_type(
            domain=domain or config.DOMAIN,
            name=config.ACTIVITY_TYPE_FOR_ACTION['name'],
            version=config.ACTIVITY_TYPE_FOR_ACTION['version'],
            description='The SWF activity type for Cmd of Mass.',
            defaultTaskStartToCloseTimeout=str(config.ACTIVITY_TASK_START_TO_CLOSE_TIMEOUT),
            defaultTaskHeartbeatTimeout=str(config.ACTIVITY_HEARTBEAT_TIMEOUT),
            defaultTaskList={'name': config.ACTIVITY_TASK_LIST},
            defaultTaskPriority='1',
            defaultTaskScheduleToStartTimeout=str(config.ACTIVITY_TASK_START_TO_CLOSE_TIMEOUT),
            defaultTaskScheduleToCloseTimeout=str(config.ACTIVITY_TASK_START_TO_CLOSE_TIMEOUT)
        )
    except ClientError:
        # TypeAlreadyExists
        pass
Example #8
Source File: services.py From streamalert with Apache License 2.0
def _make_boto3_athena_client(container):
    region = container.get_parameter('aws_region')
    logger = container.get('logger')
    config = botocore_client.Config(
        connect_timeout=5,
        read_timeout=5,
        region_name=region
    )

    session_kwargs = {}
    try:
        session = boto3.Session(**session_kwargs)
        return session.client(
            'athena',
            config=config,
        )
    except ProfileNotFound:
        logger.error('AWS Athena Connection via Profile Failed')
Example #9
Source File: dataengine-service_create.py From incubator-dlab with Apache License 2.0
def get_object_count(bucket, prefix):
    try:
        s3_cli = boto3.client('s3', config=Config(signature_version='s3v4'),
                              region_name=args.region)
        content = s3_cli.get_paginator('list_objects')
        file_list = []
        try:
            for i in content.paginate(Bucket=bucket, Delimiter='/', Prefix=prefix):
                for file in i.get('Contents'):
                    file_list.append(file.get('Key'))
            count = len(file_list)
        except:
            print("{} still not exist. Waiting...".format(prefix))
            count = 0
        return count
    except Exception as err:
        logging.error("Unable to get objects from s3: " + str(err) +
                      "\n Traceback: " + traceback.format_exc())
Example #10
Source File: services.py From streamalert with Apache License 2.0
def _make_boto3_kinesis_client(container):
    region = container.get_parameter('aws_region')
    logger = container.get('logger')
    config = botocore_client.Config(
        connect_timeout=5,
        read_timeout=5,
        region_name=region
    )

    session_kwargs = {}
    try:
        session = boto3.Session(**session_kwargs)
        return session.client('kinesis', config=config)
    except ProfileNotFound:
        logger.error('AWS Kinesis Connection via Profile Failed')
Example #11
Source File: test_mass.py From mass with Apache License 2.0
def iter_workflow_execution_history(workflow_id, run_id, reverse_order=False,
                                    ignore_decision_task=True):
    client = boto3.client(
        'swf',
        region_name=config.REGION,
        config=Config(connect_timeout=config.CONNECT_TIMEOUT,
                      read_timeout=config.READ_TIMEOUT))
    paginator = client.get_paginator('get_workflow_execution_history')
    for res in paginator.paginate(
            domain=config.DOMAIN,
            execution={
                'workflowId': workflow_id,
                'runId': run_id
            },
            reverseOrder=reverse_order):
        for event in res['events']:
            if ignore_decision_task and event['eventType'].startswith('DecisionTask'):
                continue
            yield event
Example #12
Source File: upload.py From zulip with Apache License 2.0
def upload_export_tarball(self, realm: Optional[Realm], tarball_path: str) -> str:
    def percent_callback(bytes_transferred: Any) -> None:
        sys.stdout.write('.')
        sys.stdout.flush()

    # We use the avatar bucket, because it's world-readable.
    key = self.avatar_bucket.Object(os.path.join("exports", generate_random_token(32),
                                                 os.path.basename(tarball_path)))

    key.upload_file(tarball_path, Callback=percent_callback)

    session = botocore.session.get_session()
    config = Config(signature_version=botocore.UNSIGNED)

    public_url = session.create_client('s3', config=config).generate_presigned_url(
        'get_object',
        Params={
            'Bucket': self.avatar_bucket.name,
            'Key': key.key,
        },
        ExpiresIn=0,
    )
    return public_url
Example #13
Source File: actions_lib.py From incubator-dlab with Apache License 2.0
def s3_cleanup(bucket, cluster_name, user_name):
    s3_res = boto3.resource('s3', config=Config(signature_version='s3v4'))
    client = boto3.client('s3', config=Config(signature_version='s3v4'),
                          region_name=os.environ['aws_region'])
    try:
        client.head_bucket(Bucket=bucket)
    except:
        print("There is no bucket {} or you do not have permission to access it".format(bucket))
        sys.exit(0)
    try:
        resource = s3_res.Bucket(bucket)
        prefix = user_name + '/' + cluster_name + "/"
        for i in resource.objects.filter(Prefix=prefix):
            s3_res.Object(resource.name, i.key).delete()
    except Exception as err:
        logging.info("Unable to clean S3 bucket: " + str(err) +
                     "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to clean S3 bucket",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
Example #14
Source File: s3boto3.py From django-storages with BSD 3-Clause "New" or "Revised" License
def __init__(self, **settings):
    super().__init__(**settings)
    check_location(self)

    # Backward-compatibility: given the anteriority of the SECURE_URL setting
    # we fall back to https if specified in order to avoid the construction
    # of unsecure urls.
    if self.secure_urls:
        self.url_protocol = 'https:'

    self._bucket = None
    self._connections = threading.local()

    self.access_key, self.secret_key = self._get_access_keys()
    self.security_token = self._get_security_token()

    if not self.config:
        self.config = Config(
            s3={'addressing_style': self.addressing_style},
            signature_version=self.signature_version,
            proxies=self.proxies,
        )
Example #15
Source File: s3.py From strax with BSD 3-Clause "New" or "Revised" License
def _save_chunk(self, data, chunk_info, executor=None):
    # Keyname
    key_name = f"{self.strax_unique_key}/{chunk_info['chunk_i']:06d}"

    # Save chunk via temporary file
    with tempfile.SpooledTemporaryFile() as f:
        filesize = strax.save_file(f, data=data, compressor=self.md['compressor'])
        f.seek(0)
        self.s3.upload_fileobj(f, BUCKET_NAME, key_name, Config=self.config)

    return dict(key_name=key_name, filesize=filesize), None
Example #16
Source File: controllers.py From geemusic with GNU General Public License v3.0
def proxy_response(req):
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'))
    bucket = s3.Bucket(BUCKET_NAME)

    file_name = str(uuid4())
    obj = bucket.put_object(
        Key=file_name,
        Body=req.content,
        ACL="authenticated-read",
        ContentType=req.headers["content-type"]
    )

    url = s3_client.generate_presigned_url(
        "get_object",
        Params={
            "Bucket": BUCKET_NAME,
            "Key": file_name},
        ExpiresIn=120
    )

    return redirect(url, 303)
Example #17
Source File: activities.py From heaviside with Apache License 2.0
def __init__(self, handle_task=lambda t, i: None, **kwargs):
    """Will not be called if used as a mixin. Provides just the expected variables.

    Args:
        handle_task (callable) : Callable to process task input and send success or failure
        kwargs : Arguments for heaviside.utils.create_session
    """
    session, _ = create_session(**kwargs)
    # DP NOTE: read_timeout is needed so that the long poll for tasking doesn't
    #          timeout client side before AWS returns that there is no work
    self.client = session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)
    self.name = None
    self.arn = None
    self.handle_task = handle_task
    self.max_concurrent = 0
    self.poll_delay = 1
    self.polling = False
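The 70-second read timeout matters because Step Functions' GetActivityTask holds the connection open for up to 60 seconds when no work is available, so a shorter client-side timeout would abort the long poll early. A minimal sketch of such a poll, independent of heaviside (the activity ARN and worker name are hypothetical placeholders):

import boto3
from botocore.client import Config

# GetActivityTask long-polls for up to 60s, so read_timeout must exceed that.
sfn = boto3.client('stepfunctions', config=Config(read_timeout=70))
task = sfn.get_activity_task(
    activityArn='arn:aws:states:us-east-1:123456789012:activity:example',
    workerName='example-worker')
if task.get('taskToken'):
    # A real worker would process task['input'] and report the result here.
    sfn.send_task_success(taskToken=task['taskToken'], output='{}')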
Example #18
Source File: activities.py From heaviside with Apache License 2.0
def __init__(self, name, target=None, **kwargs):
    """
    Args:
        name (string): Name of the activity to monitor
                       The activity's ARN is looked up in AWS using the provided
                       AWS credentials
        target (string|callable): Function to pass to TaskProcess as the target,
                                  If string, the class / function will be imported
        kwargs (dict): Same arguments as utils.create_session()
    """
    super(ActivityProcess, self).__init__(name=name)
    self.name = name
    self.credentials = kwargs
    self.session, self.account_id = create_session(**kwargs)
    self.client = self.session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)

    self.max_concurrent = 0
    self.poll_delay = 1

    if isinstance(target, str):
        target = TaskProcess.resolve_function(target)
    self.target = target
Example #19
Source File: test.py From startup-kit-templates with Apache License 2.0
def main(): """ Create the various stacks in all supported regions """ print 'Testing stacks' github = {} config = Config(connect_timeout=60, read_timeout=60) session = boto3.Session(profile_name=None if len(sys.argv) < 2 else sys.argv[1]) github['user'] = sys.argv[2] github['repo'] = sys.argv[3] github['token'] = sys.argv[4] print 'AWS session created' key_pairs = ensure_foundation(session, config) tests = [ 'vpc', 'vpc-bastion', 'vpc-bastion-eb-database', 'vpc-bastion-eb-database-alarm', 'vpc-bastion-eb-database-enhanced-alarm', 'vpc-bastion-fargate', 'vpc-bastion-fargate-LB-alarm' 'vpc-bastion-fargate-database', 'vpc-bastion-fargate-database-alarm', 'vpc-bastion-fargate-database-enhanced-alarm', 'vpc-bastion-fargate-database-LBalarm', 'vpc-bastion-fargate-database-alarm-LBalarm', 'vpc-bastion-fargate-database-enhanced-alarm-LBalarm', ] for test in tests: test_stack(session, config, test, github, key_pairs) remove_keypairs(session, config, key_pairs) #we also need to add code to remove buckets created as part of test harness
Example #20
Source File: s3.py From gigantum-client with MIT License
def _get_client(self):
    return boto3.client('s3', config=Config(signature_version=UNSIGNED))
Example #21
Source File: deployer.py From ecs-blue-green-deployment with Apache License 2.0
def get_build_artifact_id(build_id):
    """Get artifact (build.json) from the build project.

    We are making this as an additional call to get the build.json
    which already contains the new built repository ECR path. We could
    have consolidated this script and executed in the build phase, but
    as codebuild accepts the input from one source only (scripts and
    application code are in different sources), that's why an additional
    call to retrieve build.json from a different build project.

    Args:
        build_id - Build ID for codebuild (build phase)
    Returns:
        build.json
    Raises:
        Exception: Any exception thrown by handler
    """
    codebuild_client = boto3.client('codebuild')
    response = codebuild_client.batch_get_builds(
        ids=[
            str(build_id),
        ]
    )
    for build in response['builds']:
        s3_location = build['artifacts']['location']
        bucketkey = s3_location.split(":")[5]
        bucket = bucketkey.split("/")[0]
        key = bucketkey[bucketkey.find("/") + 1:]
        s3_client = boto3.client('s3', config=Config(signature_version='s3v4'))
        s3_client.download_file(bucket, key, 'downloaded_object')
        zip_ref = zipfile.ZipFile('downloaded_object', 'r')
        zip_ref.extractall('downloaded_folder')
        zip_ref.close()

    with open('downloaded_folder/build.json') as data_file:
        objbuild = json.load(data_file)
    print(objbuild['tag'])
    return objbuild['tag']
Example #22
Source File: package.py From bash-lambda-layer with MIT License
def _run_main(self, parsed_args, parsed_globals):
    s3_client = self._session.create_client(
        "s3",
        config=Config(signature_version='s3v4'),
        region_name=parsed_globals.region,
        verify=parsed_globals.verify_ssl)

    template_path = parsed_args.template_file
    if not os.path.isfile(template_path):
        raise exceptions.InvalidTemplatePathError(
            template_path=template_path)

    bucket = parsed_args.s3_bucket
    self.s3_uploader = S3Uploader(s3_client,
                                  bucket,
                                  parsed_args.s3_prefix,
                                  parsed_args.kms_key_id,
                                  parsed_args.force_upload)
    # attach the given metadata to the artifacts to be uploaded
    self.s3_uploader.artifact_metadata = parsed_args.metadata

    output_file = parsed_args.output_template_file
    use_json = parsed_args.use_json
    exported_str = self._export(template_path, use_json)

    sys.stdout.write("\n")
    self.write_output(output_file, exported_str)

    if output_file:
        msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
            output_file_name=output_file,
            output_file_path=os.path.abspath(output_file))
        sys.stdout.write(msg)

    sys.stdout.flush()
    return 0
Example #23
Source File: connector.py From aws-xray-sdk-python with Apache License 2.0
def _create_xray_client(self, ip='127.0.0.1', port='2000'):
    session = botocore.session.get_session()
    url = 'http://%s:%s' % (ip, port)
    return session.create_client('xray', endpoint_url=url,
                                 region_name='us-west-2',
                                 config=Config(signature_version=UNSIGNED),
                                 aws_access_key_id='',
                                 aws_secret_access_key='')
Example #24
Source File: kinesis_stream_logs_producer.py From quay with Apache License 2.0
def __init__(
    self,
    stream_name,
    aws_region,
    aws_access_key=None,
    aws_secret_key=None,
    connect_timeout=None,
    read_timeout=None,
    max_retries=None,
    max_pool_connections=None,
):
    self._stream_name = stream_name
    self._aws_region = aws_region
    self._aws_access_key = aws_access_key
    self._aws_secret_key = aws_secret_key

    self._connect_timeout = connect_timeout or DEFAULT_CONNECT_TIMEOUT
    self._read_timeout = read_timeout or DEFAULT_READ_TIMEOUT
    self._max_retries = max_retries or MAX_RETRY_ATTEMPTS
    self._max_pool_connections = max_pool_connections or DEFAULT_MAX_POOL_CONNECTIONS

    client_config = Config(
        connect_timeout=self._connect_timeout,
        read_timeout=self._read_timeout,
        retries={"max_attempts": self._max_retries},
        max_pool_connections=self._max_pool_connections,
    )
    self._producer = boto3.client(
        "kinesis",
        use_ssl=True,
        region_name=self._aws_region,
        aws_access_key_id=self._aws_access_key,
        aws_secret_access_key=self._aws_secret_key,
        config=client_config,
    )
Example #25
Source File: s3.py From cloud-custodian with Apache License 2.0
def bucket_client(session, b, kms=False):
    region = get_region(b)

    if kms:
        # Need v4 signature for aws:kms crypto, else let the sdk decide
        # based on region support.
        config = Config(
            signature_version='s3v4',
            read_timeout=200,
            connect_timeout=120)
    else:
        config = Config(read_timeout=200, connect_timeout=120)
    return session.client('s3', region_name=region, config=config)
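Requests against SSE-KMS-encrypted objects, including presigned URLs, must be signed with Signature Version 4; the legacy v2 signer is rejected. A minimal sketch, separate from the function above (the bucket and key names are hypothetical):

import boto3
from botocore.client import Config

# SigV4 is mandatory for objects encrypted with aws:kms.
s3 = boto3.client('s3', config=Config(signature_version='s3v4'))
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': 'example-kms-bucket', 'Key': 'reports/summary.csv'},
    ExpiresIn=300)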
Example #26
Source File: s3.py From cloud-custodian with Apache License 2.0
def handle_BucketVersioningConfiguration(self, resource, item_value):
    # Config defaults versioning to 'Off' for a null value
    if item_value['status'] not in ('Enabled', 'Suspended'):
        return

    resource['Versioning'] = {'Status': item_value['status']}
    if item_value['isMfaDeleteEnabled']:
        resource['Versioning']['MFADelete'] = item_value[
            'isMfaDeleteEnabled'].title()
Example #27
Source File: bless_lambda.py From python-blessclient with Apache License 2.0
def getCert(self, payload):
    payload['kmsauth_token'] = self.kmsauth_token
    payload_json = json.dumps(payload)
    lambdabotoconfig = Config(
        connect_timeout=self.config['timeoutconfig']['connect'],
        read_timeout=self.config['timeoutconfig']['read']
    )
    try:
        mfa_lambda_client = boto3.client(
            'lambda',
            region_name=self.region,
            aws_access_key_id=self.creds['AccessKeyId'],
            aws_secret_access_key=self.creds['SecretAccessKey'],
            aws_session_token=self.creds['SessionToken'],
            config=lambdabotoconfig
        )
        response = mfa_lambda_client.invoke(
            FunctionName=self.config['functionname'],
            InvocationType='RequestResponse',
            LogType='Tail',
            Payload=payload_json,
            Qualifier=self.config['functionversion']
        )
        if response['StatusCode'] != 200:
            raise LambdaInvocationException('Error creating cert.')
    except ConnectTimeout:
        raise LambdaInvocationException('Timeout connecting to Lambda')
    except ReadTimeout:
        raise LambdaInvocationException('Timeout reading cert from Lambda')
    except SSLError:
        raise LambdaInvocationException('SSL error connecting to Lambda')
    except ValueError:
        # On a 404, boto tries to decode any body as json
        raise LambdaInvocationException('Invalid message format in Lambda response')
    payload = json.loads(response['Payload'].read())
    if 'certificate' not in payload:
        raise LambdaInvocationException('No certificate in response.')
    return payload['certificate']
Example #28
Source File: s3.py From zentral with Apache License 2.0
def client(self):
    if self._client is None:
        config = None
        if self.config:
            config = Config(**self.config)
        self._client = boto3.client('s3',
                                    aws_access_key_id=self.aws_access_key_id,
                                    aws_secret_access_key=self.aws_secret_access_key,
                                    endpoint_url=self.endpoint_url,
                                    config=config)
    return self._client
Example #29
Source File: subcommands.py From bash-lambda-layer with MIT License
def set_clients(self):
    client_config = None
    if self.parameters.get('sse') == 'aws:kms':
        client_config = Config(signature_version='s3v4')
    self._client = get_client(
        self.session,
        region=self.parameters['region'],
        endpoint_url=self.parameters['endpoint_url'],
        verify=self.parameters['verify_ssl'],
        config=client_config
    )
    self._source_client = get_client(
        self.session,
        region=self.parameters['region'],
        endpoint_url=self.parameters['endpoint_url'],
        verify=self.parameters['verify_ssl'],
        config=client_config
    )
    if self.parameters['source_region']:
        if self.parameters['paths_type'] == 's3s3':
            self._source_client = get_client(
                self.session,
                region=self.parameters['source_region'],
                endpoint_url=None,
                verify=self.parameters['verify_ssl'],
                config=client_config
            )
Example #30
Source File: conftest.py From inference-model-manager with Apache License 2.0
def minio_client():
    return boto3.client('s3',
                        endpoint_url=MINIO_ENDPOINT_ADDR,
                        aws_access_key_id=MINIO_ACCESS_KEY_ID,
                        aws_secret_access_key=MINIO_SECRET_ACCESS_KEY,
                        config=Config(signature_version=SIGNATURE_VERSION),
                        region_name=MINIO_REGION)