Python google.appengine.api.urlfetch.POST Examples

The following are 25 code examples of google.appengine.api.urlfetch.POST(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module google.appengine.api.urlfetch, or try the search function.
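All of the examples below follow the same basic pattern: build a request body, then pass it to urlfetch.fetch() with method=urlfetch.POST. As a minimal sketch of that pattern (the URL and payload here are placeholders, not taken from any of the projects listed below):

import json
import logging

from google.appengine.api import urlfetch


def post_json(url, data):
  """POST a JSON body with urlfetch and return the decoded response, or None."""
  try:
    result = urlfetch.fetch(
        url=url,                           # placeholder endpoint
        payload=json.dumps(data),          # request body, serialized to a string
        method=urlfetch.POST,              # the constant documented on this page
        deadline=10,                       # seconds to wait before the fetch fails
        headers={'Content-Type': 'application/json'})
  except urlfetch.Error:
    logging.exception('POST to %s failed', url)
    return None
  if result.status_code == 200:
    return json.loads(result.content)
  logging.error('Unexpected status %s: %s', result.status_code, result.content)
  return None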
Example #1
Source File: googl.py    From hackernewsbot with MIT License 6 votes
def call_method(method, data):
  data.update({'key': TOKEN})
  data = json.dumps(data)
  try:
    result = urlfetch.fetch(
        BASE_URL.format(method=method, api_key=TOKEN),
        payload=data,
        method=urlfetch.POST,
        deadline=10,
        headers={'Content-Type': 'application/json'})
  except DeadlineExceededError as e:
    logging.exception(e)
    return None
  if result.status_code == 200:
    return json.loads(result.content)
  else:
    logging.error(result.content)
    return None 
Example #2
Source File: remote_api_put_stub.py    From python-compat-runtime with Apache License 2.0 6 votes
def _MakeRemoteSyncCall(self, service, call, request, response):
    """Send an RPC to a remote_api endpoint."""
    request_pb = remote_api_pb.Request()
    request_pb.set_service_name(service)
    request_pb.set_method(call)
    request_pb.set_request(request.Encode())

    response_pb = remote_api_pb.Response()
    encoded_request = request_pb.Encode()
    try:
      urlfetch_response = urlfetch.fetch(self.remote_url, encoded_request,
                                         urlfetch.POST, self.extra_headers,
                                         follow_redirects=False,
                                         deadline=10)
    except Exception as e:
      logging.exception('Fetch failed to %s', self.remote_url)
      raise FetchFailed(e) 
Example #3
Source File: cloud_datastore_export_test.py    From loaner with Apache License 2.0 6 votes
def test_get(
      self, mock_config, mock_urlfetch, mock_app_identity, mock_logging):
    test_destination_url = cloud_datastore_export._DESTINATION_URL
    test_bucket_name = 'gcp_bucket_name'
    mock_config.side_effect = [test_bucket_name, True]
    expected_url = (
        cloud_datastore_export._DATASTORE_API_URL % self.test_application_id)
    mock_urlfetch.return_value.status_code = httplib.OK
    now = datetime.datetime(
        year=2017, month=1, day=1, hour=1, minute=1, second=15)
    with freezegun.freeze_time(now):
      self.testapp.get(self._CRON_URL)
      mock_urlfetch.assert_called_once_with(
          url=expected_url,
          payload=json.dumps({
              'project_id': self.test_application_id,
              'output_url_prefix': test_destination_url.format(
                  test_bucket_name, now.strftime('%Y_%m_%d-%H%M%S'))
          }),
          method=urlfetch.POST,
          deadline=60,
          headers={
              'Content-Type': 'application/json',
              'Authorization': 'Bearer mock_token'})
      self.assertEqual(mock_logging.call_count, 3) 
Example #4
Source File: paypal.py    From pledgeservice with Apache License 2.0 6 votes
def send_request(fields):
    config = model.Config.get()

    fields["VERSION"] = "113"
    fields["USER"] =  config.paypal_user
    fields["PWD"] =  config.paypal_password
    fields["SIGNATURE"] = config.paypal_signature

    form_data = urllib.urlencode(fields)

    result = urlfetch.fetch(url=config.paypal_api_url, payload=form_data, method=urlfetch.POST,
                headers={'Content-Type': 'application/x-www-form-urlencoded'})
    result_map = urlparse.parse_qs(result.content)

    if 'ACK' in result_map:
        if result_map['ACK'][0] == "Success":
            return (True, result_map)
   
        logging.warning("Paypal returned an error:")
        logging.warning(pprint.pformat(result_map))
        return (False, result_map)

    logging.warning("Could not contact Paypal:")
    logging.warning(result.content)
    return False, result.content 
Example #5
Source File: telegram.py    From hackernewsbot with MIT License 6 votes
def call_method(method, data):
  data = json.dumps(data)
  try:
    result = urlfetch.fetch(
        BASE_URL.format(token=TOKEN, method=method),
        payload=data,
        method=urlfetch.POST,
        deadline=10,
        headers={'Content-Type': 'application/json'})
  except DeadlineExceededError as e:
    logging.exception(e)
    return None
  if result.status_code == 200:
    return json.loads(result.content)
  else:
    logging.error(result.content)
    return None 
Example #6
Source File: dropbox.py    From MyLife with MIT License 6 votes
def get_file_info(self, access_token, name):

		headers = {
			'Content-Type' : 'application/json',
			'Authorization' : 'Bearer ' + access_token
		}

		data = {
			"path": "/" + name,
			"include_media_info": False,
			"include_deleted": False,
			"include_has_explicit_shared_members": False
		}

		result = urlfetch.fetch(
			payload=json.dumps(data),
			method=urlfetch.POST,
			url='https://api.dropboxapi.com/2/files/get_metadata',
			headers=headers
		)

		if result.status_code != 200:
			raise Exception("Failed to get file metadata from Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
		self.log(result.content)
		return json.loads(result.content) 
Example #7
Source File: background.py    From RSSNewsGAE with Apache License 2.0 6 votes
def push_important_news():
    key = request.values.get('key')
    news = ndb.Key(urlsafe=key).get()
    form_fields = {
        "token": app.config["PUSHOVER_APP_KEY"],
        "user": app.config["PUSHOVER_USER_KEY"],
        "message": news.summary.encode("utf-8"),
        "url": news.link.encode("utf-8"),
        "url_title": u"点击访问正文".encode("utf-8"),
        "title": news.title.encode("utf-8"),
    }
    form_data = urllib.urlencode(form_fields)
    urlfetch.fetch(url=app.config["PUSH_OVER_URL"],
                   payload=form_data,
                   method=urlfetch.POST,
                   headers={'Content-Type': 'application/x-www-form-urlencoded'},
                   follow_redirects=False,
                   validate_certificate=False)
    return "Done", 200 
Example #8
Source File: dropbox.py    From MyLife with MIT License 5 votes
def put_file(self, access_token, name, bytes):

#		info = self.get_file_info(access_token, name)
#		self.log(info)

		dropbox_args = {
			"path": "/" + name,
			"mode": {".tag": "overwrite"},
			"autorename": True,
			"mute": False
		}

		headers = {
			'Content-Type' : 'application/octet-stream',
			'Authorization' : 'Bearer ' + access_token,
			'Dropbox-API-Arg' : json.dumps(dropbox_args)
		}

		result = urlfetch.fetch(
			payload=bytes,
			method=urlfetch.POST,
			url='https://content.dropboxapi.com/2/files/upload',
			headers=headers
		)

		if result.status_code != 200:
			self.log(result.content)
			raise Exception("Failed to send file to Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
		return json.loads(result.content) 
Example #9
Source File: httplib.py    From python-compat-runtime with Apache License 2.0 5 votes
def __init__(self, host, port=None, strict=None,
               timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None,
               context=None):
    # net.proto.ProtocolBuffer relies on httplib so importing urlfetch at the
    # module level causes a failure on prod. That means the import needs to be
    # lazy.
    from google.appengine.api import urlfetch
    self._fetch = urlfetch.fetch
    self._method_map = {
      'GET': urlfetch.GET,
      'POST': urlfetch.POST,
      'HEAD': urlfetch.HEAD,
      'PUT': urlfetch.PUT,
      'DELETE': urlfetch.DELETE,
      'PATCH': urlfetch.PATCH,
    }
    self.host = host
    self.port = port
    # With urllib2 in Python 2.6, an object can be passed here.
    # The default is set to socket.GLOBAL_DEFAULT_TIMEOUT which is an object.
    # We only accept float, int or long values, otherwise it can be
    # silently ignored.
    if not isinstance(timeout, (float, int, long)):
      timeout = None
    self.timeout = timeout
    # Both 'strict' and 'source_address' are ignored.
    self._method = self._url = None
    self._body = ''
    self.headers = [] 
Example #10
Source File: main.py    From python-docs-samples with Apache License 2.0 5 votes
def get(self):
        # [START urlfetch-post]
        try:
            form_data = urllib.urlencode(UrlPostHandler.form_fields)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            result = urlfetch.fetch(
                url='http://localhost:8080/submit_form',
                payload=form_data,
                method=urlfetch.POST,
                headers=headers)
            self.response.write(result.content)
        except urlfetch.Error:
            logging.exception('Caught exception fetching url')
        # [END urlfetch-post] 
Example #11
Source File: background.py    From RSSNewsGAE with Apache License 2.0 5 votes
def collect_keyword_for_one_news():
    user_key_word = get_pure_keyword()
    key = request.values.get('key')
    news = ndb.Key(urlsafe=key).get()
    form_fields = {
        "text": news.summary.encode("utf-8"),
        "topK": app.config["TOP_KEYWORD"],
        "withWeight": 0
    }
    form_data = urllib.urlencode(form_fields)
    result = urlfetch.fetch(url=app.config["JIEBA_API"],
                            payload=form_data,
                            method=urlfetch.POST,
                            headers={'Content-Type': 'application/x-www-form-urlencoded'},
                            follow_redirects=False)
    json_content = json.loads(result.content)
    key_words = json_content["result"]
    del news.key_word[:]
    news.key_word = key_words
    tmp = [val for val in key_words if val in user_key_word]
    if tmp:
        news.important = True
    if tmp and app.config["PUSHOVER"]:
        taskqueue.add(queue_name='push-msg-queue',
                      url=url_for("push_important_news"),
                      method='POST',
                      params={"key": key})
    news.put()
    return "Done", 200 
Example #12
Source File: background.py    From RSSNewsGAE with Apache License 2.0 5 votes
def fetch_one_feed():
    key = request.values.get('key')
    feed = ndb.Key(urlsafe=key).get()
    parser = url2parser(feed.url)
    if parser is not None:
        the_last_fetch = feed.latest_fetch
        feed.latest_fetch = datetime.now()
        list_of_news_entities = []
        ndb.put_multi(list_of_news_entities)
        for entry in parser.entries:
            if check_entry(entry):
                entry.published = datetime.fromtimestamp(mktime(entry.published_parsed))
                if entry.published > the_last_fetch:
                    news_entry = NewsEntry()
                    news_entry.published = entry.published
                    news_entry.title = entry.title
                    news_entry.link = entry.link
                    news_entry.summary = clean_html(entry.summary)
                    news_entry.feed = feed.title
                    list_of_news_entities.append(news_entry)
        feed.put()
        news_key_list = ndb.put_multi(list_of_news_entities)
        for news_key in news_key_list:
            taskqueue.add(queue_name='collect-queue',
                          url=url_for("collect_keyword_for_one_news"),
                          method='POST',
                          params={"key": news_key.urlsafe()}
                          )
        return "Done", 200
    else:
        return "parser is None", 200 
Example #13
Source File: background.py    From RSSNewsGAE with Apache License 2.0 5 votes
def launch_fetch():
    feeds = get_quarterly_feed_key()
    now = datetime.now(tz=app.config["TIME_ZONE"])
    if now.minute / 15 == 3:
        feeds += get_hourly_feed_key()
    if now.hour == 9 and now.minute / 15 == 1:
        feeds += get_daily_feed_key()
    for feed in feeds:
        taskqueue.add(queue_name='fetch-queue',
                      url=url_for("fetch_one_feed"),
                      method='POST',
                      params={"key": feed.urlsafe()}
                      )
    return "Done", 200 
Example #14
Source File: handlers.py    From pledgeservice with Apache License 2.0 5 votes
def _send_to_bitpay(self, amountCents, temp_key_str):
    price_in_dollars = int(amountCents) / 100.0
    apiKey = model.Secrets.get().bitpay_api_key
    uname = base64.b64encode(apiKey)
    headers = {'Authorization': 'Basic ' + uname }

    callbackUrl = self.request.host_url + "/r/bitcoin_notifications"
    logging.info('CALLBACK URL WILL BE: ' + callbackUrl)

    post_data = {
      'posData': temp_key_str,
      'price': price_in_dollars,
      'notificationURL': self.request.host_url + "/r/bitcoin_notifications",
      'currency': 'USD',
      # 'buyerName': data["name"],
      # 'buyerEmail': data["email"]
    }

    payload = urllib.urlencode(post_data)
    logging.info('calling URL fetchee')

    result = urlfetch.fetch(
      url='https://bitpay.com/api/invoice/',
      payload=payload,
      method=urlfetch.POST,
      headers=headers,
      validate_certificate=True
    )

    if result.status_code == 200:
      response_dict = json.loads(result.content)
      return response_dict
    else:
      logging.warning('BitcoinStart failed: ' + str(result.content))
      self.error(400)
      self.response.write('Invalid request')
      return 
Example #15
Source File: dropbox.py    From MyLife with MIT License 4 votes
def get_dropbox_filelist(self, access_token):
		headers = {
			'Content-Type' : 'application/json',
			'Authorization' : 'Bearer ' + access_token
		}

		data = {
			"path": "",
			"recursive": True,
			"include_media_info": False,
			"include_deleted": False,
			"include_has_explicit_shared_members": False,
			"include_mounted_folders": False,
			"limit" : 1000
		}

		result = urlfetch.fetch(
			payload=json.dumps(data),
			method=urlfetch.POST,
			url='https://api.dropboxapi.com/2/files/list_folder',
			headers=headers)

		if result.status_code != 200:
			raise Exception("Failed to get files from Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
		
		json_data = json.loads(result.content)
		file_list = [o['name'] for o in json_data['entries']]

		#Get everything
		while json_data['has_more']:
			self.log('Getting next batch...')
			result = urlfetch.fetch(
				payload=json.dumps({"cursor" : json_data['cursor']}),
				method=urlfetch.POST,
				url='https://api.dropboxapi.com/2/files/list_folder/continue',
				headers=headers)

			if result.status_code != 200:
				raise Exception("Failed to get files from Dropbox. Status: %s, body: %s" % (result.status_code, result.content))

			json_data = json.loads(result.content)
			file_list.extend([o['name'] for o in json_data['entries']])

		return file_list 
Example #16
Source File: handlers.py    From pledgeservice with Apache License 2.0 4 votes
def post(self):
    env = self.app.config['env']
    util.EnableCors(self)

    for field in ['team', 'reply_to', 'subject', 'message_body', 'new_members']:
      if not field in self.request.POST:
        msg = "Bad Request: required field %s missing." % field
        logging.warning(msg)
        self.error(400)
        self.response.write(msg)
        return self.response

    # get the pledges for this team, excluding the reply_to
    pledges = model.Pledge.all().filter(
      'team =',self.request.POST['team'])
    # .filter(
      # 'email !=', self.request.POST['reply_to'])

    # yes this is executing another query, and it's ok because
    # this will be done so infrequently
    # FIXME: lookup from cache.Get.. or TeamTotal once those are sorted out
    total_pledges = model.Pledge.all().filter(
      'team =',self.request.POST['team']).count()

    # if only sending to new members, filter out those that have already received emails
    if self.request.POST['new_members'] == 'True':
      pledges = pledges.filter('thank_you_sent_at =', None)

    i = 0
    for pledge in pledges:
      env.mail_sender.Send(to=pledge.email,
                     subject=self.request.POST['subject'],
                     text_body=self.request.POST['message_body'],
                     html_body=self.request.POST['message_body'],
                     reply_to=self.request.POST['reply_to'])
      i += 1
      # set the thank_you_sent_at for users after sending
      # FIXME: make sure the send was successful
      pledge.thank_you_sent_at = datetime.datetime.now()
      pledge.put()

    logging.info('THANKING: %d PLEDGERS!!' % i)
    response_data = {'num_emailed': i, 'total_pledges': total_pledges}
    self.response.content_type = 'application/json'
    self.response.write(json.dumps(response_data)) 
Example #17
Source File: cloud_datastore_export.py    From loaner with Apache License 2.0 4 votes
def get(self):
    bucket_name = config_model.Config.get('gcp_cloud_storage_bucket')
    if config_model.Config.get('enable_backups') and bucket_name:
      access_token, _ = app_identity.get_access_token(
          'https://www.googleapis.com/auth/datastore')

      # Split on the tilde because os.environ.get returns the application id
      # with a partition prefix (e.g. `s~`), which is not needed here.
      app_id = constants.APPLICATION_ID.split('~')[1]

      request = {
          'project_id': app_id,
          'output_url_prefix': _format_full_path(bucket_name),
      }
      headers = {
          'Content-Type': 'application/json',
          'Authorization': 'Bearer ' + access_token
      }

      logging.info(
          'Attempting to export cloud datastore to bucket %r.', bucket_name)
      try:
        result = urlfetch.fetch(
            url=_DATASTORE_API_URL % app_id,
            payload=json.dumps(request),
            method=urlfetch.POST,
            deadline=60,
            headers=headers)
        if result.status_code == httplib.OK:
          logging.info('Cloud Datastore export completed.')
          logging.info(result.content)
        elif result.status_code >= 500:
          logging.error(result.content)
        else:
          logging.warning(result.content)
        self.response.status_int = result.status_code
      except urlfetch.Error:
        logging.error('Failed to initiate datastore export.')
        self.response.status_int = httplib.INTERNAL_SERVER_ERROR
    else:
      logging.info('Backups are not enabled, skipping.') 
Example #18
Source File: urlfetch.py    From python-for-android with Apache License 2.0 4 votes
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [__ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers)) 
Example #19
Source File: appengine.py    From python-for-android with Apache License 2.0 4 votes
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [_convert_data_part(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = _convert_data_part(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers, follow_redirects=False)) 
Example #20
Source File: urlfetch.py    From python-for-android with Apache License 2.0 4 votes
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [__ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers)) 
Example #21
Source File: appengine.py    From python-for-android with Apache License 2.0 4 votes
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [_convert_data_part(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = _convert_data_part(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers, follow_redirects=False)) 
Example #22
Source File: urlfetch.py    From python-for-android with Apache License 2.0 4 votes
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [__ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers)) 
Example #23
Source File: virustotal_client.py    From upvote with Apache License 2.0 4 votes
def Lookup(binary_hash):
  """Queries VirusTotal for the given binary hash.

  Args:
    binary_hash: SHA256 hash of the binary in question.

  Returns:
    Dict containing information VirusTotal knows about the binary.
  """
  # Decrypt our VirusTotal API key. If something blows up, just let the
  # exception bubble up to binary_health._PerformLookup().
  vt_auth = singleton.VirusTotalApiAuth.GetInstance()

  payload = urllib.parse.urlencode({
      'apikey': vt_auth.api_key,
      'resource': binary_hash
  })

  # Perform the VirusTotal query.
  response_obj = urlfetch.fetch(
      url=_REPORT_URL,
      payload=payload,
      method=urlfetch.POST,
      headers={'Content-Type': 'application/x-www-form-urlencoded'},
      deadline=15,
      validate_certificate=True)

  # Parse the response content into a dict.
  response_dict = {}
  try:
    json_dict = json.loads(response_obj.content)
  except ValueError:
    logging.error(
        'Bad VT response (HTTP %s): %s', response_obj.status_code,
        response_obj.content)
    raise ResponseError(
        'Failed to parse API response: %s' % response_obj.content)
  else:
    response_dict.update(json_dict)

  # Include verbose response from VT API when unknown response code given.
  if ('response_code' in response_dict and
      response_dict['response_code'] not in _KNOWN_RESPONSE_CODES):
    logging.warning('VirusTotal Error: %s', response_dict['verbose_msg'])

  # Only return scans from trusted antivirus scanners.
  scans = response_dict.get('scans')
  if scans:
    response_dict['scans'] = {
        vendor: scans[vendor]
        for vendor in constants.TRUSTED_AV_VENDORS
        if vendor in scans}
    response_dict['positives'] = sum(
        scan.get('detected') for scan in response_dict['scans'].values())
    response_dict['total'] = len(response_dict['scans'])

  return response_dict 
Example #24
Source File: datastore_backup.py    From upvote with Apache License 2.0 4 votes
def get(self):  # pylint: disable=g-bad-name

    # Only run backups in prod.
    if not env_utils.RunningInProd():
      logging.info('Datastore backups are only run in prod')
      return

    logging.info('Starting a new Datastore backup')

    access_token, _ = app_identity.get_access_token(
        'https://www.googleapis.com/auth/datastore')
    app_id = app_identity.get_application_id()

    # Configure a backup of all Datastore kinds, stored in a separate Cloud
    # Storage bucket for each day.
    output_url_prefix = 'gs://%s/%s/' % (
        env_utils.ENV.DATASTORE_BACKUP_BUCKET,
        datetime.datetime.utcnow().strftime('%Y_%m_%d'))
    kinds = [k for k in metadata.get_kinds() if not k.startswith('_')]
    request = {
        'project_id': app_id,
        'output_url_prefix': output_url_prefix,
        'entity_filter': {'kinds': kinds}
    }
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + access_token
    }
    url = 'https://datastore.googleapis.com/v1/projects/%s:export' % app_id

    logging.info('Backing up %d kind(s) to %s', len(kinds), output_url_prefix)

    try:
      result = urlfetch.fetch(
          url=url,
          payload=json.dumps(request),
          method=urlfetch.POST,
          deadline=60,
          headers=headers)

      if result.status_code == httplib.OK:
        logging.info(result.content)
        _DATASTORE_BACKUPS.Increment()
      else:
        logging.warning(result.content)

      self.response.status_int = result.status_code

    except urlfetch.Error:
      logging.exception('Datastore backup failed')
      self.response.status_int = httplib.INTERNAL_SERVER_ERROR 
Example #25
Source File: urlfetch.py    From gdata-python3 with Apache License 2.0 4 votes
def request(self, operation, url, data=None, headers=None):
        """Performs an HTTP call to the server, supports GET, POST, PUT, and
        DELETE.

        Usage example, perform an HTTP GET on http://www.google.com/:
          import atom.http
          client = atom.http.HttpClient()
          http_response = client.request('GET', 'http://www.google.com/')

        Args:
          operation: str The HTTP operation to be performed. This is usually one
              of 'GET', 'POST', 'PUT', or 'DELETE'
          data: filestream, list of parts, or other object which can be converted
              to a string. Should be set to None when performing a GET or DELETE.
              If data is a file-like object which can be read, this method will
              read a chunk of 100K bytes at a time and send them.
              If the data is a list of parts to be sent, each part will be
              evaluated and sent.
          url: The full URL to which the request should be sent. Can be a string
              or atom.url.Url.
          headers: dict of strings. HTTP headers which should be sent
              in the request.
        """
        all_headers = self.headers.copy()
        if headers:
            all_headers.update(headers)

        # Construct the full payload.
        # Assume that data is None or a string.
        data_str = data
        if data:
            if isinstance(data, list):
                # If data is a list of different objects, convert them all to strings
                # and join them together.
                converted_parts = [__ConvertDataPart(x) for x in data]
                data_str = ''.join(converted_parts)
            else:
                data_str = __ConvertDataPart(data)

        # If the list of headers does not include a Content-Length, attempt to
        # calculate it based on the data object.
        if data and 'Content-Length' not in all_headers:
            all_headers['Content-Length'] = len(data_str)

        # Set the content type to the default value if none was set.
        if 'Content-Type' not in all_headers:
            all_headers['Content-Type'] = 'application/atom+xml'

        # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
        if operation == 'GET':
            method = urlfetch.GET
        elif operation == 'POST':
            method = urlfetch.POST
        elif operation == 'PUT':
            method = urlfetch.PUT
        elif operation == 'DELETE':
            method = urlfetch.DELETE
        else:
            method = None
        return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
                                           method=method, headers=all_headers))