Python cloudstorage.delete() Examples

The following are 11 code examples of cloudstorage.delete(). Each example lists the source file and project it was taken from, so you can trace the attribution above each example back to the original code. You may also want to check out the other available functions and classes of the cloudstorage module.
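Before the examples, here is a minimal sketch of the basic call pattern (the helper name delete_if_exists and the sample path are illustrative, not taken from any of the projects below): cloudstorage.delete() expects an absolute '/bucket/object' path and raises cloudstorage.NotFoundError when the object does not exist, so callers that want idempotent cleanup typically catch that error.

import cloudstorage


def delete_if_exists(path):
    """Delete a GCS object, treating an already-missing object as success.

    path: absolute object path in the '/bucket-name/object-name' form
    expected by cloudstorage.delete().
    """
    try:
        cloudstorage.delete(path)
    except cloudstorage.NotFoundError:
        # Nothing to delete; the object is already gone.
        pass


# Usage (hypothetical bucket and object names):
# delete_if_exists('/my-bucket/reports/2015-01.csv')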
Example #1
Source File: _gcs_test.py    From appengine-mapreduce with Apache License 2.0
def testNextWithMissingFiles(self):
    reader = self.create_single_reader(["file-*"])

    # Remove the first and second to last files.
    cloudstorage.delete(self.test_filenames[0])
    cloudstorage.delete(self.test_filenames[-2])
    del self.test_filenames[0]
    del self.test_filenames[-2]

    reader_files = list(reader)
    self.assertEqual(len(self.test_filenames), len(reader_files))
    self.assertEqual(self.test_filenames, [f.name for f in reader_files])
    self.assertEqual(
        self.test_num_files - 2,
        self.slice_ctx.counter(input_reader.GCSInputReader.COUNTER_FILE_READ))
    self.assertEqual(
        2,
        self.slice_ctx.counter(
            input_reader.GCSInputReader.COUNTER_FILE_MISSING)) 
Example #2
Source File: test.py    From billing-export-python with Apache License 2.0
def tearDown(self):
    # for gcs_object in gcs.listbucket(main.BUCKET):
    #  gcs.delete(gcs_object.filename)
    self.testbed.deactivate() 
Example #3
Source File: filestore.py    From MyLife with MIT License
def delete(filename):
	cloudstorage.delete(_path(filename)) 
Example #4
Source File: main.py    From python-docs-samples with Apache License 2.0
def delete_files(self):
    self.response.write('Deleting files...\n')
    for filename in self.tmp_filenames_to_clean_up:
        self.response.write('Deleting file {}\n'.format(filename))
        try:
            cloudstorage.delete(filename)
        except cloudstorage.NotFoundError:
            pass
# [END delete_files] 
Example #5
Source File: main.py    From appengine-photoalbum-example with Apache License 2.0
def delete():
    filename = request.form.keys()[0]
    photo = ndb.Key('User', 'default', 'Photo', filename).get()
    # Decrement each tag's usage count, dropping tags that reach zero.
    for tag in photo.tags:
        entity = ndb.Key('User', 'default', 'Tags', tag).get()
        if entity:
            entity.count -= 1
            if entity.count == 0:
                entity.key.delete()
            else:
                entity.put()
    # Remove the datastore entity and the backing object in Cloud Storage.
    photo.key.delete()
    gcs.delete('/%s/%s' % (bucket_name, filename))
    return redirect(url_for('photos')) 
Example #6
Source File: shuffler.py    From appengine-mapreduce with Apache License 2.0
def run(self, job_ids):
    for job_id in job_ids:
      # Delete the _OutputFile datastore records rooted under this job.
      db.delete(_OutputFile.all().ancestor(_OutputFile.get_root_key(job_id)))
Example #7
Source File: shuffler.py    From appengine-mapreduce with Apache License 2.0
def delete_file_or_list(self, filename_or_list):
    if isinstance(filename_or_list, list):
      for filename in filename_or_list:
        self.delete_file_or_list(filename)
    else:
      filename = filename_or_list
      retry_params = cloudstorage.RetryParams(min_retries=self._MIN_RETRIES,
                                              max_retries=self._MAX_RETRIES)
      # pylint: disable=bare-except
      try:
        cloudstorage.delete(filename, retry_params)
      except:
        pass 
Example #8
Source File: input_readers_test.py    From appengine-mapreduce with Apache License 2.0
def testNextWithMissingFiles(self):
    readers = self.READER_CLS.split_input(
        self.create_mapper_spec(num_shards=1,
                                input_params={"bucket_name": self.test_bucket,
                                              "objects": ["file-*"]}))
    self.assertEqual(1, len(readers))
    # Remove the first and second to last files.
    cloudstorage.delete(self.test_filenames[0])
    cloudstorage.delete(self.test_filenames[-2])
    del self.test_filenames[0]
    del self.test_filenames[-2]

    reader_files = list(readers[0])
    self.assertEqual(len(self.test_filenames), len(reader_files))
    self.assertEqual(self.test_filenames, [f.name for f in reader_files]) 
Example #9
Source File: workers.py    From crmint with Apache License 2.0
def _execute(self):
    # Files created before this cutoff timestamp are considered expired.
    delta = timedelta(self._params['expiration_days'])
    expiration_datetime = datetime.now() - delta
    expiration_timestamp = time.mktime(expiration_datetime.timetuple())
    stats = self._get_matching_stats(self._params['file_uris'])
    for stat in stats:
      if stat.st_ctime < expiration_timestamp:
        gcs.delete(stat.filename)
        self.log_info('gs:/%s file deleted.', stat.filename) 
Example #10
Source File: gcs.py    From luci-py with Apache License 2.0
def delete_file_async(bucket, filename, ignore_missing):
  """Deletes one file stored in GS.

  Arguments:
    bucket: a bucket that contains the files.
    filename: file path to delete (relative to a bucket root).
    ignore_missing: if True, will silently skip missing files, otherwise will
        print a warning to log.
  """
  retry_params = _make_retry_params()
  max_tries = 4
  # The cloudstorage library supports ndb future but for an unknown reason
  # doesn't export it properly.
  api = cloudstorage.storage_api._get_storage_api(
      retry_params=retry_params, account_id=None)
  p = cloudstorage.api_utils._quote_filename('/%s/%s' % (bucket, filename))
  for i in range(max_tries + 1):
    try:
      # The equivalent of cloudstorage.delete(p, retry_params=retry_params)
      status, resp_headers, content = yield api.delete_object_async(p)
      cloudstorage.errors.check_status(
          status, [204], p, resp_headers=resp_headers, body=content)
      return
    except cloudstorage.errors.NotFoundError:
      if not ignore_missing:
        logging.warning(
            'Trying to delete a GS file that\'s not there: /%s/%s',
            bucket, filename)
      return
    except cloudstorage.errors.TransientError as e:
      if i == max_tries:
        raise
      time.sleep(1 + i * 2)
      continue
    except cloudstorage.errors.FatalError as e:
      if 'But got status 429' in e.message:
        if i == max_tries:
          raise
        # There's a bug in cloudstorage.check_status() that mishandles HTTP
        # 429.
        time.sleep(1 + i * 2)
        continue
      raise 
Example #11
Source File: input_readers_end_to_end_test.py    From appengine-mapreduce with Apache License 2.0
def testStrict(self):
    """Tests that fail_on_missing_input works properly."""
    gcs_files = []
    for num in range(10):
      gcs_file = "/los_buckets/file%s" % num
      with cloudstorage.open(gcs_file, "w") as buf:
        buf.write(str(num + 100))
      gcs_files.append("file%s" % num)

    input_class = (input_readers.__name__ + "." +
                   input_readers._GoogleCloudStorageInputReader.__name__)

    def _RunMR(fail_on_missing_input=None):
      """Clears the state and runs a single (strict or not) MR."""
      self._ClearMapperData()

      input_reader_dict = {
          "bucket_name": "los_buckets",
          "objects": gcs_files,
      }
      if fail_on_missing_input is not None:
        input_reader_dict["fail_on_missing_input"] = fail_on_missing_input
      mr_id = control.start_map(
          "job1",
          __name__ + "." + "_input_reader_memory_mapper",
          input_class,
          {
              "input_reader": input_reader_dict,
          },
          shard_count=10)
      test_support.execute_until_empty(self.taskqueue)
      return mr_id

    # All files are there. Default, strict and non-strict MRs should work.
    _RunMR(None)
    self.assertEqual([str(num + 100) for num in range(10)],
                     sorted(_memory_mapper_data))
    _RunMR(False)
    self.assertEqual([str(num + 100) for num in range(10)],
                     sorted(_memory_mapper_data))
    _RunMR(True)
    self.assertEqual([str(num + 100) for num in range(10)],
                     sorted(_memory_mapper_data))

    # Now remove a file.
    cloudstorage.delete("/los_buckets/file5")

    # Non-strict MR still works but some output is not there.
    mr_id = _RunMR(False)
    self.assertEqual([str(num + 100) for num in [0, 1, 2, 3, 4, 6, 7, 8, 9]],
                     sorted(_memory_mapper_data))
    self.assertEquals(model.MapreduceState.get_by_job_id(mr_id).result_status,
                      model.MapreduceState.RESULT_SUCCESS)

    # Strict MR fails.
    mr_id = _RunMR(True)
    self.assertEquals(model.MapreduceState.get_by_job_id(mr_id).result_status,
                      model.MapreduceState.RESULT_FAILED)