Python django.db.connections.close_all() Examples
The following are 20 code examples of django.db.connections.close_all(). Each example comes from an open-source project; you can go to the original project or source file via the reference above each example. You may also want to check out the other available functions and classes of the django.db.connections module.
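All of the examples below share one idea: django.db.connections holds one lazily opened connection per configured database alias, and close_all() closes all of them; Django transparently opens a fresh connection on the next query. The most common reason to call it is a process or thread boundary, as in the minimal sketch below. The sketch is illustrative only: the process_item/process_all functions and the Item model are hypothetical placeholders, not code from any project listed here.

import multiprocessing

from django.db import connections


def process_item(pk):
    # Runs in a child process. Close anything inherited from the parent
    # before the first query; Django then opens a fresh connection lazily.
    connections.close_all()
    from myapp.models import Item  # hypothetical model, for illustration only
    item = Item.objects.get(pk=pk)
    item.processed = True
    item.save()


def process_all(pks):
    # Close the parent's connections BEFORE forking. Otherwise the children
    # inherit the same database socket, and concurrent use corrupts it.
    connections.close_all()
    with multiprocessing.Pool(processes=4) as pool:
        pool.map(process_item, pks)

The only cost is a reconnect on first use after the call, which is why several of the examples below call close_all() freely at the start of a job or in a finally block.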
Example #1
Source File: testcases.py From bioforum with MIT License
def run(self):
    """
    Set up the live server and databases, and then loop over handling
    HTTP requests.
    """
    if self.connections_override:
        # Override this thread's database connections with the ones
        # provided by the main thread.
        for alias, conn in self.connections_override.items():
            connections[alias] = conn
    try:
        # Create the handler for serving static and media files
        handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
        self.httpd = self._create_server()
        # If binding to port zero, assign the port allocated by the OS.
        if self.port == 0:
            self.port = self.httpd.server_address[1]
        self.httpd.set_app(handler)
        self.is_ready.set()
        self.httpd.serve_forever()
    except Exception as e:
        self.error = e
        self.is_ready.set()
    finally:
        connections.close_all()
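A design note on this pattern: close_all() sits in a finally clause, so the live-server thread releases its connections whether it shuts down cleanly or fails during startup; a connection leaked here could later block dropping the test database.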
Example #2
Source File: importer.py From texta with GNU General Public License v3.0
def _create_dataset_import(self, parameters, request_user):
    """Adds a new dataset import entry to database using data access object.

    :param parameters: dataset import parameters.
    :param request_user: Django user who initiated the request.
    :type parameters: dict
    :return: dataset ID of the added dataset import entry
    :rtype: int
    """
    connections.close_all()
    dataset_import = self._dao.objects.create(
        source_type=self._get_source_type(parameters.get('format', ''), parameters.get('archive', '')),
        source_name=self._get_source_name(parameters),
        elastic_index=parameters.get('texta_elastic_index', ''),
        elastic_mapping=parameters.get('texta_elastic_mapping', ''),
        start_time=datetime.now(),
        end_time=None,
        user=request_user,
        status='Processing',
        finished=False,
        must_sync=parameters.get('keep_synchronized', False)
    )
    dataset_import.save()
    return dataset_import.pk
Example #3
Source File: testcases.py From Hands-On-Application-Development-with-PyCharm with MIT License
def run(self):
    """
    Set up the live server and databases, and then loop over handling
    HTTP requests.
    """
    if self.connections_override:
        # Override this thread's database connections with the ones
        # provided by the main thread.
        for alias, conn in self.connections_override.items():
            connections[alias] = conn
    try:
        # Create the handler for serving static and media files
        handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
        self.httpd = self._create_server()
        # If binding to port zero, assign the port allocated by the OS.
        if self.port == 0:
            self.port = self.httpd.server_address[1]
        self.httpd.set_app(handler)
        self.is_ready.set()
        self.httpd.serve_forever()
    except Exception as e:
        self.error = e
        self.is_ready.set()
    finally:
        connections.close_all()
Example #4
Source File: importer.py From texta with GNU General Public License v3.0
def cancel_import_job(self, import_id):
    """Cancels an active import job.

    :param import_id: ID of the cancelled job.
    :type import_id: int or string holding an int
    """
    connections.close_all()
    import_dict = self._active_import_jobs.get(int(import_id), None)
    if import_dict:
        import_process = import_dict.get('process', None)
        if import_process:
            import_process.terminate()
        if import_dict['is_local'] is False:
            tear_down_import_directory(import_dict['directory'])
    try:
        dataset_import = self._dao.objects.get(pk=import_id)
        dataset_import.finished = True
        dataset_import.status = 'Cancelled'
        dataset_import.save()
    except:
        pass
Example #5
Source File: testcases.py From python with Apache License 2.0
def run(self):
    """
    Sets up the live server and databases, and then loops over handling
    http requests.
    """
    if self.connections_override:
        # Override this thread's database connections with the ones
        # provided by the main thread.
        for alias, conn in self.connections_override.items():
            connections[alias] = conn
    try:
        # Create the handler for serving static and media files
        handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
        self.httpd = self._create_server()
        # If binding to port zero, assign the port allocated by the OS.
        if self.port == 0:
            self.port = self.httpd.server_address[1]
        self.httpd.set_app(handler)
        self.is_ready.set()
        self.httpd.serve_forever()
    except Exception as e:
        self.error = e
        self.is_ready.set()
    finally:
        connections.close_all()
Example #6
Source File: testcases.py From python2017 with MIT License
def run(self):
    """
    Sets up the live server and databases, and then loops over handling
    http requests.
    """
    if self.connections_override:
        # Override this thread's database connections with the ones
        # provided by the main thread.
        for alias, conn in self.connections_override.items():
            connections[alias] = conn
    try:
        # Create the handler for serving static and media files
        handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
        self.httpd = self._create_server()
        # If binding to port zero, assign the port allocated by the OS.
        if self.port == 0:
            self.port = self.httpd.server_address[1]
        self.httpd.set_app(handler)
        self.is_ready.set()
        self.httpd.serve_forever()
    except Exception as e:
        self.error = e
        self.is_ready.set()
    finally:
        connections.close_all()
Example #7
Source File: importer.py From texta with GNU General Public License v3.0
def _run_processing_jobs(parameter_dict, reader, n_processes, process_batch_size):
    """Creates document batches and dispatches them to processing nodes.

    :param parameter_dict: dataset import's parameters.
    :param reader: dataset importer's document reader.
    :param n_processes: size of the multiprocessing pool.
    :param process_batch_size: the number of documents to process at any given time by a node.
    :type parameter_dict: dict
    :type n_processes: int
    :type process_batch_size: int
    """
    from django import db
    db.connections.close_all()

    if parameter_dict.get('remove_existing_dataset', False):
        _remove_existing_dataset(parameter_dict)

    import_job_lock = Lock()
    process_pool = Pool(processes=n_processes, initializer=_init_pool, initargs=(import_job_lock,))

    batch = []
    for document in reader.read_documents(**parameter_dict):
        batch.append(document)
        # Send documents when they reach their batch size and empty it.
        if len(batch) == process_batch_size:
            process_pool.apply(_processing_job, args=(batch, parameter_dict))
            batch = []

    # Send the final documents that did not reach the batch size.
    if batch:
        process_pool.apply(_processing_job, args=(batch, parameter_dict))

    process_pool.close()
    process_pool.join()

    _complete_import_job(parameter_dict)
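Note the ordering in this example: the connections are closed before the multiprocessing Pool is created, so the forked workers start without an inherited database socket and each opens its own connection on first query.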
Example #8
Source File: importer.py From arches with GNU Affero General Public License v3.0
def import_one_resource(line):
    """this single resource import function must be outside of the
    BusinessDataImporter class in order for it to be called with
    multiprocessing"""
    connections.close_all()
    reader = ArchesFileReader()
    archesresource = JSONDeserializer().deserialize(line)
    reader.import_business_data({"resources": [archesresource]})
Example #9
Source File: importer.py From texta with GNU General Public License v3.0
def _complete_import_job(parameter_dict):
    """Updates database entry to completed status.

    :param parameter_dict: dataset import's parameters.
    """
    connections.close_all()
    import_id = parameter_dict['import_id']
    dataset_import = DatasetImport.objects.get(pk=import_id)
    dataset_import.end_time = datetime.now()
    dataset_import.status = 'Completed'
    dataset_import.json_parameters = json.dumps(parameter_dict)
    dataset_import.save()
Example #10
Source File: importer.py From texta with GNU General Public License v3.0
def _set_total_documents(parameter_dict, reader):
    """Updates total documents count in the database entry.

    :param parameter_dict: dataset import's parameters.
    :param reader: dataset importer's document reader.
    """
    connections.close_all()
    dataset_import = DatasetImport.objects.get(pk=parameter_dict['import_id'])
    dataset_import.total_documents = reader.count_total_documents(**parameter_dict)
    dataset_import.save()
Example #11
Source File: importer.py From texta with GNU General Public License v3.0
def _import_dataset(parameter_dict, n_processes, process_batch_size):
    """Starts the import process from a parallel process.

    :param parameter_dict: dataset importer's parameters.
    :param n_processes: size of the multiprocessing pool.
    :param process_batch_size: the number of documents to process at any given time by a process.
    :type parameter_dict: dict
    :type n_processes: int
    :type process_batch_size: int
    """
    from django import db
    db.connections.close_all()

    # Local files are not extracted from archives due to directory permissions.
    # If importing from local hard drive, extract first.
    if parameter_dict['is_local'] is False:
        if 'file_path' not in parameter_dict:
            parameter_dict['file_path'] = download(parameter_dict['url'], parameter_dict['directory'])
        _extract_archives(parameter_dict)

    reader = DocumentReader()
    _set_total_documents(parameter_dict=parameter_dict, reader=reader)
    _run_processing_jobs(parameter_dict=parameter_dict, reader=reader,
                         n_processes=n_processes, process_batch_size=process_batch_size)

    # After import is done, remove files from disk.
    tear_down_import_directory(parameter_dict['directory'])
Example #12
Source File: test_executors.py From django-pgschemas with MIT License
def test_all_schemas_in_parallel(self):
    # If there are no errors, then this test passed
    management.call_command("migrate", all_schemas=True, executor="parallel", verbosity=0)
    connections.close_all()
Example #13
Source File: test_executors.py From django-pgschemas with MIT License
def test_all_schemas_in_sequential(self):
    # If there are no errors, then this test passed
    management.call_command("migrate", all_schemas=True, executor="sequential", verbosity=0)
    connections.close_all()
Example #14
Source File: test_chat_behavior.py From chatter with MIT License
def _pre_setup(self):
    from django.db import connections
    connections.close_all()
    super()._pre_setup()
Example #15
Source File: smtpd.py From healthchecks with BSD 3-Clause "New" or "Revised" License
def process_message(
    self, peer, mailfrom, rcpttos, data, mail_options=None, rcpt_options=None
):
    # get a new db connection in case the old one has timed out:
    connections.close_all()

    to_parts = rcpttos[0].split("@")
    code = to_parts[0]

    try:
        data = data.decode()
    except UnicodeError:
        data = "[binary data]"

    if not RE_UUID.match(code):
        self.stdout.write("Not an UUID: %s" % code)
        return

    try:
        check = Check.objects.get(code=code)
    except Check.DoesNotExist:
        self.stdout.write("Check not found: %s" % code)
        return

    action = "success"
    if check.subject:
        parsed = email.message_from_string(data)
        received_subject = parsed.get("subject", "")
        if check.subject not in received_subject:
            action = "ign"

    ua = "Email from %s" % mailfrom
    check.ping(peer[0], "email", "", ua, data, action)
    self.stdout.write("Processed ping for %s" % code)
Example #16
Source File: test_fixtures.py From zulip with Apache License 2.0
def get_migration_status(**options: Any) -> str:
    verbosity = options.get('verbosity', 1)

    for app_config in apps.get_app_configs():
        if module_has_submodule(app_config.module, "management"):
            import_module('.management', app_config.name)

    app_label = options['app_label'] if options.get('app_label') else None
    db = options.get('database', DEFAULT_DB_ALIAS)
    out = StringIO()
    command_args = ['--list']
    if app_label:
        command_args.append(app_label)

    call_command(
        'showmigrations',
        *command_args,
        database=db,
        no_color=options.get('no_color', False),
        settings=options.get('settings', os.environ['DJANGO_SETTINGS_MODULE']),
        stdout=out,
        traceback=options.get('traceback', True),
        verbosity=verbosity,
    )
    connections.close_all()
    out.seek(0)
    output = out.read()
    return re.sub(r'\x1b\[(1|0)m', '', output)
Example #17
Source File: test_fixtures.py From zulip with Apache License 2.0
def database_exists(self) -> bool:
    try:
        connection = connections[DEFAULT_DB_ALIAS]
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT 1 from pg_database WHERE datname=%s;",
                [self.database_name],
            )
            return_value = bool(cursor.fetchone())
        connections.close_all()
        return return_value
    except OperationalError:
        return False
Example #18
Source File: base.py From GTDWeb with GNU General Public License v2.0
def run_from_argv(self, argv):
    """
    Set up any environment changes requested (e.g., Python path
    and Django settings), then run this command. If the
    command raises a ``CommandError``, intercept it and print it sensibly
    to stderr. If the ``--traceback`` option is present or the raised
    ``Exception`` is not ``CommandError``, raise it.
    """
    self._called_from_command_line = True
    parser = self.create_parser(argv[0], argv[1])

    if self.use_argparse:
        options = parser.parse_args(argv[2:])
        cmd_options = vars(options)
        # Move positional args out of options to mimic legacy optparse
        args = cmd_options.pop('args', ())
    else:
        options, args = parser.parse_args(argv[2:])
        cmd_options = vars(options)
    handle_default_options(options)
    try:
        self.execute(*args, **cmd_options)
    except Exception as e:
        if options.traceback or not isinstance(e, CommandError):
            raise

        # SystemCheckError takes care of its own formatting.
        if isinstance(e, SystemCheckError):
            self.stderr.write(str(e), lambda x: x)
        else:
            self.stderr.write('%s: %s' % (e.__class__.__name__, e))
        sys.exit(1)
    finally:
        connections.close_all()
Example #19
Source File: handlers.py From greentor with MIT License
def on_finish(self):
    connections.close_all()
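Closing connections in Tornado's on_finish() hook roughly mirrors Django's own end-of-request cleanup (Django closes expired connections on the request_finished signal), preventing a long-running event-loop process from accumulating stale connections.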
Example #20
Source File: test_runner.py From zulip with Apache License 2.0
def init_worker(counter: Synchronized) -> None:
    """
    This function runs only under parallel mode. It initializes the
    individual processes which are also called workers.
    """
    global _worker_id

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    """
    You can now use _worker_id.
    """

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)
    initialize_worker_path(_worker_id)

    def is_upload_avatar_url(url: URLPattern) -> bool:
        if url.pattern.regex.pattern == r'^user_avatars/(?P<path>.*)$':
            return True
        return False

    # We manually update the upload directory path in the url regex.
    from zproject import dev_urls
    found = False
    for url in dev_urls.urls:
        if is_upload_avatar_url(url):
            found = True
            new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
            url.default_args['document_root'] = new_root

    if not found:
        print("*** Upload directory not found.")