Python influxdb.exceptions.InfluxDBClientError() Examples
The following are 23 code examples of influxdb.exceptions.InfluxDBClientError().
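Before the project examples, here is a minimal sketch of how this exception typically surfaces when writing points with the influxdb package; the host, port, database name, and measurement are placeholder assumptions, not taken from any of the examples below:

from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError

# Placeholder connection details; adjust for a real InfluxDB 1.x server.
client = InfluxDBClient(host='localhost', port=8086, database='example_db')

points = [{'measurement': 'cpu', 'fields': {'value': 0.42}}]
try:
    client.write_points(points)
except InfluxDBClientError as err:
    # influxdb-python raises this on 4xx responses; `err.code` carries the
    # HTTP status and `err.content` the server's error message.
    print('Client error %s: %s' % (err.code, err.content))

As several of the examples below show, InfluxDBServerError (raised on 5xx responses) is often caught alongside it.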

Example #1
Source File: audio.py From PolyglotDB with MIT License
def execute_influxdb(self, query):
    """
    Execute an InfluxDB query for the corpus

    Parameters
    ----------
    query : str
        Query to run

    Returns
    -------
    :class:`influxdb.resultset.ResultSet`
        Results of the query
    """
    client = self.acoustic_client()
    try:
        result = client.query(query)
    except InfluxDBClientError:
        print('There was an issue with the following query:')
        print(query)
        raise
    return result
Example #2
Source File: generator.py From grafana-telegraf-dashboard-generator with GNU Affero General Public License v3.0
def open(self):
    """
    Connect to InfluxDB cluster.
    """
    try:
        self.cc = InfluxDBClusterClient(hosts=self.hosts,
                                        username=self.influxdb_user,
                                        password=self.influxdb_passwd,
                                        ssl=self.influxdb_ssl,
                                        verify_ssl=self.influxdb_verify_ssl)
        self.is_connected = CONNECTED
    except InfluxDBClientError as e:
        logging.warning("Connection failed: %s" % e)
        return False
    return True
Example #3
Source File: handlers.py From probr-core with MIT License
def initDb(self, client):
    try:
        client.create_database(influx_config.influx_db)
    except InfluxDBClientError:
        return

# We have to rewrite the packet structure since
# for influxdb, the tags and fields in a packet must be in the form of:
# packet = {
#     tags : {
#         tagname1: tagValue,
#         tagname2: tagValue,
#         .
#         .
#     },
#     time: ..... ,
#     fields : {
#         ssid: ...,
#         mac_addr_src: ....,
#         .
#         .
#         value: .....
#     }
# }
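For concreteness, a point in the shape sketched by the comments above might look like the following; every name and value here is an illustrative placeholder, not something taken from the probr-core source:

# Hypothetical point matching the layout in the comments above; all
# names and values are placeholders.
packet = {
    'measurement': 'packets',  # assumed; the comment above omits the measurement key
    'tags': {
        'capture_device': 'probe-01',  # placeholder tag
    },
    'time': '2015-01-01T00:00:00Z',
    'fields': {
        'ssid': 'example-network',
        'mac_addr_src': 'aa:bb:cc:dd:ee:ff',
        'value': 1,
    },
}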
Example #4
Source File: influxdb-import.py From backtrader with GNU General Public License v3.0
def write_dataframe_to_idb(self, ticker):
    """Write Pandas Dataframe to InfluxDB database"""
    cachepath = self._cache
    cachefile = ('%s/%s-1M.csv.gz' % (cachepath, ticker))
    if not os.path.exists(cachefile):
        log.warn('Import file does not exist: %s' % (cachefile))
        return

    df = pd.read_csv(cachefile, compression='infer', header=0,
                     infer_datetime_format=True)
    df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
    df = df.set_index('Datetime')
    df = df.drop(['Date', 'Time'], axis=1)

    try:
        self.dfdb.write_points(df, ticker)
    except InfluxDBClientError as err:
        log.error('Write to database failed: %s' % err)
Example #5
Source File: worker.py From kafka-influxdb with Apache License 2.0
def flush(self):
    """ Flush values with writer """
    if not self.buffer:
        # Don't do anything when buffer empty
        return
    try:
        self.last_flush_time = time.time()
        self.writer.write(self.buffer)
        if self.config.statistics:
            self.show_statistics()
    except (InfluxDBServerError, InfluxDBClientError) as influx_error:
        logging.error("Error while writing to InfluxDB: %s", influx_error)
    finally:
        self.buffer = []
Example #6
Source File: influx.py From iris with BSD 2-Clause "Simplified" License
def send_metrics(self, metrics):
    if not self.enable_metrics:
        return
    now = str(datetime.now())
    payload = []
    for metric, value in metrics.items():
        data = {
            'measurement': self.appname,
            'tags': {},
            'time': now,
            'fields': {
                metric: value
            }
        }
        if self.extra_tags:
            data['tags'].update(self.extra_tags)
        payload.append(data)
    try:
        self.client.write_points(payload)
    except (RequestException, InfluxDBClientError, InfluxDBServerError):
        logger.exception('Failed to send metrics to influxdb')
Example #7
Source File: gpu_logger.py From gpu_monitor with MIT License
def _create_influxdb_writer(influxdb_client, tags):
    """ Returns function which writes to influxdb

    Parameters
    ----------
    influxdb_client:
    """
    def to_influxdf(data_list, retries=5, pause=5):
        logger = _logger()
        logger.debug(data_list)
        for i in range(retries):
            try:
                if influxdb_client.write_points(data_list, tags=tags):
                    logger.debug("Success")
                    break
                else:
                    sleep(pause)
            except InfluxDBClientError:
                logger.debug('Failed {} out of {}'.format(i, retries))
        else:
            logger.warning("Failed to write to Database")

    return to_influxdf
Example #8
Source File: worker.py From kafka-influxdb with Apache License 2.0
def init_database(self):
    """ Initialize the InfluxDB database if it is not already there """
    try:
        logging.info("Creating InfluxDB database if not exists: %s",
                     self.config.influxdb_dbname)
        self.writer.create_database(self.config.influxdb_dbname)
    except ConnectionError as error:
        logging.error(
            "Connection error while trying to create InfluxDB database: %s. Waiting for retry...",
            error)
        time.sleep(self.db_create_delay)
        self.init_database()
    except (InfluxDBServerError, InfluxDBClientError) as error:
        logging.warning(
            "Could not create InfluxDB database. Assuming it already exists: %s",
            error)
Example #9
Source File: influx.py From oncall with BSD 2-Clause "Simplified" License
def send_metrics(self, metrics):
    if not self.enable_metrics:
        return
    now = str(datetime.now())
    payload = []
    for metric, value in metrics.items():
        data = {
            'measurement': self.appname,
            'tags': {},
            'time': now,
            'fields': {
                metric: value
            }
        }
        if self.extra_tags:
            data['tags'].update(self.extra_tags)
        payload.append(data)
    try:
        self.client.write_points(payload)
    except (RequestException, InfluxDBClientError, InfluxDBServerError):
        logger.exception('Failed to send metrics to influxdb')
Example #10
Source File: influx_listenstore.py From listenbrainz-server with GNU General Public License v2.0
def delete(self, musicbrainz_id):
    """ Delete all listens for user with specified MusicBrainz ID.

    Note: this method tries to delete the user 5 times before giving up.

    Args:
        musicbrainz_id (str): the MusicBrainz ID of the user

    Raises: Exception if unable to delete the user in 5 retries
    """
    for _ in range(5):
        try:
            self.influx.drop_measurement(get_measurement_name(musicbrainz_id))
            break
        except InfluxDBClientError as e:
            # influxdb-python raises client error if measurement isn't found
            # so we have to handle that case.
            if 'measurement not found' in e.content:
                return
            else:
                self.log.error('Error in influx client while dropping user %s: %s',
                               musicbrainz_id, str(e), exc_info=True)
                time.sleep(3)
        except InfluxDBServerError as e:
            self.log.error('Error in influx server while dropping user %s: %s',
                           musicbrainz_id, str(e), exc_info=True)
            time.sleep(3)
        except Exception as e:
            self.log.error('Error while trying to drop user %s: %s',
                           musicbrainz_id, str(e), exc_info=True)
            time.sleep(3)
    else:
        raise InfluxListenStoreException("Couldn't delete user with MusicBrainz ID: %s" % musicbrainz_id)
Example #11
Source File: metrics_repository.py From monasca-api with Apache License 2.0
def list_metrics(self, tenant_id, region, name, dimensions, offset,
                 limit, start_timestamp=None, end_timestamp=None):
    try:
        query = self._build_show_series_query(dimensions, name, tenant_id, region)
        query += " limit {}".format(limit + 1)
        if offset:
            query += ' offset {}'.format(int(offset) + 1)

        result = self.query_tenant_db(query, tenant_id)

        json_metric_list = self._build_serie_metric_list(result,
                                                         tenant_id,
                                                         region,
                                                         start_timestamp,
                                                         end_timestamp,
                                                         offset)
        return json_metric_list
    except InfluxDBClientError as ex:
        if str(ex).startswith(MEASUREMENT_NOT_FOUND_MSG):
            return []
        else:
            LOG.exception(ex)
            raise exceptions.RepositoryException(ex)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
Example #12
Source File: metrics_repository.py From monasca-api with Apache License 2.0
def query_tenant_db(self, query, tenant_id):
    database = ('%s_%s' % (self.conf.influxdb.database_name, tenant_id)
                if self.conf.influxdb.db_per_tenant
                else self.conf.influxdb.database_name)
    try:
        return self.influxdb_client.query(query, database=database)
    except InfluxDBClientError as ex:
        if (str(ex).startswith(DATABASE_NOT_FOUND_MSG) and
                self.conf.influxdb.db_per_tenant):
            return None
        else:
            raise
Example #13
Source File: metrics_repository.py From monasca-api with Apache License 2.0
def _get_influxdb_version(self):
    '''If Version found in the result set, return the InfluxDB Version,
    otherwise raise an exception.

    InfluxDB has changed the format of their result set and SHOW DIAGNOSTICS
    was introduced at some point so earlier releases of InfluxDB might not
    return a Version.
    '''
    try:
        result = self.influxdb_client.query('SHOW DIAGNOSTICS')
    except InfluxDBClientError as ex:
        LOG.exception(ex)
        raise

    if 'series' not in result.raw:
        LOG.exception('series not in result.raw')
        raise Exception('Series not in SHOW DIAGNOSTICS result set')

    for series in result.raw['series']:
        if 'columns' not in series:
            continue
        columns = series['columns']
        if u'Version' not in series['columns']:
            continue
        if u'values' not in series:
            continue
        for value in series[u'values']:
            version_index = columns.index(u'Version')
            version_str = value[version_index]
            return version.StrictVersion(version_str)

    raise Exception('Version not found in SHOW DIAGNOSTICS result set')
Example #14
Source File: dump_manager.py From listenbrainz-server with GNU General Public License v2.0
def import_dump(private_archive, public_archive, listen_archive, threads):
    """ Import a ListenBrainz dump into the database.

    Note: This method tries to import the private db dump first, followed by the
    public db dump. However, in absence of a private dump, it imports sanitized
    versions of the user table in the public dump in order to satisfy foreign
    key constraints.

    Then it imports the listen dump.

    Args:
        private_archive (str): the path to the ListenBrainz private dump to be imported
        public_archive (str): the path to the ListenBrainz public dump to be imported
        listen_archive (str): the path to the ListenBrainz listen dump archive to be imported
        threads (int): the number of threads to use during decompression, defaults to 1
    """
    if not private_archive and not public_archive and not listen_archive:
        print('You need to enter a path to the archive(s) to import!')
        sys.exit(1)

    app = create_app()
    with app.app_context():
        db_dump.import_postgres_dump(private_archive, public_archive, threads)

        from listenbrainz.webserver.influx_connection import _influx as ls
        try:
            ls.import_listens_dump(listen_archive, threads)
        except IOError as e:
            current_app.logger.critical('IOError while trying to import data into Influx: %s',
                                        str(e), exc_info=True)
            raise
        except InfluxDBClientError as e:
            current_app.logger.critical('Error while sending data to Influx: %s',
                                        str(e), exc_info=True)
            raise
        except InfluxDBServerError as e:
            current_app.logger.critical('InfluxDB Server Error while importing data: %s',
                                        str(e), exc_info=True)
            raise
        except Exception as e:
            current_app.logger.critical('Unexpected error while importing data: %s',
                                        str(e), exc_info=True)
            raise
Example #15
Source File: cli.py From listenbrainz-server with GNU General Public License v2.0
def demo_user_replay(user_name):
    replayer = DemoUserReplayer(user_name)
    try:
        replayer.start()
    except (InfluxDBClientError, InfluxDBServerError) as e:
        replayer.app.logger.error("Influx error while replaying listens: %s",
                                  str(e), exc_info=True)
        raise
    except Exception as e:
        replayer.app.logger.error("Error while replaying listens: %s",
                                  str(e), exc_info=True)
        raise
Example #16
Source File: influx_listenstore.py From listenbrainz-server with GNU General Public License v2.0
def query(self, query):
    while True:
        try:
            return self.influx.query(query)
        except InfluxDBClientError as e:
            self.log.error("Client error while querying influx: %s", str(e), exc_info=True)
            time.sleep(1)
        except InfluxDBServerError as e:
            self.log.error("Server error while querying influx: %s", str(e), exc_info=True)
            time.sleep(1)
        except Exception as e:
            self.log.error("Error while querying influx: %s", str(e), exc_info=True)
            time.sleep(1)
Example #17
Source File: influx_listenstore.py From listenbrainz-server with GNU General Public License v2.0
def get_listen_count_for_user(self, user_name, need_exact=False):
    """Get the total number of listens for a user. The number of listens comes from
    brainzutils cache unless an exact number is asked for.

    Args:
        user_name: the user to get listens for
        need_exact: if True, get an exact number of listens directly from the ListenStore
    """
    if not need_exact:
        # check if the user's listen count is already in cache
        # if already present return it directly instead of calculating it again
        # decode is set to False as we have not encoded the value when we set it
        # in brainzutils cache as we need to call increment operation which requires
        # an integer value
        user_key = '{}{}'.format(REDIS_INFLUX_USER_LISTEN_COUNT, user_name)
        count = cache.get(user_key, decode=False)
        if count:
            return int(count)

    try:
        results = self.influx.query('SELECT count(*) FROM ' + get_escaped_measurement_name(user_name))
    except (InfluxDBServerError, InfluxDBClientError) as e:
        self.log.error("Cannot query influx: %s" % str(e), exc_info=True)
        raise

    # get the number of listens from the json
    try:
        count = results.get_points(measurement=get_measurement_name(user_name)).__next__()['count_recording_msid']
    except (KeyError, StopIteration):
        count = 0

    # put this value into brainzutils cache with an expiry time
    user_key = "{}{}".format(REDIS_INFLUX_USER_LISTEN_COUNT, user_name)
    cache.set(user_key, int(count), InfluxListenStore.USER_LISTEN_COUNT_CACHE_TIME, encode=False)
    return int(count)
Example #18
Source File: influxdb_plugin.py From isilon_data_insights_connector with MIT License
def _write_points(points, num_points):
    """
    Write the points to the InfluxDB in groups that are
    MAX_POINTS_PER_WRITE in size.
    """
    LOG.debug("Writing points %d", num_points)
    write_index = 0
    points_written = 0
    while write_index < num_points:
        max_write_index = write_index + MAX_POINTS_PER_WRITE
        write_points = points[write_index:max_write_index]
        try:
            g_client.write_points(write_points)
            points_written += len(write_points)
        except InfluxDBServerError as svr_exc:
            LOG.error(
                "InfluxDBServerError: %s\nFailed to write points: %s",
                str(svr_exc),
                _get_point_names(write_points),
            )
        except InfluxDBClientError as client_exc:
            LOG.error(
                "InfluxDBClientError writing points: %s\n"
                "Error: %s",
                _get_point_names(write_points),
                str(client_exc),
            )
        except requests.exceptions.ConnectionError as req_exc:
            LOG.error(
                "ConnectionError exception caught writing points: %s\n"
                "Error: %s",
                _get_point_names(write_points),
                str(req_exc),
            )
        write_index += MAX_POINTS_PER_WRITE
    return points_written
Example #19
Source File: iqfeed-to-influxdb.py From backtrader with GNU General Public License v3.0
def get_historical_minute_data(self, ticker: str):
    """Request historical 5 minute data from DTN."""
    start = self._start
    stop = self._stop
    if len(stop) > 4:
        stop = stop[:4]
    if len(start) > 4:
        start = start[:4]
    for year in range(int(start), int(stop) + 1):
        beg_time = ('%s0101000000' % year)
        end_time = ('%s1231235959' % year)
        msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % (ticker, beg_time, end_time)
        try:
            data = iq.iq_query(message=msg)
            iq.add_data_to_df(data=data)
        except Exception as err:
            log.error('No data returned because %s', err)

    try:
        self.dfdb.write_points(self._ndf, ticker)
    except InfluxDBClientError as err:
        log.error('Write to database failed: %s' % err)
Example #20
Source File: util.py From kotori with GNU Affero General Public License v3.0
def make_reset_measurement(self):

    @pytest.fixture(scope="function")
    def reset_measurement():
        logger.info('InfluxDB: Resetting database')

        # Clear out the database.
        influx = InfluxWrapper(database=self.database, measurement=self.measurement)
        influx.client.delete_series(self.database, measurement=self.measurement)
        # try:
        # except InfluxDBClientError as ex:
        #     if 'database not found: mqttkit_1_itest' not in ex.message:
        #         raise

    return reset_measurement
Example #21
Source File: consumer.py From panoptes with Apache License 2.0
def _send_to_influxdb(self, point):
    """
    This method attempts to send points to InfluxDB write api in batch and
    retries incase of failures. It calls _send_one_by_one method when it gets
    response code 400 (unable to parse).

    Args:
        point(str): InfluxDB points string

    Returns:
        bool: True if it was able to emit some metrics to YAMAS2, false otherwise
    """
    logger = self._logger

    self.influxdb_points.add(point)
    self.influxdb_points_batch_size = len(self.influxdb_points)
    time_over_emit_interval = round(self.current_time - self._last_emitted)

    if self.influxdb_points_batch_size >= self._config[u'influxdb'][u'write_api_batch_size'] \
            or time_over_emit_interval >= self._config[u'influxdb'][u'write_api_max_emit_interval']:

        logger.debug(u'Going to send {} bytes to InfluxDB api ({} points, {}s over emit interval)'
                     .format(sys.getsizeof(self.influxdb_points), len(self.influxdb_points),
                             time_over_emit_interval))

        for retry in range(0, self._config[u'influxdb'][u'write_api_commit_retries']):
            try:
                self.influxdb_connection.write_points(
                    list(self.influxdb_points),
                    time_precision=u's',
                    protocol=u'line',
                    batch_size=self._config[u'influxdb'][u'write_api_batch_size'])
                logger.debug(u'Successfully bulk sent {} points to InfluxDB API'.format(
                    len(self.influxdb_points)))
                self._clear_metrics(self.current_time)
                break
            except InfluxDBClientError as e:
                logger.exception(u'Failed while trying to send {} bytes ({} points)'.
                                 format(sys.getsizeof(self.influxdb_points),
                                        len(self.influxdb_points)))
                if e.code == 400:
                    if self._send_one_by_one():
                        break
                else:
                    continue
            except Exception as e:
                logger.exception(u'Failed while trying to send {} bytes ({} points): {}'.
                                 format(sys.getsizeof(self.influxdb_points),
                                        len(self.influxdb_points), repr(e)))
                continue

        # Return False to Kafka consumer once we have points above write_api_batch_size in buffer and
        # not able to send *any* of them
        if len(self.influxdb_points) > self._config[u'influxdb'][u'write_api_batch_size']:
            logger.warn(u'Retries failed, will try again after backoff interval {}s'.
                        format(self._config[u'influxdb'][u'write_api_fail_backoff_interval']))
            time.sleep(self._config[u'influxdb'][u'write_api_fail_backoff_interval'])
            return False

    return True
Example #22
Source File: influx_listenstore.py From listenbrainz-server with GNU General Public License v2.0
def insert(self, listens):
    """ Insert a batch of listens. """

    submit = []
    user_names = {}
    for listen in listens:
        user_names[listen.user_name] = 1
        submit.append(listen.to_influx(quote(listen.user_name)))

    if not self.influx.write_points(submit, time_precision='s'):
        self.log.error("Cannot write data to influx. (write_points returned False), data=%s",
                       json.dumps(submit, indent=3))

    # If we reach this point, we were able to write the listens to the InfluxListenStore.
    # So update the listen counts of the users cached in brainzutils cache.
    for data in submit:
        user_key = "{}{}".format(REDIS_INFLUX_USER_LISTEN_COUNT, data['fields']['user_name'])
        cached_count = cache.get(user_key, decode=False)
        if cached_count:
            cache.increment(user_key)

    # Invalidate cached data for user
    for user_name in user_names.keys():
        cache.delete(REDIS_USER_TIMESTAMPS % user_name)

    if len(listens):
        # Enter a measurement to count items inserted
        submit = [{
            'measurement': TEMP_COUNT_MEASUREMENT,
            'tags': {
                COUNT_MEASUREMENT_NAME: len(listens)
            },
            'fields': {
                COUNT_MEASUREMENT_NAME: len(listens)
            }
        }]
        try:
            if not self.influx.write_points(submit):
                self.log.error("Cannot write listen cound to influx. (write_points returned False)")
        except (InfluxDBServerError, InfluxDBClientError, ValueError) as err:
            self.log.error("Cannot write data to influx: %s, data: %s",
                           str(err), json.dumps(submit, indent=3), exc_info=True)
            raise
Example #23
Source File: influx_listenstore.py From listenbrainz-server with GNU General Public License v2.0
def get_total_listen_count(self, cache_value=True):
    """ Returns the total number of listens stored in the ListenStore.
        First checks the brainzutils cache for the value, if not present there
        makes a query to the db and caches it in brainzutils cache.
    """
    if cache_value:
        count = cache.get(InfluxListenStore.REDIS_INFLUX_TOTAL_LISTEN_COUNT, decode=False)
        if count:
            return int(count)

    try:
        result = self.influx.query("""SELECT %s FROM "%s" ORDER BY time DESC LIMIT 1"""
                                   % (COUNT_MEASUREMENT_NAME, TIMELINE_COUNT_MEASUREMENT))
    except (InfluxDBServerError, InfluxDBClientError) as err:
        self.log.error("Cannot query influx: %s" % str(err), exc_info=True)
        raise

    try:
        item = result.get_points(measurement=TIMELINE_COUNT_MEASUREMENT).__next__()
        count = int(item[COUNT_MEASUREMENT_NAME])
        timestamp = convert_to_unix_timestamp(item['time'])
    except (KeyError, ValueError, StopIteration):
        timestamp = 0
        count = 0

    # Now sum counts that have been added in the interval we're interested in
    try:
        result = self.influx.query("""SELECT sum(%s) as total FROM "%s" WHERE time > %s"""
                                   % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT,
                                      get_influx_query_timestamp(timestamp)))
    except (InfluxDBServerError, InfluxDBClientError) as err:
        self.log.error("Cannot query influx: %s" % str(err), exc_info=True)
        raise

    try:
        data = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__()
        count += int(data['total'])
    except StopIteration:
        pass

    if cache_value:
        cache.set(
            InfluxListenStore.REDIS_INFLUX_TOTAL_LISTEN_COUNT,
            int(count),
            InfluxListenStore.TOTAL_LISTEN_COUNT_CACHE_TIME,
            encode=False,
        )
    return count