Python stats.Stats() Examples

The following are 8 code examples of stats.Stats().
You can go to the original project or source file by following the links above each example, or check out the other available functions and classes of the stats module.
Example #1
Source File: controller.py From daf-recipes with GNU General Public License v3.0 | 5 votes

def index(self):
    c = p.toolkit.c
    stats = stats_lib.Stats()
    rev_stats = stats_lib.RevisionStats()

    c.top_rated_packages = stats.top_rated_packages()
    c.most_edited_packages = stats.most_edited_packages()
    c.largest_groups = stats.largest_groups()
    c.top_tags = stats.top_tags()
    c.top_package_creators = stats.top_package_creators()

    c.new_packages_by_week = rev_stats.get_by_week('new_packages')
    c.deleted_packages_by_week = rev_stats.get_by_week('deleted_packages')
    c.num_packages_by_week = rev_stats.get_num_packages_by_week()
    c.package_revisions_by_week = rev_stats.get_by_week('package_revisions')

    c.raw_packages_by_week = []
    for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week:
        c.raw_packages_by_week.append(
            {'date': h.date_str_to_datetime(week_date),
             'total_packages': cumulative_num_packages})

    c.all_package_revisions = []
    c.raw_all_package_revisions = []
    for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week:
        c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions))
        c.raw_all_package_revisions.append(
            {'date': h.date_str_to_datetime(week_date),
             'total_revisions': num_revisions})

    c.new_datasets = []
    c.raw_new_datasets = []
    for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week:
        c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages))
        c.raw_new_datasets.append(
            {'date': h.date_str_to_datetime(week_date),
             'new_packages': num_packages})

    return p.toolkit.render('ckanext/stats/index.html')
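For orientation, the loops above unpack get_num_packages_by_week() as (week_date, num_packages, cumulative_num_packages) triples and the get_by_week() results as four-element rows whose last two entries are the weekly and cumulative counts, with week_date as a 'YYYY-MM-DD' string. A minimal sketch of those shapes, with purely invented values, looks like this:

# Illustrative shapes only -- the dates and counts are invented, not CKAN data.
num_packages_by_week = [
    ('2017-01-02', 4, 120),   # (week_date, new_that_week, cumulative_total)
    ('2017-01-09', 7, 127),
]

package_revisions_by_week = [
    # (week_date, revision_objects, num_revisions, cumulative_num_revisions)
    ('2017-01-02', ['rev-a', 'rev-b'], 2, 300),
]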
Example #2
Source File: claymores.py From EthMonitoringLinux with MIT License | 5 votes

def getStats(self):
    data = Stats()
    data.type = 0

    try:
        summary_data = self.getData()
        result = json.loads(summary_data)
        summary_response = result["result"]

        miner_stats = summary_response[2].split(";")
        data.version = summary_response[0]
        data.uptime = summary_response[1]

        if len(miner_stats) > 0:
            data.total_hashrate = miner_stats[0]
            data.accepted = miner_stats[1]
            data.rejected = miner_stats[2]

        dual_stats = summary_response[4].split(";")
        if len(dual_stats) > 0:
            data.total_dual_hashrate = dual_stats[0]
            data.dual_accepted = dual_stats[1]
            data.dual_rejected = dual_stats[2]

        data.hashrates = summary_response[3].split(';')      # ETH hashrates
        data.dcr_hashrates = summary_response[5].split(';')  # DCR hashrates

        data.power_usage = amdgpu_stats.read_watts(len(data.hashrates))  # Watts used by AMD / WIP

        # Temps and fan speeds
        temp = summary_response[6].split(';')
        i = 0
        while i < len(temp) - 1:
            data.temps.append(temp[i])
            data.fan_speeds.append(temp[i + 1])
            i += 2

        data.online = self.connected
    except Exception, e:
        print("Parsing error: " + str(e))
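The index-based parsing above implies a fixed layout for the miner's reply. A rough, purely illustrative sketch of the "result" array this code expects (all values invented, not real miner output; only the positions the parser reads matter):

# Illustrative only -- invented values showing which positions are read.
result = [
    "9.3 - ETH",          # [0] miner version
    "21",                 # [1] uptime (minutes)
    "182724;51;0",        # [2] total hashrate; accepted; rejected
    "30502;30457;30297",  # [3] per-GPU ETH hashrates
    "0;0;0",              # [4] dual-mining total; accepted; rejected
    "off;off;off",        # [5] per-GPU DCR hashrates
    "53;71;57;67;61;72",  # [6] alternating temperature; fan speed per GPU
]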
Example #3
Source File: ewbf.py From EthMonitoringLinux with MIT License | 5 votes

def getStats(self):
    data = Stats()
    data.type = 2

    try:
        summary_data = self.getData()
        result = json.loads(summary_data)
        summary_response = result["result"]

        total_hashrate = 0
        data.version = "EWBF"

        for gpu in summary_response:
            # Speed
            data.hashrates.append(gpu["speed_sps"])
            data.power_usage.append(gpu["gpu_power_usage"])
            data.fan_speeds.append("0")
            data.temps.append(gpu["temperature"])
            total_hashrate += gpu["speed_sps"]

            # Shares
            data.accepted += gpu["accepted_shares"]
            data.rejected += gpu["rejected_shares"]

        data.total_hashrate = str(total_hashrate)
        data.online = self.connected
    except:
        print("parsing error")

    return data.toJSON()
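The loop reads a handful of per-GPU keys from each entry in "result". A purely illustrative example of one such entry, with made-up numbers, shows the fields this parser depends on:

# Illustrative only -- one per-GPU record with the keys the loop reads.
gpu = {
    "speed_sps": 290,        # current solution rate
    "gpu_power_usage": 145,  # watts
    "temperature": 63,       # degrees C
    "accepted_shares": 412,
    "rejected_shares": 3,
}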
Example #4
Source File: ethminer.py From EthMonitoringLinux with MIT License | 5 votes

def getStats(self):
    data = Stats()
    data.type = 15

    try:
        summary_data = self.getData()
        result = json.loads(summary_data)
        summary_response = result["result"]

        miner_stats = summary_response[2].split(";")
        data.version = summary_response[0]
        data.uptime = summary_response[1]

        if len(miner_stats) > 0:
            data.total_hashrate = miner_stats[0]
            data.accepted = miner_stats[1]
            data.rejected = miner_stats[2]

        dual_stats = summary_response[4].split(";")
        if len(dual_stats) > 0:
            data.total_dual_hashrate = dual_stats[0]
            data.dual_accepted = dual_stats[1]
            data.dual_rejected = dual_stats[2]

        data.hashrates = summary_response[3].split(';')  # ETH hashrates

        # Temps and fan speeds
        temp = summary_response[6].split(';')
        i = 0
        while i < len(temp) - 1:
            data.temps.append(temp[i])
            data.fan_speeds.append(temp[i + 1])
            i += 2

        data.online = self.connected
    except Exception, e:
        print("Parsing error: " + str(e))
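Parsers like the claymores and ethminer ones above are typically driven by a small polling loop. A minimal hypothetical sketch, assuming getStats() ends by returning data.toJSON() as the ewbf and ccminer variants do, and that toJSON() serializes the attribute names set above (the Miner object and the 10-second interval are assumptions, not part of the project):

import json
import time

def poll(miner, interval=10):
    # 'miner' is assumed to expose getStats() like the examples above.
    while True:
        stats = json.loads(miner.getStats())
        print("hashrate=%s accepted=%s rejected=%s" % (
            stats.get("total_hashrate"),
            stats.get("accepted"),
            stats.get("rejected")))
        time.sleep(interval)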
Example #5
Source File: tello_manager.py From Multi-Tello-Formation with MIT License | 5 votes

def find_avaliable_tello(self, num):
    """
    Find available Tello in the server's subnets.
    :param num: Number of Tello this method is expected to find
    :return: None
    """
    print '[Start_Searching]Searching for %s available Tello...\n' % num

    subnets, address = self.get_subnets()
    possible_addr = []

    for subnet, netmask in subnets:
        for ip in IPNetwork('%s/%s' % (subnet, netmask)):
            # skip local and broadcast addresses
            if str(ip).split('.')[3] == '0' or str(ip).split('.')[3] == '255':
                continue
            possible_addr.append(str(ip))

    while len(self.tello_ip_list) < num:
        print '[Still_Searching]Trying to find Tello in subnets...\n'

        # delete already found Tello
        for tello_ip in self.tello_ip_list:
            if tello_ip in possible_addr:
                possible_addr.remove(tello_ip)

        # skip the server itself
        for ip in possible_addr:
            if ip in address:
                continue

            # record this command
            self.log[ip].append(Stats('command', len(self.log[ip])))
            self.socket.sendto(b'command', (ip, 8889))
        time.sleep(5)

    # filter out non-Tello addresses in the log
    temp = defaultdict(list)
    for ip in self.tello_ip_list:
        temp[ip] = self.log[ip]
    self.log = temp
Example #6
Source File: dqn_agent.py From dist-dqn with MIT License | 5 votes

def __init__(self, env, network, session, replay_memory, config, enable_summary=True):
    self.env = env
    self.network = network
    self.session = session
    self.replay_memory = replay_memory
    self.config = config
    self.training_steps = 0  # Keeps count of learning updates
    self.stats = Stats()

    self.random_action_prob = config.init_random_action_prob
    self.random_action_prob_decay = utils.decay_per_step(
        init_val=self.config.init_random_action_prob,
        min_val=self.config.min_random_action_prob,
        steps=self.config.random_action_explore_steps,
    )

    self.summary_writer = None
    if enable_summary:
        self.summary_writer = tf.train.SummaryWriter(config.logdir, session.graph)

    self.frame_buffer = FrameBuffer(
        frames_per_state=config.frames_per_state,
        preprocessor=self._get_frame_resizer(env, config),
    )

    # Prefill the replay memory with experiences based on random actions
    self._prefill_replay_memory(self.config.replay_start_size)

    # Initialize the target network
    self._update_target_network()
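The exploration schedule here appears to be a linear decay: judging by its parameter names, decay_per_step returns how much to subtract from the random-action probability per step so that it reaches the minimum after the configured number of steps. A minimal sketch of that arithmetic (an assumption about the helper, not the project's actual utils code):

def decay_per_step(init_val, min_val, steps):
    # Linear schedule: after `steps` updates, init_val has shrunk to min_val.
    return (init_val - min_val) / float(steps)

# Hypothetical use during a training step:
# self.random_action_prob = max(
#     self.config.min_random_action_prob,
#     self.random_action_prob - self.random_action_prob_decay)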
Example #7
Source File: ccminer_alexis.py From EthMonitoringLinux with MIT License | 4 votes

def getStats(self):
    data = Stats()
    data.type = 1

    try:
        summary_data = self.getCommand("summary|")
        summary_response = summary_data.split(";")

        if len(summary_response) > 0:
            data.version = summary_response[1].split("=")[1]
            data.total_hashrate = summary_response[5].split("=")[1]
            data.accepted = summary_response[7].split('=')[1]
            data.rejected = summary_response[8].split('=')[1]
            data.uptime = summary_response[14].split('=')[1]

            """ GPU RESPONSE """
            gpu_data = self.getCommand("threads|")
            gpu_response = gpu_data.split("|")

            if len(gpu_response) > 0:
                for gpu in gpu_response:
                    gpu_data = gpu.split(";")

                    if len(gpu_data) == 12:
                        hashrate = 0
                        wattage = 0

                        if data.version == "2.0":
                            hashrate = gpu_data[11].split('=')[1]
                            wattage = (int(gpu_data[4].split('=')[1]) / 1000)
                        else:
                            hashrate = gpu_data[8].split('=')[1]
                            wattage = (int(gpu_data[4].split('=')[1]) / 1000)

                        data.hashrates.append(hashrate)
                        data.temps.append(gpu_data[3].split('=')[1])
                        data.power_usage.append(wattage)
                        data.fan_speeds.append(gpu_data[5].split('=')[1])

        data.online = self.connected
    except:
        print("Data parsing error")

    return data.toJSON()
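The summary parsing relies on fixed positions in ccminer's semicolon-separated key=value reply. A rough, illustrative reply is sketched below; the field names are approximations and every value is invented, since only the positions the code indexes (1, 5, 7, 8 and 14 after splitting on ';' and then on '=') matter here:

# Illustrative only -- approximate field names, invented values.
summary_data = (
    "NAME=ccminer;VER=2.0;API=1.5;ALGO=lyra2v2;GPUS=2;KHS=45000.0;"
    "SOLV=0;ACC=120;REJ=1;ACCMN=3.2;DIFF=0.5;NETKHS=0;POOLS=1;WAIT=0;"
    "UPTIME=3600;TS=1500000000|"
)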
Example #8
Source File: tello_manager.py From Multi-Tello-Formation with MIT License | 4 votes

def send_command(self, command, ip):
    """
    Send a command to the ip address. Blocks until the last command
    receives an 'OK'. If the command fails (because of a timeout or an
    error), it will try to resend the command.
    :param command: (str) the command to send
    :param ip: (str) the ip of Tello
    :return: The latest command response
    """
    # global cmd
    command_sof_1 = ord(command[0])
    command_sof_2 = ord(command[1])

    if command_sof_1 == 0x52 and command_sof_2 == 0x65:
        multi_cmd_send_flag = True
    else:
        multi_cmd_send_flag = False

    if multi_cmd_send_flag == True:
        self.str_cmd_index[ip] = self.str_cmd_index[ip] + 1
        for num in range(1, 5):
            str_cmd_index_h = self.str_cmd_index[ip] / 128 + 1
            str_cmd_index_l = self.str_cmd_index[ip] % 128
            if str_cmd_index_l == 0:
                str_cmd_index_l = str_cmd_index_l + 2
            cmd_sof = [0x52, 0x65, str_cmd_index_h, str_cmd_index_l, 0x01, num + 1, 0x20]
            cmd_sof_str = str(bytearray(cmd_sof))
            cmd = cmd_sof_str + command[3:]
            self.socket.sendto(cmd.encode('utf-8'), (ip, 8889))
            print '[Multi_Command]----Multi_Send----IP:%s----Command: %s\n' % (ip, command[3:])
        real_command = command[3:]
    else:
        self.socket.sendto(command.encode('utf-8'), (ip, 8889))
        print '[Single_Command]----Single_Send----IP:%s----Command: %s\n' % (ip, command)
        real_command = command

    self.log[ip].append(Stats(real_command, len(self.log[ip])))

    start = time.time()
    while not self.log[ip][-1].got_response():
        now = time.time()
        diff = now - start
        if diff > self.COMMAND_TIME_OUT:
            print '[Not_Get_Response]Max timeout exceeded...command: %s \n' % real_command
            return
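Both tello_manager.py examples treat Stats as a per-command log entry: it is constructed with the command string and its index in the log, and got_response() is polled until a reply arrives. A minimal hypothetical sketch of such a class follows; it only illustrates the interface these snippets rely on and is not the project's actual stats.py (add_response is an assumed hook for the receive thread):

class Stats(object):
    """Hypothetical per-command log entry matching the calls shown above."""

    def __init__(self, command, id):
        self.command = command
        self.id = id
        self.response = None

    def add_response(self, response):
        # Assumed hook: a receive thread would store the drone's reply here.
        self.response = response

    def got_response(self):
        return self.response is not None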