Python fetch list

The 52 Python code examples below are all related to "fetch list". The project and source file named above each example identify where the code comes from.
Example 1
Source File: NetflixHttpSubRessourceHandler.py    From plugin.video.netflix with MIT License (6 votes)
def fetch_video_list_ids(self, params):
        """Video list ids proxy function (caches video lists)

        Parameters
        ----------
        params : :obj:`dict` of :obj:`str`
            Request params

        Returns
        -------
        :obj:`list`
            Transformed response of the remote call
        """
        guid = self.netflix_session.user_data.get('guid')
        cached_list = self.video_list_cache.get(guid, None)
        if cached_list is not None:
            self.nx_common.log(msg='Serving cached list for user: ' + guid)
            return cached_list
        video_list_ids_raw = self.netflix_session.fetch_video_list_ids()

        if 'error' in video_list_ids_raw:
            return video_list_ids_raw
        video_list = self.netflix_session.parse_video_list_ids(
            response_data=video_list_ids_raw)
        return video_list 
Example 2
Source File: api.py    From Weibo-Album-Crawler with MIT License (6 votes)
def fetch_album_list(uid, page=1, count=20):
        """
        Fetch the user's album list.
        :param uid: user id
        :param page: album list page number
        :param count: album list page size
        :return: int total album count, list of albums
        """
        params = {
            'uid': uid,
            'page': page,
            'count': count,
            '__rnd': WeiboApi.make_rnd()
        }
        data = WeiboApi.get_json(Url.ALBUM_LIST, params=params)
        return data['total'], data['album_list'] 
Example 3
Source File: dl_bt_metaquotes.py    From FX-BT-Scripts with MIT License (6 votes)
def fetchHistoryList(pair):
    listUrlTemplate = 'http://history.metaquotes.net/symbols/%s/list.txt'
    listUrl = listUrlTemplate % pair
    if args.verbose: print('Downloading %s list file from %s ...' % (pair, listUrl))

    history = []
    try:
        request = urllib.request.Request(listUrl, None, {'User-Agent': userAgent})
        with urllib.request.urlopen(request) as response:
            for line in response:
                history += [line.decode('utf-8').rstrip('\n')]
    except URLError as e:
        if hasattr(e, 'reason'):
            error(e.reason)
        elif hasattr(e, 'code'):
            error(e.code)

    return history 
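Note: this excerpt relies on module-level names defined elsewhere in dl_bt_metaquotes.py (args, userAgent, error); the imports it needs would look roughly like this sketch:

import urllib.request
from urllib.error import URLError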
Example 4
Source File: instance.py    From huskar with MIT License (6 votes)
def fetch_instance_list(self, pairs, resolve=True):
        include_comment = self.include_comment and not g.auth.is_minimal_mode
        for cluster_name, key in pairs:
            info, physical_name = self.im.get_instance(
                cluster_name, key, resolve=resolve)
            if info.stat is None:
                continue
            data = {
                'application': self.application_name,
                'cluster': cluster_name,
                'key': key,
                'value': info.data,
                'meta': self.make_meta_info(info),
            }
            if self.subdomain == SERVICE_SUBDOMAIN:
                data['runtime'] = self.make_runtime_field(info)
                if physical_name:
                    data['cluster_physical_name'] = physical_name
            if include_comment:
                comment = get_comment(
                    self.application_name, cluster_name,
                    self.subdomain, key)
                data['comment'] = comment
            yield data 
Example 5
Source File: cdn_client.py    From CSGO-Market-Float-Finder with MIT License (6 votes)
def fetch_server_list(host, port, cell_id):
		url = "http://%s:%d/serverlist/%d/%d/" % (host, port, cell_id, 20)
		
		r = requests.get(url)
		serverkv = vdf.loads(r.content)
			
		if serverkv.get('deferred') == '1':
			return None

		servers = []
		for id, child in serverkv['serverlist'].iteritems():
			# host may carry an explicit ":port" suffix
			if child.get('host').find(':') > 0:
				(h, p) = child.get('host').split(':')
			else:
				(h, p) = child.get('host'), 80
			
			load = child.get('weightedload')
			servers.append((h, p, load, child.get('type')))

		return sorted(servers, key=itemgetter(2)) 
Example 6
Source File: duplicati_client.py    From duplicati-client with GNU Lesser General Public License v2.1 (6 votes)
def fetch_backup_list(data):
    backups = fetch_resource_list(data, "backups")

    # Fetch progress state
    progress_state, active_id = fetch_progress_state(data)
    progress = progress_state.get("OverallProgress", 1)

    backup_list = []
    for backup in backups:
        backup_id = backup.get("Backup", {}).get("ID", 0)
        if active_id is not None and backup_id == active_id and progress != 1:
            backup["Progress"] = progress_state
        backup_list.append(backup)

    return backup_list


# Fetch all databases 
Example 7
Source File: duplicati_client.py    From duplicati-client with GNU Lesser General Public License v2.1 (6 votes)
def fetch_database_list(data):
    databases = fetch_resource_list(data, "backups")

    database_list = []
    for backup in databases:
        db_path = backup.get("Backup", {}).get("DBPath", "")
        db_exists = validate_database_exists(data, db_path)
        database = {
            "Backup": backup.get("Backup", {}).get("Name", 0),
            "DBPath": db_path,
            "ID": backup.get("Backup", {}).get("ID", 0),
            "Exists": db_exists
        }
        database_list.append(database)

    return database_list


# Validate that the database exists on the server 
Example 8
Source File: create_abundance_file_from_database.py    From TALON with MIT License (6 votes)
def fetch_dataset_list(dataset_file, database):
    """ Gets a list of all datasets in the database """

    conn = sqlite3.connect(database)
    cursor = conn.cursor()
    all_db_datasets = qutils.fetch_all_datasets(cursor)
    conn.close()

    if dataset_file is None:
        return all_db_datasets

    else:
        datasets = []
        with open(dataset_file, 'r') as f:
            for line in f:
                dataset = line.strip()
                if dataset not in all_db_datasets:
                    raise ValueError("Dataset name '%s' not found in database" \
                                      % (dataset))
                datasets.append(dataset)
   
        return datasets 
Example 9
Source File: duplicati_client.py    From duplicati-client with GNU Lesser General Public License v2.1 (6 votes)
def fetch_resource_list(data, resource):
    baseurl = common.create_baseurl(data, "/api/v1/" + resource)
    common.log_output("Fetching " + resource + " list from API...", False)
    cookies = common.create_cookies(data)
    headers = common.create_headers(data)
    verify = data.get("server", {}).get("verify", True)
    r = requests.get(baseurl, headers=headers, cookies=cookies, verify=verify)
    common.check_response(data, r.status_code)
    if r.status_code == 404:
        common.log_output("No entries found", True, r.status_code)
        sys.exit(2)
    elif r.status_code != 200:
        common.log_output("Error connecting", True, r.status_code)
        sys.exit(2)
    else:
        return r.json()


# Filter logic for the list function to facilitate readable output 
Example 10
Source File: ch06_listing_source.py    From riacn-code with MIT License (6 votes)
def fetch_autocomplete_list(conn, user, prefix):
    # Fetch the autocomplete list.
    candidates = conn.lrange('recent:' + user, 0, -1)
    matches = []
    # Check each candidate contact.
    for candidate in candidates:
        if candidate.lower().startswith(prefix):
            # Found a matching contact.
            matches.append(candidate)
    # Return all of the matching contacts.
    return matches
# <end id="_1314_14473_8386"/>


# Listing 6-3
# <start id="_1314_14473_8396"/>
# Prepare a list made up of known characters.
Example 11
Source File: option.py    From fast_arrow with MIT License (6 votes)
def fetch_list(cls, client, ids):
        """
        fetch instruments by ids
        """
        results = []
        request_url = "https://api.robinhood.com/options/instruments/"

        for _ids in chunked_list(ids, 50):

            params = {"ids": ",".join(_ids)}
            data = client.get(request_url, params=params)
            partial_results = data["results"]

            while data["next"]:
                data = client.get(data["next"])
                partial_results.extend(data["results"])
            results.extend(partial_results)

        return results 
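Note: chunked_list is a fast_arrow helper not shown here; a minimal sketch that slices the ids into groups of at most `size` might look like this:

def chunked_list(items, size):
    # Sketch: yield successive slices of at most `size` elements.
    for i in range(0, len(items), size):
        yield items[i:i + size]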
Example 12
Source File: tasks.py    From doufen with MIT License (6 votes)
def fetch_note_list(self):
        url = self.account.user.alt + 'notes'
        notes = []
        while True:
            response = self.fetch_url_content(url)
            if not response:
                break
            dom = PyQuery(response.text)
            note_items = dom('#content .article>.note-container')
            for note_item in note_items:
                notes.append(PyQuery(note_item).attr('data-url'))
            next_page = dom('#content .article>.paginator>.next>a')
            if next_page:
                url = next_page.attr('href')
            else:
                break
        return notes 
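Note: the doufen tasks in this collection all go through self.fetch_url_content, which is not shown; a hypothetical minimal version wrapping requests (self.headers is assumed) could be:

import requests

def fetch_url_content(self, url):
    # Hypothetical sketch: GET a URL and return the response, or None on failure.
    try:
        response = requests.get(url, headers=self.headers, timeout=10)
        response.raise_for_status()
        return response
    except requests.RequestException:
        return None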
Example 13
Source File: tasks.py    From doufen with MIT License (6 votes)
def fetch_follow_list(self, user, action):
        url = 'https://api.douban.com/shuo/v2/users/{user}/{action}?count=50&page={page}'

        user_list = []
        page_count = 1
        while True:
            response = self.fetch_url_content(url.format(action=action, user=user, page=page_count))
            if not response:
                break
            user_list_partial = json.loads(response.text)
            if len(user_list_partial) == 0:
                break
            #user_list.extend([user_detail['uid'] for user_detail in user_list_partial])
            user_list.extend(user_list_partial)
            page_count += 1

        user_list.reverse()
        return user_list 
Example 14
Source File: gcp_compute.py    From google.cloud with GNU General Public License v3.0 (6 votes)
def fetch_list(self, params, link, query):
        """
            :param params: a dict containing all of the fields relevant to build URL
            :param link: a formatted URL
            :param query: a formatted query string
            :return: the JSON response containing a list of instances.
        """
        lists = []
        resp = self._return_if_object(
            self.fake_module, self.auth_session.get(link, params={"filter": query})
        )
        if resp:
            lists.append(resp.get("items"))
            while resp.get("nextPageToken"):
                resp = self._return_if_object(
                    self.fake_module,
                    self.auth_session.get(
                        link,
                        params={"filter": query, "pageToken": resp.get("nextPageToken")},
                    ),
                )
                lists.append(resp.get("items"))
        return self.build_list(lists) 
Example 15
Source File: util.py    From dwdweather2 with MIT License (5 votes)
def fetch_html_file_list(baseurl, extension):

    cwd, listing = htmllistparse.fetch_listing(baseurl, timeout=10)
    result = [
        baseurl + "/" + item.name
        for item in listing
        if item.name.endswith(extension)
    ]
    return result 
Example 16
Source File: ding_api.py    From Archery with Apache License 2.0 (5 votes)
def get_dept_list_id_fetch_child(token, parent_dept_id):
    """获取所有子部门列表"""
    ids = [int(parent_dept_id)]
    url = 'https://oapi.dingtalk.com/department/list_ids?id={0}&access_token={1}'.format(parent_dept_id, token)
    resp = requests.get(url, timeout=3).json()
    if resp.get('errcode') == 0:
        for dept_id in resp.get("sub_dept_id_list"):
            ids.extend(get_dept_list_id_fetch_child(token, dept_id))
    return list(set(ids)) 
Example 17
Source File: red.py    From restpie3 with MIT License (5 votes)
def list_fetch(name):
    """Returns all items in queue or None. Does not remove the items."""
    slist = rdb.lrange(name, 0, -1)
    if slist:
        return [pickle.loads(s) for s in slist] 
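Note: items are presumably pushed onto the queue pickled the same way; a hypothetical counterpart using the same module-level rdb client:

def list_append(name, item):
    # Hypothetical counterpart: append one pickled item to the named Redis list.
    rdb.rpush(name, pickle.dumps(item))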
Example 18
Source File: k8s_tools.py    From ElasticCTR with Apache License 2.0 (5 votes)
def fetch_ips_list(label_selector, phase=None):
    pod_list = fetch_pods_info(label_selector, phase)
    ips = [item[1] for item in pod_list]
    ips.sort()
    return ips 
Example 19
Source File: k8s_tools.py    From ElasticCTR with Apache License 2.0 (5 votes)
def fetch_name_list(label_selector, phase=None):
    pod_list = fetch_pods_info(label_selector, phase)
    names = [item[2] for item in pod_list]
    names.sort()
    return names 
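Note: both k8s_tools helpers above rely on fetch_pods_info, which is not shown; judging from the indexing, it returns one tuple per pod with the IP at index 1 and the name at index 2. A hypothetical sketch using the official kubernetes client:

from kubernetes import client, config

def fetch_pods_info(label_selector, phase=None):
    # Hypothetical sketch: return (phase, ip, name) tuples for matching pods.
    config.load_incluster_config()
    v1 = client.CoreV1Api()
    pods = v1.list_pod_for_all_namespaces(label_selector=label_selector)
    return [
        (p.status.phase, p.status.pod_ip, p.metadata.name)
        for p in pods.items
        if phase is None or p.status.phase == phase
    ]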
Example 20
Source File: update_ip_list.py    From MozDef with Mozilla Public License 2.0 (5 votes)
def fetch_ip_list(aws_key_id, aws_secret_key, s3_bucket, ip_list_filename):
    logger.debug("Fetching ip list from s3")
    client = boto3.client(
        's3',
        aws_access_key_id=aws_key_id,
        aws_secret_access_key=aws_secret_key
    )
    response = client.get_object(Bucket=s3_bucket, Key=ip_list_filename)
    ip_content_list = response['Body'].read().rstrip().splitlines()
    ips = []
    for ip in ip_content_list:
        ips.append(ip.decode())
    return ips 
Example 21
Source File: Utils.py    From BiliBiliHelper with GNU General Public License v3.0 (5 votes)
async def fetch_bag_list(verbose=False, bagid=None, show=True, raw=False):
        data = await BasicRequest.req_fetch_bag_list()
        if raw:
            return data
        gift_list = []
        if show:
            print("查询可用礼物...")
        for i in data["data"]["list"]:
            bag_id = i["bag_id"]
            gift_id = i["gift_id"]
            gift_num = i["gift_num"]
            gift_name = i["gift_name"]
            expireat = i["expire_at"]
            left_time = (expireat - data["data"]["time"])
            if not expireat:
                left_days = "+∞".center(6)
                left_time = None
            else:
                left_days = round(left_time / 86400, 1)
            if bagid is not None:
                if bag_id == int(bagid):
                    return gift_id, gift_num
            else:
                if verbose:
                    print(f"编号为 {bag_id} 的 {gift_name:^3} X {gift_num:^4} (在 {left_days:^6} 天后过期)")
                elif show:
                    print(f" {gift_name:^3} X {gift_num:^4} (在 {left_days:^6} 天后过期)")

            gift_list.append([gift_id, gift_num, bag_id, left_time])
        return gift_list 
Example 22
Source File: misclassified_loc.py    From ConvNetQuake with MIT License (5 votes)
def fetch_streams_list(datadir):
    """Get the list of streams to analyze"""
    fnames = []
    for root, dirs, files in os.walk(datadir):
        for f in files:
            if f.endswith(".mseed"):
                fnames.append(os.path.join(root, f))
    return fnames 
Example 23
Source File: git.py    From phobos with BSD 3-Clause "New" or "Revised" License (5 votes)
def getFetchRemotesList(self, context):
    """

    Args:
      context: 

    Returns:

    """
    remotes = getGitRemotes('fetch')
    remoteslist = [remotes[a] for a in remotes]
    print(remoteslist)
    return [(url,) * 3 for url in remoteslist] 
Example 24
Source File: utils.py    From bilibili-live-tools with MIT License (5 votes)
async def fetch_bag_list(verbose=False, bagid=None, printer=True):
    response = await bilibili().request_fetch_bag_list()
    temp = []
    gift_list = []
    json_response = await response.json()
    if printer == True:
        print('[{}] 查询可用礼物'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
    for i in range(len(json_response['data']['list'])):
        bag_id = (json_response['data']['list'][i]['bag_id'])
        gift_id = (json_response['data']['list'][i]['gift_id'])
        gift_num = str((json_response['data']['list'][i]['gift_num'])).center(4)
        gift_name = json_response['data']['list'][i]['gift_name']
        expireat = (json_response['data']['list'][i]['expire_at'])
        if expireat != 0:
            left_time = (expireat - int(CurrentTime()))
            left_days = (expireat - int(CurrentTime())) / 86400
            gift_list.append([gift_id, gift_num, bag_id, expireat])
        else:
            left_time = 999999999999999999
            left_days = 999999999999999999
            gift_list.append([gift_id, gift_num, bag_id, expireat])
        if bagid is not None:
            if bag_id == int(bagid):
                return gift_id
        else:
            if verbose:
                print("# 编号为" + str(bag_id) + '的' + gift_name + 'X' + gift_num, '(在' + str((left_days)) + '天后过期)')
            elif printer == True:
                print("# " + gift_name + 'X' + gift_num, '(在' + str((left_days)) + '天后过期)')

        if 0 < int(left_time) < 43200:  # auto-send the gift when less than half a day remains
            temp.append([gift_id, gift_num, bag_id, expireat])
    # print(temp)
    return temp, gift_list 
Example 25
Source File: NetflixHttpSubRessourceHandler.py    From plugin.video.netflix with MIT License (5 votes)
def fetch_video_list(self, params):
        """Video list proxy function

        Parameters
        ----------
        params : :obj:`dict` of :obj:`str`
            Request params

        Returns
        -------
        :obj:`list`
            Transformed response of the remote call
        """
        list_id = params.get('list_id', [''])[0]
        start = int(params.get('list_from', [0])[0])
        end = int(params.get('list_to', [26])[0])
        raw_video_list = self.netflix_session.fetch_video_list(
            list_id=list_id,
            list_from=start,
            list_to=end)
        if 'error' in raw_video_list:
            return raw_video_list
        # parse the video list ids
        if 'videos' in raw_video_list.get('value', {}).keys():
            video_list = self.netflix_session.parse_video_list(
                response_data=raw_video_list)
            return video_list
        return [] 
Example 26
Source File: api.py    From Weibo-Album-Crawler with MIT License (5 votes)
def fetch_large_list(uid, ids, type):
        """
        Fetch the large-image list.
        :param uid: user id
        :param ids: list of photo ids
        :param type: album type
        :return: list
        """
        params = {
            'uid': uid,
            'ids': ','.join(map(str, ids)),
            'type': type
        }
        data = WeiboApi.get_json(Url.LARGE_LIST, params=params)
        return filter(None.__ne__, data.values())  # drop entries whose value is None
Example 27
Source File: SignalSciences.py    From content with MIT License (5 votes)
def get_list_of_site_names_to_fetch():
    list_of_site_names_to_fetch = None
    if SITES_TO_FETCH:
        list_of_site_names_to_fetch = SITES_TO_FETCH.split(',')
    else:
        list_of_site_names_to_fetch = get_list_of_all_site_names_in_corp()
    return list_of_site_names_to_fetch 
Example 28
Source File: tasks.py    From doufen with MIT License (5 votes)
def fetch_review_list(self, url):
        item_list = []
        while True:
            response = self.fetch_url_content(url)
            if not response:
                break
            dom = PyQuery(response.text)
            items = dom('#content .article>.review-list .review-item')
            for item in items:
                item_div = PyQuery(item)
                douban_id = item_div.attr('id')
                title_link = item_div('.main-bd>h2>a')
                subject_url = item_div('.subject-img').attr('href')

                detail = {
                    'douban_id': douban_id,
                    'title': title_link.text(),
                    'url': title_link.attr('href'),
                    'subject': subject_url,
                }
                item_list.append(detail)
            next_page = dom('#content .article>.paginator>.next>a')
            if next_page:
                url = next_page.attr('href')
            else:
                break
        return item_list 
Example 29
Source File: ch06_listing_source.py    From https---github.com-josiahcarlson-redis-in-action with MIT License (5 votes)
def fetch_autocomplete_list(conn, user, prefix):
    candidates = conn.lrange('recent:' + user, 0, -1) #A
    matches = []
    for candidate in candidates:                      #B
        if candidate.lower().startswith(prefix):      #B
            matches.append(candidate)                 #C
    return matches                                    #D
# <end id="_1314_14473_8386"/>
#A Fetch the autocomplete list
#B Check each candidate
#C We found a match
#D Return all of the matches
#END

# <start id="_1314_14473_8396"/> 
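Note: a quick usage sketch of the autocomplete helper above, assuming a local Redis reachable through redis-py and recent contacts stored under recent:<user>:

import redis

conn = redis.Redis(decode_responses=True)  # assumed local instance
conn.rpush('recent:alice', 'Bob', 'bobby', 'Carol')
print(fetch_autocomplete_list(conn, 'alice', 'bob'))  # -> ['Bob', 'bobby']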
Example 30
Source File: tasks.py    From doufen with MIT License (5 votes)
def fetch_comment_list(self, broadcast_url, broadcast_douban_id):
        url = broadcast_url
        comments = []
        while True:
            response = self.fetch_url_content(url)
            if not response:
                break
            dom = PyQuery(response.text)
            comment_items = dom('#comments>.comment-item')
            for comment_item in comment_items:
                item_div = PyQuery(comment_item)
                comments.append({
                    'content': item_div.outer_html(),
                    'target_type': 'broadcast',
                    'target_douban_id': broadcast_douban_id,
                    'douban_id': item_div.attr('data-cid'),
                    'user': self.fetch_user(PyQuery(item_div('.pic>a')).attr('data-uid')),
                    'text': PyQuery(item_div('.content>p.text')).text(),
                    'created': PyQuery(item_div('.content>.author>.created_at')).text(),
                })
            next_page = dom('#comments>.paginator>.next>a')
            if next_page:
                url = broadcast_url + next_page.attr('href')
            else:
                break
        return comments 
Example 31
Source File: tasks.py    From doufen with MIT License (5 votes)
def fetch_photo_album_list(self):
        url = self.account.user.alt + 'photos'
        albums = []
        while True:
            response = self.fetch_url_content(url)
            if not response:
                break
            dom = PyQuery(response.text)
            album_items = dom('#content .article>.wr>.albumlst')
            for album_item in album_items:
                album_div = PyQuery(album_item)
                misc_text = album_div('.albumlst_r>.pl').text()
                try:
                    # date followed by "创建" (created) or "更新" (updated)
                    last_updated = re.search(r'(\d+\-\d+\-\d+)(?:创建|更新)', misc_text)[1]
                except (IndexError, TypeError):
                    last_updated = None

                albums.append((
                    album_div('.album_photo').attr('href'),
                    album_div('.album').attr('src'),
                    last_updated,
                ))
            next_page = dom('#content .article>.paginator>.next>a')
            if next_page:
                url = next_page.attr('href')
            else:
                break
        return albums 
Example 32
Source File: tasks.py    From doufen with MIT License (5 votes)
def fetch_block_list(self):
        response = self.fetch_url_content('https://www.douban.com/contacts/blacklist')
        if not response:
            return []
        dom = PyQuery(response.text)
        return [_strip_username(PyQuery(item)) for item in dom('dl.obu>dd>a')] 
Example 33
Source File: ztp_filter.py    From aruba-switch-ansible with Apache License 2.0 (5 votes)
def fetch_allowed_list(path, ip):
    """
    Fetches all allowed attributes for a certain api path
    :param path: Swagger URI to resource
    :param ip: IP of the AOS-CX Switch
    :return: list of strings that represent attributes of the resource
    """
    # Get API Object
    response = get("https://{}/api/hpe-restapi.json".format(ip), verify=False)
    if response.status_code != 200:
        raise AnsibleParserError(
            'Get API Object Request Failed with Status Code %s .' % to_text(str(response.status_code)))

    # Variable declarations
    tmp_object = response.json()['paths']
    allowed_list = []

    # Get all properties of the path
    if path in tmp_object:
        if "put" in tmp_object[path]:
            for parameter in tmp_object[path]['put']['parameters']:
                if parameter['name'] != "data":
                    continue
                else:
                    allowed_list = list(parameter['schema']['properties'].keys())
        else:
            raise AnsibleParserError('No Put Method exists for the path %s .' % to_text(str(path)))
    else:
        raise AnsibleParserError('No API Object exists for the path %s .' % to_text(str(path)))

    return allowed_list 
Example 34
Source File: fetch.py    From python-stdlib-list with MIT License (5 votes)
def fetch_list(version=None):
    """
    For the given version of Python (or all versions if no version is set), this function:

    - Uses the `fetch_inventory` function of :py:mod:`sphinx.ext.intersphinx` to
    grab and parse the Sphinx object inventory
    (i.e. ``http://docs.python.org/<version>/objects.inv``) for the given version.

    - Grabs the names of all of the modules in the parsed inventory data.

    - Writes the sorted list of module names to file (within the `lists` subfolder).

    :param str|None version: A specified version of Python. If not specified, then all
    available versions of Python will have their inventory objects fetched
    and parsed, and have their module names written to file.
    (one of ``"2.6"``, ``"2.7"``, ``"3.2"``, ``"3.3"``, ``"3.4"``, ``"3.5"``, ``"3.6"``, ``"3.7"``, ``"3.8"``, ``"3.9"`` or ``None``)

    """

    if version is None:
        versions = short_versions
    else:
        versions = [get_canonical_version(version)]

    for version in versions:

        url = "http://docs.python.org/{}/objects.inv".format(version)

        modules = sorted(
            list(
                fetch_inventory(DummyApp(), "", url).get("py:module").keys()
            )
        )

        with open(os.path.join(list_dir, "{}.txt".format(version)), "w") as f:
            for module in modules:
                f.write(module)
                f.write("\n") 
Example 35
Source File: spider.py    From rxivist with GNU Affero General Public License v3.0 (5 votes)
def fetch_category_list(self):
    categories = []
    with self.connection.db.cursor() as cursor:
      cursor.execute("SELECT DISTINCT collection FROM articles ORDER BY collection;")
      for cat in cursor:
        if len(cat) > 0:
          categories.append(cat[0])
    return categories 
Example 36
Source File: recipe-475181.py    From code with MIT License (5 votes)
def fetchList(self):
        (stdout, stdin, stderr) = popen3("%s --getfilelist '%s*.*'" % (self.bin, self.dir))
        lines = stdout.readlines()
        # Drop the useless gnokii prompt line
        del lines[0]
        # Strip the whitespace at the ends of each file name
        self.file_list = map(lambda x: x.strip(), lines)
Example 37
Source File: game_settings.py    From raisetheempires with GNU General Public License v3.0 (5 votes)
def fetch_url_list(l):
    urls = []
    for v in l:
        if isinstance(v, dict):
            urls.extend(fetch_url_dict(v))
        elif isinstance(v, list):
            urls.extend(fetch_url_list(v))
    return urls 
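Note: fetch_url_dict, the mutually recursive companion, is defined elsewhere in game_settings.py; a hypothetical sketch consistent with the call sites above:

def fetch_url_dict(d):
    # Hypothetical sketch: collect URL-like strings from a nested dict.
    urls = []
    for value in d.values():
        if isinstance(value, dict):
            urls.extend(fetch_url_dict(value))
        elif isinstance(value, list):
            urls.extend(fetch_url_list(value))
        elif isinstance(value, str) and value.startswith(('http://', 'https://')):
            urls.append(value)
    return urls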
Example 38
Source File: daily_detector.py    From CrisisMappingToolkit with Apache License 2.0 (5 votes)
def fetchArchivedDateList():
    '''Fetches the list of dates we have archived data for.'''
    
    dateList = []
    parsedIndexPage = BeautifulSoup(urllib2.urlopen((STORAGE_URL)).read(), 'html.parser')
    
    print parsedIndexPage
    
    for line in parsedIndexPage.findAll('a'):
        print line
        dateList.append(line.string)
        
    return dateList 
Example 39
Source File: server.py    From CrisisMappingToolkit with Apache License 2.0 (5 votes)
def fetchDateList(datesOnly=False):
    '''Fetches the list of available dates'''
    
    dateList = []
    parsedIndexPage = BeautifulSoup(urllib2.urlopen(STORAGE_URL).read(), 'html.parser')
    
    for line in parsedIndexPage.findAll('a'):
        dateString = line.string
        
        if datesOnly:
            dateList.append(dateString)
            continue

        # Else look through each page so we can make date__location pairs.
        subUrl = STORAGE_URL + dateString
        
        try:
            parsedSubPage = BeautifulSoup(urllib2.urlopen((subUrl)).read(), 'html.parser')
            
            for line in parsedSubPage.findAll('a'):
                kmlName = line.string
                info = extractInfoFromKmlUrl(kmlName)
                
                # Store combined date/location string.
                displayString = dateString +'__'+ info['location']
                dateList.append(displayString)
        except:
            pass # Ignore pages that we fail to parse

    return dateList 
Example 40
Source File: globus_uploader_service.py    From computing-pipeline with BSD 3-Clause "New" or "Revised" License (5 votes)
def fetchDatasetFileList(datasetId, requestsSession):
    clowkey = config['clowder']['secret_key']
    filelist = requests.get(config['clowder']['host']+"/api/datasets/%s/listFiles?key=%s" % (datasetId, clowkey),
                            headers={"Content-Type": "application/json"})

    if filelist.status_code == 200:
        return filelist.json()
    else:
        logger.error("- cannot find file list for dataset %s" % datasetId)
        return []


# ----------------------------------------------------------
# POSTGRES LOGGING COMPONENTS
# ---------------------------------------------------------- 
Example 41
Source File: rest_api.py    From scirius with GNU General Public License v3.0 (5 votes)
def fetch_list_sources(self, request):
        try:
            fetch_public_sources()
        except Exception as e:
            raise serializers.ValidationError({'fetch': [unicode(e)]})
        return Response({'fetch': 'ok'}) 
Example 42
Source File: crawler.py    From All-IT-eBooks-Spider with MIT License (5 votes)
def fetch_book_name_list(self):
        while True:
            try:
                req = urllib.request.Request(
                    self.base_url + '/page/{}'.format(self.start_page), headers=self.headers)
                html = urllib.request.urlopen(req)
                doc = html.read().decode('utf8')
                alist = list(set(re.findall(cf.BOOK_LINK_PATTERN, doc)))
                print('Now working on page {}\n'.format(self.start_page))
                time.sleep(20)
                self.start_page += 1
                self.fetch_download_link(alist)
            except urllib.error.HTTPError as err:
                print(err.msg)
                break 
Example 43
Source File: racf_audit.py    From SML-Cogs with MIT License (5 votes)
async def fetch_clan_list(self, tags):
        """Get multiple clans."""
        tags = [clean_tag(tag) for tag in tags]
        tasks = [self.fetch_clan(tag) for tag in tags]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for index, r in enumerate(results):
            if isinstance(r, ClashRoyaleAPIError):
                print(r.status_message)
                results[index] = {}
                raise ClashRoyaleAPIError(status=r.status, message=r.message)
        return results 
Example 44
Source File: database.py    From pyArango with Apache License 2.0 (4 votes)
def fetch_list_as_batches(
            self, aql_query, bind_vars=None, batch_size=200,
            dont_raise_error_if_empty=False, logger=None,
            log_level=logging.DEBUG
    ):
        """Fetch list of elements as batches by running the query.

        Generator which yields each batch as the result.

        Parameters
        ----------
        aql_query : str
            aql query string.
        bind_vars : dict, optional
            dictionary of bind variables (the default is None)
        batch_size : int, optional
            fetching batch size (the default is 200)
        dont_raise_error_if_empty: bool, optional
            do not raise error if the returned is empty. (the default is False)
        logger : Logger, optional
            logger to log the query and result.
            (the default is None means don't log)
        log_level: Logger.loglevel, optional
            level of the log. (the default is logging.DEBUG)

        Raises
        ------
        AQLFetchError
            When unable to fetch results

        Returns
        -------
        list(any)
            a list returned by query.

        """
        try:
            log = self.__get_logger(logger, log_level)
            if log is not None:
                log(aql_query)
            query = self.AQLQuery(
                aql_query, batchSize=batch_size, rawResults=True,
                bindVars=(bind_vars if bind_vars is not None else {})
            )
            batch_index = 0
            while True:
                if len(query.response['result']) == 0:
                    break
                if log is not None:
                    log(
                        "batch_result for index '%s': %s",
                        batch_index, query.response['result']
                    )
                yield query.response['result']
                batch_index += 1
                query.nextBatch()
        except StopIteration:
            return
        except:
            raise
        if batch_index == 0 and dont_raise_error_if_empty:
            return
        raise AQLFetchError(
            "No results matched for query in fetching the batch index: %s." % (
                batch_index
            )
        ) 
Example 45
Source File: database.py    From pyArango with Apache License 2.0 (4 votes)
def fetch_list(
            self, aql_query, bind_vars=None, batch_size=200,
            dont_raise_error_if_empty=False, logger=None,
            log_level=logging.DEBUG
    ):
        """Fetch list of elements by running a query and merging all the batches.

        Parameters
        ----------
        aql_query : str
            aql query string.
        bind_vars : dict, optional
            dictionary of bind variables (the default is None)
        batch_size : int, optional
            fetching batch size (the default is 200)
        dont_raise_error_if_empty: bool, optional
            do not raise error if the returned is empty. (the default is False)
        logger : Logger, optional
            logger to log the query and result.
            (the default is None means don't log)
        log_level: Logger.loglevel, optional
            level of the log. (the default is logging.DEBUG)

        Raises
        ------
        AQLFetchError
            When unable to fetch results

        Returns
        -------
        list(any)
            a list returned by query.

        """
        try:
            log = self.__get_logger(logger, log_level)
            if log is not None:
                log(aql_query)
            query = self.AQLQuery(
                aql_query, batchSize=batch_size, rawResults=True,
                bindVars=(bind_vars if bind_vars is not None else {})
            )
            batch_index = 0
            result = []
            while True:
                if len(query.response['result']) == 0:
                    break
                result.extend(query.response['result'])
                batch_index += 1
                query.nextBatch()
        except StopIteration:
            if log is not None:
                log(result)
            if len(result) != 0:
                return result
        except:
            raise
        if batch_index == 0 and dont_raise_error_if_empty:
            return []
        raise AQLFetchError(
            "No results matched for query in fetching the batch index: %s." % (
                batch_index
            )
        ) 
Example 46
Source File: trackmanager.py    From RaceCapture_App with GNU General Public License v3.0 (4 votes)
def fetch_venue_list(self, full_response=False):
        """Fetches all venues from RCL's API and returns them as an array of dicts. RCL's API normally returns minimal
        object information when listing multiple objects. The 'full_response' arg tells this function to expand
        all objects to contain all their data. This allows us to quickly get basic information about tracks or pull
        down everything if we have no tracks locally.
        """

        total_venues = None
        next_uri = self.RCP_VENUE_URL + "?start=0&per_page=100"

        if full_response:
            next_uri += '&expand=1'

        venues_list = []

        while next_uri:
            Logger.info('TrackManager: Fetching venue data: {}'.format(next_uri))
            response = self.load_json(next_uri)
            try:
                if total_venues is None:
                    total_venues = int(response.get('total', None))
                    if total_venues is None:
                        raise MissingKeyException('Venue list JSON: could not get total venue count')

                venues = response.get('venues', None)

                if venues is None:
                    raise MissingKeyException('Venue list JSON: could not get venue list')

                venues_list += venues
                next_uri = response.get('nextURI')

            except MissingKeyException as detail:
                Logger.error('TrackManager: Malformed venue JSON from url ' + next_uri + '; json =  ' + str(response) +
                             ' ' + str(detail))

        Logger.info('TrackManager: fetched list of ' + str(len(venues_list)) + ' tracks')

        if total_venues != len(venues_list):
            Logger.warning('TrackManager: track list count does not reflect downloaded track list size ' + str(total_venues) + '/' + str(len(venues_list)))

        return venues_list 
Example 47
Source File: presetmanager.py    From RaceCapture_App with GNU General Public License v3.0 (4 votes)
def fetch_preset_list(self, full_response=False):
        """Fetches all presets from RCL's API and returns them as an array of dicts. RCL's API normally returns minimal
        object information when listing multiple objects. The 'full_response' arg tells this function to expand
        all objects to contain all their data. This allows us to quickly get basic information about presets or pull
        down everything if we have no presets locally.
        """

        total_mappings = None
        next_uri = self.PRESET_URL + "?start=0&per_page=100"

        if full_response:
            next_uri += '&expand=1'

        mappings_list = []

        while next_uri:
            Logger.info('PresetManager: Fetching preset data: {}'.format(next_uri))
            response = self.load_json(next_uri)
            try:
                if total_mappings is None:
                    total_mappings = int(response.get('total', None))
                    if total_mappings is None:
                        raise MissingKeyException('Mapping list JSON: could not get total preset count')

                mappings = response.get('mappings', None)

                if mappings is None:
                    raise MissingKeyException('Mapping list JSON: could not get preset list')

                mappings_list += mappings
                next_uri = response.get('nextURI')

            except MissingKeyException as detail:
                Logger.error('PresetManager: Malformed venue JSON from url ' + next_uri + '; json =  ' + str(response) +
                             ' ' + str(detail))

        Logger.info('PresetManager: fetched list of ' + str(len(mappings_list)) + ' presets')

        if total_mappings != len(mappings_list):
            Logger.warning('PresetManager: mappings list count does not reflect downloaded size ' + str(total_mappings) + '/' + str(len(mappings_list)))

        return mappings_list 
Example 48
Source File: tasks.py    From doufen with MIT License (4 votes)
def fetch_like_list(self, url):
        item_list = []
        while True:
            response = self.fetch_url_content(url)
            if not response:
                break
            dom = PyQuery(response.text)
            items = dom('#content .article>.fav-list>li')
            for item in items:
                div = PyQuery(item)
                author_tags = div('.author-tags>.tag-add')
                douban_id = author_tags.attr('data-id')

                if not douban_id:
                    # the liked item no longer exists
                    continue

                tags = author_tags.attr('data-tags')

                lnk_delete = div('.gact.lnk-delete')
                target_type = lnk_delete.attr('data-tkind')
                target_douban_id = lnk_delete.attr('data-tid')
                target_url = lnk_delete.attr('href')
                
                created = div('.status-item .time').text()

                item_list.append({
                    'douban_id': douban_id,
                    'target_type': target_type,
                    'target_douban_id': target_douban_id,
                    'created': created,
                    'tags': tags,
                    'url': target_url,
                    '_extra': div('.status-item .block .content'),
                })
            next_page = dom('#content .article>.paginator>.next>a')
            if next_page:
                url = next_page.attr('href')
            else:
                break
        item_list.reverse()
        return item_list 
Example 49
Source File: passpol.py    From CrackMapExec with BSD 2-Clause "Simplified" License (4 votes)
def fetchList(self, rpctransport):
        dce = DCERPC_v5(rpctransport)
        dce.connect()
        dce.bind(samr.MSRPC_UUID_SAMR)

        # Setup Connection
        resp = samr.hSamrConnect2(dce)
        if resp['ErrorCode'] != 0:
            raise Exception('Connect error')

        resp2 = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle=resp['ServerHandle'],
                                                      enumerationContext=0,
                                                      preferedMaximumLength=500)
        if resp2['ErrorCode'] != 0:
            raise Exception('Connect error')

        resp3 = samr.hSamrLookupDomainInSamServer(dce, serverHandle=resp['ServerHandle'],
                                                  name=resp2['Buffer']['Buffer'][0]['Name'])
        if resp3['ErrorCode'] != 0:
            raise Exception('Connect error')

        resp4 = samr.hSamrOpenDomain(dce, serverHandle=resp['ServerHandle'],
                                     desiredAccess=samr.MAXIMUM_ALLOWED,
                                     domainId=resp3['DomainId'])
        if resp4['ErrorCode'] != 0:
            raise Exception('Connect error')

        self.__domains = resp2['Buffer']['Buffer']
        domainHandle = resp4['DomainHandle']
        # End Setup

        re = samr.hSamrQueryInformationDomain2(dce, domainHandle=domainHandle,
                                               domainInformationClass=samr.DOMAIN_INFORMATION_CLASS.DomainPasswordInformation)
        self.__min_pass_len = re['Buffer']['Password']['MinPasswordLength'] or "None"
        self.__pass_hist_len = re['Buffer']['Password']['PasswordHistoryLength'] or "None"
        self.__max_pass_age = convert(int(re['Buffer']['Password']['MaxPasswordAge']['LowPart']), int(re['Buffer']['Password']['MaxPasswordAge']['HighPart']))
        self.__min_pass_age = convert(int(re['Buffer']['Password']['MinPasswordAge']['LowPart']), int(re['Buffer']['Password']['MinPasswordAge']['HighPart']))
        self.__pass_prop = d2b(re['Buffer']['Password']['PasswordProperties'])

        re = samr.hSamrQueryInformationDomain2(dce, domainHandle=domainHandle,
                                               domainInformationClass=samr.DOMAIN_INFORMATION_CLASS.DomainLockoutInformation)
        self.__rst_accnt_lock_counter = convert(0, re['Buffer']['Lockout']['LockoutObservationWindow'], lockout=True)
        self.__lock_accnt_dur = convert(0, re['Buffer']['Lockout']['LockoutDuration'], lockout=True)
        self.__accnt_lock_thres = re['Buffer']['Lockout']['LockoutThreshold'] or "None"

        re = samr.hSamrQueryInformationDomain2(dce, domainHandle=domainHandle,
                                               domainInformationClass=samr.DOMAIN_INFORMATION_CLASS.DomainLogoffInformation)
        self.__force_logoff_time = convert(re['Buffer']['Logoff']['ForceLogoff']['LowPart'], re['Buffer']['Logoff']['ForceLogoff']['HighPart'])

        self.pass_pol = {'min_pass_len': self.__min_pass_len, 'pass_hist_len': self.__pass_hist_len, 
                         'max_pass_age': self.__max_pass_age, 'min_pass_age': self.__min_pass_age, 
                         'pass_prop': self.__pass_prop, 'rst_accnt_lock_counter': self.__rst_accnt_lock_counter,
                         'lock_accnt_dur': self.__lock_accnt_dur, 'accnt_lock_thres': self.__accnt_lock_thres,
                         'force_logoff_time': self.__force_logoff_time} 
Example 50
Source File: getbbgdata.py    From FinanceHub with MIT License (4 votes)
def fetch_futures_list(generic_ticker):
        """
        Given a generic ticker for a future contract, it returns all of the historical contracts that composed the
        generic.
        :param generic_ticker: str
        :return: list
        """

        session = blpapi.Session()

        if not session.start():
            raise ConnectionError("Failed to start session.")

        if not session.openService("//blp/refdata"):
            raise ConnectionError("Failed to open //blp/refdat")

        service = session.getService("//blp/refdata")
        request = service.createRequest("ReferenceDataRequest")

        request.append("securities", generic_ticker)
        request.append("fields", "FUT_CHAIN")

        overrides = request.getElement("overrides")
        override1 = overrides.appendElement()
        override1.setElement("fieldId", "INCLUDE_EXPIRED_CONTRACTS")
        override1.setElement("value", "Y")
        override2 = overrides.appendElement()
        override2.setElement("fieldId", "CHAIN_DATE")
        override2.setElement("value", pd.to_datetime('today').date().strftime('%Y%m%d'))

        session.sendRequest(request)

        # process received events until the final RESPONSE message arrives
        more_to_come = True
        contract_list = []
        while more_to_come:

            ev = session.nextEvent()

            if ev.eventType() == blpapi.Event.RESPONSE or ev.eventType() == blpapi.Event.PARTIAL_RESPONSE:

                for msg in ev:

                    elements = msg.getElement("securityData").getValue().getElement("fieldData").getElement("FUT_CHAIN")
                    num_values = elements.numValues()

                    for cont in range(num_values):
                        contract_list.append(elements.getValue(cont).getElement("Security Description").getValue())

            if ev.eventType() == blpapi.Event.RESPONSE:
                more_to_come = False
                session.stop()

        return contract_list 
Example 51
Source File: anilist.py    From PlexAniSync with GNU General Public License v3.0 (4 votes)
def fetch_user_list(username):
    query = """
        query ($username: String) {
        MediaListCollection(userName: $username, type: ANIME) {
            lists {
            name
            status
            isCustomList
            entries {
                id
                progress
                status
                repeat
                media{
                id
                type
                format
                status
                source
                season
                episodes
                startDate {
                    year
                    month
                    day
                }
                endDate {
                    year
                    month
                    day
                }
                title {
                    romaji
                    english
                    native
                }
                }
            }
            }
        }
        }
        """

    variables = {"username": username}

    url = "https://graphql.anilist.co"

    headers = {
        "Authorization": "Bearer " + ANILIST_ACCESS_TOKEN,
        "Accept": "application/json",
        "Content-Type": "application/json",
    }

    response = requests.post(
        url, headers=headers, json={"query": query, "variables": variables}
    )
    return json.loads(response.content, object_hook=to_object) 
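Note: the to_object hook comes from elsewhere in PlexAniSync; a minimal hypothetical version that gives the parsed JSON attribute-style access:

from types import SimpleNamespace

def to_object(d):
    # Hypothetical sketch: lets callers write result.data.MediaListCollection ...
    return SimpleNamespace(**d)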
Example 52
Source File: puppetdb.py    From ansible-inventory-puppetdb with Apache License 2.0 (4 votes)
def fetch_host_list(self):
        """
        Returns data for all hosts found in PuppetDB
        """
        groups = collections.defaultdict(dict)
        hostvars = collections.defaultdict(dict)

        groups['all']['hosts'] = list()

        group_by = self.config.get('group_by')
        group_by_tag = self.config.get('group_by_tag')

        for node in self.puppetdb.nodes():
            server = str(node)

            if group_by is not None:
                try:
                    fact_value = node.fact(group_by).value
                    if fact_value not in groups:
                        groups[fact_value]['hosts'] = list()
                    groups[fact_value]['hosts'].append(server)
                except StopIteration:
                    # This fact does not exist on the server
                    if 'unknown' not in groups:
                        groups['unknown']['hosts'] = list()
                    groups['unknown']['hosts'].append(server)

            if group_by_tag:
                for entry in group_by_tag:
                    for resource_type, tag in entry.iteritems():
                        tag_lookup = { resource_type: tag }
                        tagged_hosts = self.fetch_tag_results(tag_lookup)
                        group_key = tag
                        if server in tagged_hosts:
                            if group_key not in groups:
                                groups[group_key]['hosts'] = list()
                            groups[group_key]['hosts'].append(server)

            groups['all']['hosts'].append(server)
            hostvars[server] = self.fetch_host_facts(server)
            groups['_meta'] = {'hostvars': hostvars}

        return json.dumps(groups)