Python wikipedia.summary() Examples

The following are 17 code examples of wikipedia.summary(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other functions and classes available in the wikipedia module.
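Before the examples, here is a minimal sketch of the typical call pattern (assuming the wikipedia package from PyPI, installed with pip install wikipedia): summary() takes a page title and an optional sentences limit, and raises DisambiguationError when the title matches several pages, or PageError when it matches none.

import wikipedia

try:
    # limit the summary to two sentences
    print(wikipedia.summary("Python (programming language)", sentences=2))
except wikipedia.exceptions.DisambiguationError as e:
    # the title was ambiguous; e.options lists candidate page titles
    print("Did you mean one of:", e.options[:5])
except wikipedia.exceptions.PageError:
    print("No page matched the query.")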
Example #1
Source File: dataprovider.py    From bot with MIT License
def get_short_answer(self, query):
    logging.info("searching in wolfram: {}".format(query))

    try:
        wolfram_res = self.wolfram_client.query(query)
        logging.info("wolfram res: {}".format(wolfram_res))

        return next(wolfram_res.results).text
    except Exception:
        # fall back to Wikipedia when Wolfram|Alpha fails or has no results
        wikipedia_res = wikipedia.summary(query, sentences=1)
        logging.info("wikipedia res: {}".format(wikipedia_res))
        if wikipedia_res:
            return wikipedia_res

        return self.NOT_FOUND_MSG
Example #2
Source File: utility.py    From RemixBot with MIT License
def wiki(self, ctx, *, query):
        '''Search for something on Wikipedia'''
        em = discord.Embed(title=str(query))
        em.set_footer(text='Powered by wikipedia.org')
        try:
            result = wikipedia.summary(query)
            if len(result) > 2000:
                em.color = discord.Color.red()
                em.description = f"Result is too long. View the website [here](https://wikipedia.org/wiki/{query.replace(' ', '_')}), or just google the subject."
                return await ctx.send(embed=em)
            em.color = discord.Color.green()
            em.description = result
            await ctx.send(embed=em)
        except wikipedia.exceptions.DisambiguationError as e:
            em.color = discord.Color.red()
            options = '\n'.join(e.options)
            em.description = f"**Options:**\n\n{options}"
            await ctx.send(embed=em)
        except wikipedia.exceptions.PageError:
            em.color = discord.Color.red()
            em.description = 'Error: Page not found.'
            await ctx.send(embed=em) 
Example #3
Source File: app.py    From Utlyz-CLI with Apache License 2.0
def wiki(bot, update, args):
	try:
		topic = " ".join(args)
		summary = wikipedia.summary(topic, sentences=30)
		page = wikipedia.page(topic)
		summary += "\nFor more details visit " + page.url
		bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
		bot.sendMessage(chat_id=update.message.chat_id, parse_mode=ParseMode.HTML, text=summary)

	except wikipedia.exceptions.DisambiguationError as e:
		error = "Please be more specific with your search query; it matches several pages:\n"
		for option in e.options:
			error += option + '\n'
		bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
		bot.sendMessage(chat_id=update.message.chat_id, text=error)

	except wikipedia.exceptions.PageError:
		error = "No Wikipedia page could be found for the topic you entered!"
		bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
		bot.sendMessage(chat_id=update.message.chat_id, text=error)
Example #4
Source File: wiki.py    From Jarvis with MIT License
def __call__(self, jarvis, s):
        k = s.split(' ', 1)
        if len(k) == 1:
            jarvis.say(
                "Do you mean:\n1. wiki search <subject>\n2. wiki summary <subject>\n3. wiki content <subject>")
        else:
            data = None
            if k[0] == "search":
                data = self.search(" ".join(k[1:]))
            elif k[0] == "summary":
                data = self.summary(" ".join(k[1:]))
            elif k[0] == "content":
                data = self.content(" ".join(k[1:]))
            else:
                jarvis.say("I don't know what you mean")
                return

            if isinstance(data, list):
                print("\nDid you mean one of these pages?\n")
                for d in range(len(data)):
                    print(str(d + 1) + ": " + data[d])
            else:
                print("\n" + data) 
Example #5
Source File: app.py    From Utlyz-CLI with Apache License 2.0
def help(bot, update):
	bot.sendChatAction(chat_id = update.message.chat_id, action = ChatAction.TYPING)
	bot.sendMessage(chat_id = update.message.chat_id, text = '''
		The following are the available commands!\n
		/news				To get news bulletins
		/lyrics <name_of_song>		To get lyrics of songs
		/wiki <topic>			To get wikipedia summary on a given topic
		/fb <username> <password>	To get certain facebook updates
	''') 
Example #6
Source File: whois.py    From sarah with GNU General Public License v2.0
def do_activate(self, args, argv):
        print(wikipedia.summary(' '.join(args), sentences=2)) 
Example #7
Source File: wiki.py    From sarah with GNU General Public License v2.0
def do_activate(self, args, argv):
        wikipedia.set_lang("de")
        print(wikipedia.summary(' '.join(args), sentences=2)) 
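One caveat with Example #7: wikipedia.set_lang() mutates module-level state, so every subsequent call, from anywhere in the program, queries the German Wikipedia until the language is changed back. A minimal sketch, assuming English is the desired default:

import wikipedia

wikipedia.set_lang("de")
print(wikipedia.summary("Berlin", sentences=1))   # German summary
wikipedia.set_lang("en")                          # restore the default language
print(wikipedia.summary("Berlin", sentences=1))   # English summary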
Example #8
Source File: media_aggregator.py    From delbot with GNU Affero General Public License v3.0
def get_gkg(query):
    try:
        s = _wk.summary(query, sentences=5)
        # strip parenthesised asides (pronunciations, dates) from the summary
        for x in _findall(r"\([^)]*\)", s):
            s = s.replace(x, "")
        return s
    except _wk.DisambiguationError:
        return False
Example #9
Source File: media_aggregator.py    From delbot with GNU Affero General Public License v3.0
def shorten_news(url, n = 5):
    from bs4 import BeautifulSoup as bs
    from summarizer import FrequencySummarizer as fs
    response = _req.get(url)
    if not response.ok:
        return False
    page = response.content
    soup = bs(page, "lxml")
    summary = fs().summarize("\n".join([x.text for x in soup.findAll("p") if len(x.text.split()) > 1]), n)
    summary.insert(0, soup.title.text)
    return ' '.join(summary) 
Example #10
Source File: define_subject.py    From Melissa-Core with MIT License
def define_subject(speech_text):
    words_of_message = speech_text.split()
    words_of_message.remove('define')
    cleaned_message = ' '.join(words_of_message).rstrip()
    if len(cleaned_message) == 0:
        msg = 'define requires subject words'
        print(msg)
        tts(msg)
        return

    try:
        wiki_data = wikipedia.summary(cleaned_message, sentences=5)

        regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)')
        m = regEx.match(wiki_data)
        while m:
            wiki_data = m.group(1) + m.group(2)
            m = regEx.match(wiki_data)

        wiki_data = wiki_data.replace("'", "")
        tts(wiki_data)
    except wikipedia.exceptions.DisambiguationError as e:
        tts('Can you please be more specific? You may choose something '
            'from the following.')
        print("Can you please be more specific? You may choose something "
              "from the following; {0}".format(e))
Example #11
Source File: views.py    From Microsoft-chatbot with MIT License
def who_is(query, session_id="general"):
    try:
        return wikipedia.summary(query)
    except requests.exceptions.SSLError:
        return "Sorry I could not search online due to SSL error"
    except Exception:
        pass
    # fall back to trying each title the search returns
    for new_query in wikipedia.search(query):
        try:
            return wikipedia.summary(new_query)
        except Exception:
            pass
    return "Sorry I could not find any data related to '%s'" % query
Example #12
Source File: wikipedia.py    From BotHub with Apache License 2.0
def _(event):
    if event.fwd_from:
        return
    await event.edit("Processing ...")
    input_str = event.pattern_match.group(1)
    result = wikipedia.summary(input_str)
    await event.edit("**Search**: {} \n\n **Result**: \n\n {}".format(input_str, result)) 
Example #13
Source File: searchrobot.py    From youtube-video-maker with GNU General Public License v3.0
def search(self, search_term):
        summary = wiki.summary(search_term, sentences=7)
        # strip parenthesised asides before sentence tokenization
        summary = re.sub(r"\([^)]*\)", "", summary)

        return tokenize.sent_tokenize(summary)
Example #14
Source File: label_image.py    From Celestial-bodies-detection with GNU General Public License v3.0
def wiki(celestial_object):
    ans = celestial_object
    cwd = os.getcwd()
    with open(os.path.join(cwd, 'display_info.yml'), 'r') as stream:
        all_display_statistics = load(stream, Loader=SafeLoader)

    req_statistics = all_display_statistics.get(ans, {})

    if ans in ["spiral", "elliptical"]:
        print("--------------------------------------------------------")
        print("Classified Celestial Object is {} Galaxy : ".format(ans.capitalize()))
        print("-------------------------------------------------------- \n")
        # print(wikipedia.summary("Spiral Galaxy", sentences=2))
        print(wikipedia.WikipediaPage(title='{} galaxy'.format(ans)).summary)
    elif ans in ['mercury', 'venus', 'earth', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune']:
        print("--------------------------------------------------------")
        print("Classified Celestial Object is {} Planet : ".format(ans.capitalize()))
        print("-------------------------------------------------------- \n")
        statistics = "\n".join(['-- {}: {}'.format(parameter, value) for parameter, value in req_statistics.items()])
        print("{}\n\n".format(statistics))
        # print(wikipedia.summary("Mercury (planet)", sentences=2))
        print(wikipedia.WikipediaPage(title='{} (planet)'.format(ans)).summary)
    elif ans == 'moon':
        print("--------------------------------------------------------")
        print("Classified Celestial Object is the {} : ".format(ans.capitalize()))
        print("-------------------------------------------------------- \n")
        statistics = "\n".join(['-- {}: {}'.format(parameter, value) for parameter, value in req_statistics.items()])
        print("{}\n\n".format(statistics))
        print(wikipedia.WikipediaPage(title='{}'.format(ans)).summary)
    return " " 
Example #15
Source File: temporal_lobe.py    From rpi_ai with MIT License
def wikipediaLookUp(a_string, num_sentences):
	print(a_string)
	# strip punctuation and underscores
	pattern = re.compile(r'([^\s\w]|_)+')
	phrase = re.sub(pattern, '', a_string)
	print(phrase)
	# drop filler words ("tell me about", "what is", ...) from the phrase
	pattern = re.compile(r"\b(lot|lots|a|an|who|can|you|what|is|info|somethings|whats|have|i|something|to|know|like|Id|information|about|tell|me)\W", re.I)
	phrase_noise_removed = pattern.sub("", phrase)
	print(phrase_noise_removed)
	a = wikipedia.search(phrase_noise_removed)
	print(a[0])
	the_summary = wikipedia.summary(a[0], sentences=num_sentences)
	print(the_summary)
	return the_summary
Example #16
Source File: wiki.py    From Jarvis with MIT License
def summary(self, query, sentences=0, chars=0):
        """Returns a plain text summary from the query's page."""
        try:
            return wikipedia.summary(query, sentences=sentences, chars=chars)
        except wikipedia.exceptions.PageError:
            return "No page matches, try another item."
        except wikipedia.exceptions.DisambiguationError as error:
            # return up to five candidate titles for the caller to display
            return error.options[:5]
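Note how this helper pairs with Example #4 from the same project: on a DisambiguationError it returns a list of up to five candidate titles rather than a string, which is why the caller in Example #4 checks isinstance(data, list) before deciding how to print the result.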
Example #17
Source File: search.py    From W.I.L.L with MIT License
def search_google(query):
    '''Search Google and check whether a Wikipedia page is among the results'''
    search_object = google.search(query)
    # determine whether a Wikipedia URL appears in the first 5 results
    urls = []
    for _ in range(5):
        url = search_object.__next__()
        urls.append(url)
        if "wikipedia.org/wiki" in url:
            wikipedia_search = wikipedia.search(query)[0]
            url = wikipedia.page(wikipedia_search).url
            response = wikipedia.summary(wikipedia_search) + " ({0})".format(url)
            return response
    #If there were no wikipedia pages
    first_url = urls[0]
    try:
        article = Article(first_url)
        article.download()
        article.parse()
        article.nlp()
        article_summary = article.summary
        article_title = article.title
        return "{0}\n{1} - ({2})".format(
            article_summary, article_title, first_url
        )

    except Exception as article_exception:
        try:
            log.debug("Got error {0}, {1} while using newspaper, switching to bs4".format(
                str(article_exception), article_exception.args
            ))
            html = requests.get(first_url).text
            # Parse the html using bs4
            soup = BeautifulSoup(html, "html.parser")
            # remove non-content elements before extracting text
            for s in soup(['style', 'script', '[document]', 'head', 'title']):
                s.extract()
            text = soup.getText()
            # break into lines and remove leading and trailing space on each
            lines = (line.strip() for line in text.splitlines())
            # break multi-headlines into a line each
            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
            # drop lines without at least one space (single words, blanks)
            soup_text = '\n'.join(chunk for chunk in chunks if " " in chunk)
            response = soup_text + " ({0})".format(first_url)
            return response
        except Exception as search_exception:
            log.info("Error {0},{1} occurred while searching query {2}".format(
                str(search_exception), search_exception.args, query
            ))
            return "Error encountered on query {0}".format(query)