Python logger.log() Examples

The following are 23 code examples of logger.log(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module logger, or try the search function.
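Note that logger here is typically not the standard-library logging module: each project ships its own logger, so the exact log() signature varies from example to example. A minimal sketch, assuming the OpenAI-baselines-style logger that several examples below use (other projects, such as Liked-Saved-Image-Downloader, expose a simpler logger.log(message)):

from baselines import logger

logger.configure('/tmp/experiment')               # choose an output directory and formats
logger.log('Starting run')                        # plain message at the default level
logger.log('Verbose detail', level=logger.DEBUG)  # baselines also accepts an explicit level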
Example #1
Source File: gdqn.py    From KG-A2C with MIT License    6 votes
def __init__(self, params):
        configure_logger(params['output_dir'])
        log('Parameters {}'.format(params))
        self.params = params
        self.binding = load_bindings(params['rom_file_path'])
        self.max_word_length = self.binding['max_word_length']
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(params['spm_file'])
        kg_env = KGA2CEnv(params['rom_file_path'], params['seed'], self.sp,
                          params['tsv_file'], step_limit=params['reset_steps'],
                          stuck_steps=params['stuck_steps'], gat=params['gat'])
        self.vec_env = VecEnv(params['batch_size'], kg_env, params['openie_path'])
        self.template_generator = TemplateActionGenerator(self.binding)
        env = FrotzEnv(params['rom_file_path'])
        self.vocab_act, self.vocab_act_rev = load_vocab(env)
        self.model = KGA2C(params, self.template_generator.templates, self.max_word_length,
                           self.vocab_act, self.vocab_act_rev, len(self.sp), gat=self.params['gat']).cuda()
        self.batch_size = params['batch_size']
        if params['preload_weights']:
            self.model = torch.load(self.params['preload_weights'])['model']
        self.optimizer = optim.Adam(self.model.parameters(), lr=params['lr'])

        self.loss_fn1 = nn.BCELoss()
        self.loss_fn2 = nn.BCEWithLogitsLoss()
        self.loss_fn3 = nn.MSELoss() 
Example #2
Source File: imageSaver.py    From Liked-Saved-Image-Downloader with MIT License    6 votes
def getUrlContentType(url):
    if url:
        openedUrl = None
        try:
            openedUrl = urlopen(url)
        except IOError as e:
            logger.log('[ERROR] getUrlContentType(): IOError: Url {0} raised exception:\n\t{1} {2}'
                .format(url, e.errno, e.strerror))
        except Exception as e:
            logger.log('[ERROR] Exception: Url {0} raised exception:\n\t {1}'
                        .format(url, e))
            logger.log('[ERROR] Url ' + url + 
                ' raised an exception I did not handle. Open an issue at '
                '\n\thttps://github.com/makuto/redditLikedSavedImageDownloader/issues'
                '\n and I will try to fix it')
        else:
            if sys.version_info[0] >= 3:
                return openedUrl.info().get_content_subtype()
            else:
                return openedUrl.info().subtype
    return '' 
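For reference, the Python 3 branch above works because urlopen responses expose the email.message API; get_content_subtype() returns just the part after the slash in the Content-Type header. A minimal sketch of the same check outside the helper:

from urllib.request import urlopen

opened = urlopen('https://www.python.org/')
print(opened.info().get_content_type())      # e.g. 'text/html'
print(opened.info().get_content_subtype())   # e.g. 'html'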
Example #3
Source File: imgurDownloader.py    From Liked-Saved-Image-Downloader with MIT License    6 votes
def checkImgurAPICredits(imgurClient):
    logger.log('Imgur API Credit Report:\n'
        + '\tUserRemaining: ' + str(imgurClient.credits['UserRemaining'])
        + '\n\tClientRemaining: ' + str(imgurClient.credits['ClientRemaining']))

    if not imgurClient.credits['UserRemaining']:
        logger.log('You have used up all of your Imgur API credits! Please wait an hour')
        return False

    # Ensure that this user doesn't suck up all the credits (remove this if you're an asshole)
    if imgurClient.credits['ClientRemaining'] < 1000:
        logger.log('RedditLikedSavedImageDownloader Imgur Client is running low on Imgur API credits!\n'
            'Unfortunately, this means no one can download any Imgur albums until the end of the month.\n'
            'If you are really jonesing for access, authorize your own Imgur Client and fill in'
            ' its details in settings.txt.')
        return False

    return True 
Example #4
Source File: imgurDownloader.py    From Liked-Saved-Image-Downloader with MIT License    6 votes
def convertImgurIndirectUrlToImg(submission, imgurAuth, url):
    # Login to imgur
    # This is required since they made NSFW images require login
    imgurClient = imgur.ImgurClient(imgurAuth.clientId, imgurAuth.clientSecret)

    if not checkImgurAPICredits(imgurClient):
        return None

    imageId = imgurIdFromUrl(url)
    if not imageId:
        logger.log("Failed to convert {} to image id".format(url))
        return None

    try:
        return imgurClient.get_image(imageId).link
    except Exception as e:
        errorMessage = ('Failed to convert imgur to image link: '
                        '[ERROR] Exception: Url {} raised exception:\n\t {}'.format(url, e))
        logger.log(errorMessage)
        LikedSavedDatabase.db.addUnsupportedSubmission(submission, errorMessage)
        return None 
Example #5
Source File: redditUserImageScraper.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def initialize():
    settings.getSettings()
        
    if not settings.settings['Database']:
        logger.log('Please provide a location for the Database')
        return

    # Do this early so we can use it anywhere
    LikedSavedDatabase.initializeFromSettings(settings.settings) 
Example #6
Source File: videoDownloader.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def defaultOut(self, msg):
        outputString = "[YoutubeDL] {}".format(msg)
        self.outputList.append(outputString)
        print(outputString)
        logger.log(outputString) 
Example #7
Source File: imageSaver.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def findSourceForRedGif(url):
    pageSourceLines, pageEncoding = getUrlLines(url)
    videoElement = "<video id=\"video-{}".format(url[url.rfind("/") + 1:])
    logger.log("RedGifs: looking for {}".format(videoElement))

    foundVideoElement = False
    for line in pageSourceLines:
        lineStr = line
        if sys.version_info[0] >= 3 and pageEncoding:
            # If things are breaking near here you're not reading a .html
            lineStr = line.decode(pageEncoding)

        # State machine; only look for source once we've hit the video we care about
        if foundVideoElement:
            try:
                # find() returns -1 when the substring is absent
                sourcePosition = lineStr.lower().find('<source src="')
                if sourcePosition > -1:
                    # Ignore low quality mobile and webm formats
                    if 'mobile' not in lineStr.lower() and '.mp4' in lineStr:
                        matches = re.findall(r'src="([^"]*)"', lineStr)
                        return matches[0]
            # Probably not reading a text file; we won't be able to determine the type
            except TypeError:
                logger.log('Unable to guess type for Url "' + url + '"')
                return None
        else:
            try:
                foundVideoElement = lineStr.lower().find(videoElement.lower()) > -1
            # Probably not reading a text file; we won't be able to determine the type
            except TypeError:
                logger.log('Unable to guess type for Url "' + url + '"')
                return None

    return None

# Make sure the filename is alphanumeric or has supported symbols, and is shorter than 45 characters 
Example #8
Source File: imageSaver.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def gfycatToRedGifsWorkaround(gfyUrl):
    logger.log("Using Gfycat->RedGifs workaround")
    return findSourceFromHTML(gfyUrl, '<source id="mp4source" src=')

# Lazy initialize in case it's not needed 
Example #9
Source File: imgurDownloader.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def getImgurAuth():
    imgurAuth = None
    if settings.hasImgurSettings():
        return ImgurAuth(settings.settings['Imgur_client_id'],
                         settings.settings['Imgur_client_secret'])
    else:
        logger.log('No Imgur Client ID and/or Imgur Client Secret was provided, or album download is not'
                   ' enabled. This is required to download imgur albums. They will be ignored. Check'
                   ' settings.txt for how to fill in these values.')
        return None 
Example #10
Source File: redditUserImageScraper.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def saveRequestedUrls(pipeConnection, urls):
    if pipeConnection:
        logger.setPipe(pipeConnection)

    initialize()

    logger.log('Attempting to save {} requested urls. This may take several minutes...'
               .format(len(urls)))

    submissions = []
    # Create Submission for each URL
    for url in urls:
        convertedSubmission = submission.Submission()
        convertedSubmission.source = "UserRequested"
        convertedSubmission.title = "UserRequested"
        convertedSubmission.author = "(Requested by user)"
        convertedSubmission.subreddit = "Requested_Downloads"
        convertedSubmission.subredditTitle = "Requested Downloads"
        convertedSubmission.body = "(Requested by user)"
        convertedSubmission.bodyUrl = url
        convertedSubmission.postUrl = url
        submissions.append(convertedSubmission)

    if len(submissions) != len(urls):
        logger.log('Could not parse {} URLs!'.format(len(urls) - len(submissions)))

    unsupportedSubmissions = imageSaver.saveAllImages(settings.settings['Output_dir'], submissions, 
                                                      imgur_auth = imgurDownloader.getImgurAuth(),
                                                      only_download_albums = settings.settings['Only_download_albums'],
                                                      skip_n_percent_submissions = settings.settings['Skip_n_percent_submissions'],
                                                      soft_retrieve_imgs = settings.settings['Should_soft_retrieve'],
                                                      only_important_messages = settings.settings['Only_important_messages'])

    logger.log('Download finished. Output to \'Requested Downloads\' directory')
    
    if pipeConnection:
        logger.log(scriptFinishedSentinel)
        pipeConnection.close() 
Example #11
Source File: run.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0    5 votes
def main(args, extra_args, save_path):
    # configure logger, disable logging in child MPI processes (with rank > 0)

    # arg_parser = common_arg_parser()
    # args, unknown_args = arg_parser.parse_known_args()
    # extra_args = parse_cmdline_kwargs(unknown_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure()
    else:
        logger.configure(format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()

    model, env = train(args, extra_args)
    # env.close()

    # if args.save_path is not None and rank == 0:
    #     save_path = osp.expanduser(args.save_path)
    model.save(save_path)

    if args.play:
        logger.log("Running trained model")
        print()
        # env = build_env(args)
        obs = env.reset()
        def initialize_placeholders(nlstm=128, **kwargs):
            return np.zeros((args.num_env or 1, 2 * nlstm)), np.zeros(1)
        state, dones = initialize_placeholders(**extra_args)
        while True:
            actions, _, state, _ = model.step(obs, S=state, M=dones)
            obs, _, done, _ = env.step(actions)
            env.render()
            done = done.any() if isinstance(done, np.ndarray) else done

            if done:
                obs = env.reset()

        env.close() 
Example #12
Source File: CreateDatabase.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def AddAllFromDirectory(fileDatabase, directory):
    logger.log('Scanning {} to create FileCollectionDatabase'.format(directory))
    numFiles = 0
    for root, dirs, files in os.walk(directory):
        for file in files:
            fileDatabase.addFileNoSave(file, os.path.relpath(os.path.join(root, file), directory))
            numFiles += 1
            
    fileDatabase.save()

    logger.log('Done; {} files in database'.format(numFiles)) 
Example #13
Source File: CreateDatabase.py    From Liked-Saved-Image-Downloader with MIT License    5 votes
def AddAllFromReddit(database, settings):
    if not settings.hasRedditSettings():
        logger.log('Reddit settings are not provided!')
        return

    submissions = []

    logger.log('Adding last 1000 liked/saved submissions from Reddit. This will take a long time.')

    redditSubmissions, redditComments, earlyOutPoints = redditScraper.getRedditUserLikedSavedSubmissions(
        settings.settings['Username'], settings.settings['Password'], 
        settings.settings['Client_id'], settings.settings['Client_secret'],
        request_limit = None, # No limit = request as many as possible (1000)
        saveLiked = settings.settings['Reddit_Save_Liked'], 
        saveSaved = settings.settings['Reddit_Save_Saved'],
        earlyOutPointSaved = None, 
        earlyOutPointLiked = None,
        unlikeLiked = False,
        unsaveSaved = False)

    logger.log('Retrieved submissions, adding to database...')

    for submission in redditSubmissions:
        database.addSubmission(submission)

    for comment in redditComments:
        database.addComment(comment)

    logger.log('Done! Saved {} submissions and {} comments'.format(len(redditSubmissions), len(redditComments))) 
Example #14
Source File: conn_manager.py    From appetite with Apache License 2.0    5 votes
def _log_rc(cmd_output, funct_name, **kwargs):
    """Generic logger that picks the correct log level based on return code"""

    rc = cmd_output['rc'] if 'rc' in cmd_output else cmd_output

    logger.log(logger.decide_level(rc), funct_name, **kwargs)
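The same pattern of choosing the log level from a return code maps directly onto the standard library, since logging.log() also takes the level as its first argument. A minimal stdlib sketch; decide_level here is a hypothetical stand-in for appetite's logger.decide_level:

import logging

logging.basicConfig(level=logging.DEBUG)

def decide_level(rc):
    # Hypothetical stand-in: treat any non-zero return code as an error
    return logging.ERROR if rc else logging.INFO

def log_rc(cmd_output, funct_name):
    rc = cmd_output['rc'] if isinstance(cmd_output, dict) else cmd_output
    logging.log(decide_level(rc), '%s finished with rc=%s', funct_name, rc)

log_rc({'rc': 1, 'stderror': 'ssh channel failed'}, 'run_single')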
Example #15
Source File: fieldnode.py    From acvtool with Apache License 2.0    5 votes
def __parse(self, lines):
        #log("FieldNode: " + line + " parsing")
        self.buf = lines

        i = self.buf[0].find('=')
        segs = []
        if i > 0:
            segs = self.buf[0][:i].split()
            self.value = self.buf[0][i + 1:].strip()
        else:
            segs = self.buf[0].split()
        self.access = segs[1:-1]
        self.name, self.descriptor = segs[-1].split(':')
        log("FieldNode: " + self.name + " parsed!") 
Example #16
Source File: gdqn.py    From KG-A2C with MIT License    5 votes
def configure_logger(log_dir):
    logger.configure(log_dir, format_strs=['log'])
    global tb
    tb = logger.Logger(log_dir, [logger.make_output_format('tensorboard', log_dir),
                                 logger.make_output_format('csv', log_dir),
                                 logger.make_output_format('stdout', log_dir)])
    global log
    log = logger.log 
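Once configure_logger has run, the module-level log and tb globals are ready anywhere in gdqn.py; Example #1 calls log('Parameters {}'.format(params)) this way. A hedged usage sketch, assuming the baselines Logger interface where logkv queues a key/value pair and dumpkvs flushes it to every configured output format:

configure_logger('logs/run1')
log('Starting training')         # free-form text line via the 'log' format
tb.logkv('EpisodeScore', 42)     # queued for the tensorboard, csv, and stdout outputs
tb.dumpkvs()                     # flush all queued key/value pairs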
Example #17
Source File: run.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0    5 votes
def main(args, extra_args, save_path):
    # configure logger, disable logging in child MPI processes (with rank > 0)

    # arg_parser = common_arg_parser()
    # args, unknown_args = arg_parser.parse_known_args()
    # extra_args = parse_cmdline_kwargs(unknown_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure()
    else:
        logger.configure(format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()

    model, env = train(args, extra_args)
    # env.close()

    # if args.save_path is not None and rank == 0:
    #     save_path = osp.expanduser(args.save_path)
    model.save(save_path)

    if args.play:
        logger.log("Running trained model")
        print()
        # env = build_env(args)
        obs = env.reset()
        def initialize_placeholders(nlstm=128, **kwargs):
            return np.zeros((args.num_env or 1, 2 * nlstm)), np.zeros(1)
        state, dones = initialize_placeholders(**extra_args)
        while True:
            actions, _, state, _ = model.step(obs, S=state, M=dones)
            obs, _, done, _ = env.step(actions)
            env.render()
            done = done.any() if isinstance(done, np.ndarray) else done

            if done:
                obs = env.reset()

        # env.close() 
Example #18
Source File: run.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0    5 votes
def main(args, extra_args, save_path):
    # configure logger, disable logging in child MPI processes (with rank > 0)

    # arg_parser = common_arg_parser()
    # args, unknown_args = arg_parser.parse_known_args()
    # extra_args = parse_cmdline_kwargs(unknown_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure()
    else:
        logger.configure(format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()

    model, env = train(args, extra_args)
    # env.close()

    # if args.save_path is not None and rank == 0:
    #     save_path = osp.expanduser(args.save_path)
    model.save(save_path)

    if args.play:
        logger.log("Running trained model")
        print()
        # env = build_env(args)
        obs = env.reset()
        def initialize_placeholders(nlstm=128, **kwargs):
            return np.zeros((args.num_env or 1, 2 * nlstm)), np.zeros(1)
        state, dones = initialize_placeholders(**extra_args)
        while True:
            actions, _, state, _ = model.step(obs, S=state, M=dones)
            obs, _, done, _ = env.step(actions)
            env.render()
            done = done.any() if isinstance(done, np.ndarray) else done

            if done:
                obs = env.reset()

        env.close() 
Example #19
Source File: redditScraper.py    From Liked-Saved-Image-Downloader with MIT License    4 votes
def getRedditUserLikedSavedSubmissions(user_name, user_password, client_id, client_secret,
                                       request_limit = 10, saveLiked = True, saveSaved = True,
                                       earlyOutPointSaved = None, earlyOutPointLiked = None,
                                       unlikeLiked = False, unsaveSaved = False):
    r = praw.Reddit(client_id = client_id,
        client_secret=client_secret,
        username=user_name,
        password=user_password,
        user_agent=user_agent)

    logger.log('\n\nCommunicating with reddit. This should only take a minute...\n')

    savedLinks = None 
    if saveSaved: 
        logger.log('\tGetting saved links...') 
        savedLinks = r.user.me().saved(limit=request_limit) 
        savedLinks = list(savedLinks) 
 
    likedLinks = None 
    if saveLiked: 
        logger.log('\tGetting liked links...') 
        likedLinks = r.user.me().upvoted(limit=request_limit) 
        likedLinks = list(likedLinks)

    savedSubmissions = []
    savedComments = []
    if saveSaved: 
        logger.log('\n\nRetrieving your saved submissions. This can take several minutes...\n') 
        savedSubmissions, savedComments = getSubmissionsFromRedditList(savedLinks, 'saved',
                                                                       earlyOutPointSaved, unsaveSaved, user_name)
 
    likedSubmissions = []
    likedComments = []
    if saveLiked: 
        logger.log('\n\nRetrieving your liked submissions. This can take several minutes...\n') 
        likedSubmissions, likedComments = getSubmissionsFromRedditList(likedLinks, 'liked',
                                                                       earlyOutPointLiked, unlikeLiked, user_name)
 
    submissions = savedSubmissions + likedSubmissions
    # I don't think you can ever have liked comments, but I'm including it anyway
    comments = savedComments + likedComments

    newEarlyOutSaved = savedSubmissions[0] if len(savedSubmissions) else None
    newEarlyOutLiked = likedSubmissions[0] if len(likedSubmissions) else None
    return submissions, comments, (newEarlyOutSaved, newEarlyOutLiked) 
Example #20
Source File: redditUserImageScraper.py    From Liked-Saved-Image-Downloader with MIT License    4 votes
def runLikedSavedDownloader(pipeConnection):
    if pipeConnection:
        logger.setPipe(pipeConnection)
        
    initialize()

    if (not settings.settings['Use_cached_submissions'] 
        and not settings.hasTumblrSettings() and not settings.hasRedditSettings()):
        logger.log('Please provide Tumblr or Reddit account details in settings.txt'
                   ' or via the Settings page provided by the LikedSavedDownloader server')
        return
            
    if not settings.settings['Gfycat_Client_id']:
        logger.log('No Gfycat Client ID and/or Gfycat Client Secret was provided. '
                   'This is required to download Gfycat media reliably.')

    logger.log('Output: ' + settings.settings['Output_dir'])
    utilities.makeDirIfNonexistant(settings.settings['Output_dir'])
    utilities.makeDirIfNonexistant(settings.settings['Metadata_output_dir'])
        
    submissions = getSubmissionsToSave()

    logger.log('Saving images. This will take several minutes...')
    unsupportedSubmissions = imageSaver.saveAllImages(settings.settings['Output_dir'], submissions, 
                                                      imgur_auth = imgurDownloader.getImgurAuth(),
                                                      only_download_albums = settings.settings['Only_download_albums'],
                                                      skip_n_percent_submissions = settings.settings['Skip_n_percent_submissions'],
                                                      soft_retrieve_imgs = settings.settings['Should_soft_retrieve'],
                                                      only_important_messages = settings.settings['Only_important_messages'])

    # Write out a .json file listing all of the submissions the script failed to download
    if unsupportedSubmissions:
        submission.saveSubmissionsAsJson(unsupportedSubmissions, settings.settings['Metadata_output_dir'] + u'/' 
                                         + 'UnsupportedSubmissions_' + time.strftime("%Y%m%d-%H%M%S") + '.json') 

    if settings.settings['Should_soft_retrieve']:
        logger.log('\nYou have run the script in Soft Retrieve mode - if you actually\n'
                   'want to download images now, you should change SHOULD_SOFT_RETRIEVE\n'
                   'to False in settings.txt')

    if pipeConnection:
        logger.log(scriptFinishedSentinel)
        pipeConnection.close() 
Example #21
Source File: conn_manager.py    From appetite with Apache License 2.0    4 votes
def run_single(self, command, ssh=None):
        """Runs a single cmd command on the remote host
        """

        if not ssh:
            if not self.create_ssh_channel():
                return {"rc": 1,
                        "stderror": "Error creating ssh channel",
                        "stdout": "",
                        "function": self.function_name}
            ssh = self.ssh

        reads = None
        cmd = command
        if isinstance(command, dict):
            cmd = command['cmd']
            reads = command['reads']

        rc = 0
        std_out = ""
        std_error = ""

        if not CREDS.DRY_RUN:
            # Dangerous, only use if commands are filtered/protected
            # Only commands either defined here or in the command.conf should
            # run here.
            if reads:
                # Only use invoke shell if needed
                channel = ssh.invoke_shell()  # nosec

                channel.settimeout(SESSION_SHELL_TIMEOUT)

                # Remove any ssh login messages
                send_command(channel, "")

                read_commands = []
                for param, value in reads.items():
                    read_commands.append("read -s %s" % param)
                    read_commands.append(value)

                    # Don't want to log any read commands
                    send_command(channel, read_commands)

                std_out, std_error, rc = send_command(channel, self._add_root(cmd))
            else:
                stdin, stdout, stderr = ssh.exec_command(self._add_root(cmd), get_pty=True, timeout=SESSION_TIMEOUT)  # nosec
                rc = stdout.channel.recv_exit_status()

                std_out = stdout.read()
                std_error = stderr.read()
                stdin.flush()

        return {"stdout": std_out,
                "stderror": std_error,
                "function": self.function_name,
                "rc": rc}


# Helper ssh function 
Example #22
Source File: imageSaver.py    From Liked-Saved-Image-Downloader with MIT License    4 votes
def findSourceFromHTML(url, sourceKey, sourceKeyAttribute=''):
    SANE_NUM_LINES = 30

    pageSourceLines, pageEncoding = getUrlLines(url)

    if not pageSourceLines:
        return None

    # If a page has fewer than this number of lines, there is something wrong.
    # This is a somewhat arbitrary heuristic
    if len(pageSourceLines) <= SANE_NUM_LINES:
        logger.log('Url "' + url + '" has a suspicious number of lines (' + str(len(pageSourceLines)) + ')')

    for line in pageSourceLines:
        lineStr = line
        if sys.version_info[0] >= 3 and pageEncoding:
            # If things are breaking near here you're not reading a .html
            lineStr = line.decode(pageEncoding)

        try:
            foundSourcePosition = lineStr.lower().find(sourceKey.lower())
        # Probably not reading a text file; we won't be able to determine the type
        except TypeError:
            logger.log('Unable to guess type for Url "' + url + '"')
            return ''

        if foundSourcePosition > -1:
            urlStartPosition = -1
            if sourceKeyAttribute:
                attributePosition = lineStr[foundSourcePosition:].lower().find(sourceKeyAttribute.lower())
                # Find the first character of the URL specified by the attribute (add 1 for the ")
                urlStartPosition = foundSourcePosition + attributePosition + len(sourceKeyAttribute) + 1
            else:
                # Find the first character of the URL (add 1 for the ")
                urlStartPosition = foundSourcePosition + len(sourceKey) + 1

            # From the start of the url, search for the next '"' which is the end of the src link
            urlEndPosition = lineStr[urlStartPosition:].find('"')

            if urlEndPosition > -1:
                sourceUrl = lineStr[urlStartPosition:urlStartPosition + urlEndPosition]

                return sourceUrl

    return '' 
Example #23
Source File: imageSaver.py    From Liked-Saved-Image-Downloader with MIT License    4 votes
def convertGfycatUrlToWebM(submission, url):
    global gfycatClient
    # Change this:
    #   https://gfycat.com/IndolentScalyIncatern
    #   https://gfycat.com/IndolentScalyIncatern/
    # Into this:
    #   https://zippy.gfycat.com/IndolentScalyIncatern.webm
    # Or maybe this:
    #   https://giant.gfycat.com/IndolentScalyIncatern.webm

    # Lazy initialize client
    if not gfycatClient and settings.settings['Gfycat_Client_id']:
        gfycatClient = GfycatClient(settings.settings['Gfycat_Client_id'], settings.settings['Gfycat_Client_secret'])

    # Still don't have a client?
    if not gfycatClient:
        logger.log("Warning: no Gfycat client; gifs will likely fail to download")
        newUrl = gfycatToRedGifsWorkaround(url)
        if newUrl:
            return newUrl
        # Hacky solution while Gfycat API isn't set up. This breaks if case is wrong
        return "https://giant.gfycat.com/{}.webm".format(url[url.rfind("/") + 1:])
    else:
        # Get the gfyname from the url
        matches = re.findall(r'gfycat\.com.*/([a-zA-Z]+)', url)
        if not matches:
            errorMessage = "Gfycat URL {} doesn't seem to match expected URL format".format(url)
            logger.log(errorMessage)
            LikedSavedDatabase.db.addUnsupportedSubmission(submission, errorMessage)
        else:
            try:
                gfycatUrlInfo = gfycatClient.query_gfy(matches[0])
            except Exception as e:
                errorMessage = '[ERROR] Exception: Url {0} raised exception:\n\t {1}'.format(url, e)
                logger.log(errorMessage)
                logger.log("Gfycat client was used to make this query")
                # Gfycat sucks. They created RedGifs, but broke Gfycat API by making it not actually
                # support that transition, and you can't get a RedGifs API token unless you email
                # them for one. Great engineering, folks
                newUrl = gfycatToRedGifsWorkaround(url)
                if newUrl:
                    return newUrl
                LikedSavedDatabase.db.addUnsupportedSubmission(submission, errorMessage)
                return None
            return gfycatUrlInfo['gfyItem']['mp4Url']