Python utils.make_dir() Examples

The following are 13 code examples of utils.make_dir(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out all available functions and classes of the module utils.
Example #1
Source File: jd_item_info.py    From jd_analysis with GNU Lesser General Public License v3.0
def __init__(self, name=None, **kwargs):
        super(JDItemInfoSpider, self).__init__(name, **kwargs)
        self.url = kwargs.get("url")
        self.guid = kwargs.get('guid', 'guid')
        self.product_id = kwargs.get('product_id')
        # self.url = 'https://item.jd.com/11478178241.html'
        # self.url = 'https://item.jd.com/4142680.html'
        # self.url = 'https://item.jd.com/3133859.html'
        # self.url = 'https://item.jd.com/3995645.html'
        # self.product_id = 3995645
        self.log('product_id:%s' % self.product_id)
        self.item_table = 'item_%s' % self.product_id
        self.urls_key = '%s_urls' % self.product_id

        self.log_dir = 'log/%s' % self.product_id
        self.is_record_page = False

        self.sql = kwargs.get('sql')
        self.red = kwargs.get('red')

        if self.is_record_page:
            # make sure the per-product log directory exists before recording pages
            utils.make_dir(self.log_dir) 
Example #2
Source File: jd_comment.py    From jd_analysis with GNU Lesser General Public License v3.0
def __init__(self, name=None, **kwargs):
        super(JDCommentSpider, self).__init__(name, **kwargs)
        self.url = kwargs.get("url")
        self.guid = kwargs.get('guid', 'guid')
        self.product_id = kwargs.get('product_id')
        # self.url = 'https://item.jd.com/11478178241.html'
        # self.url = 'https://item.jd.com/4142680.html'
        # self.url = 'https://item.jd.com/3133859.html'
        # self.url = 'https://item.jd.com/3995645.html'
        # self.product_id = 3995645
        self.log('product_id:%s' % self.product_id)
        self.item_table = 'item_%s' % self.product_id
        self.urls_key = '%s_urls' % self.product_id

        self.log_dir = 'log/%s' % self.product_id
        self.is_record_page = False

        self.sql = kwargs.get('sql')
        self.red = kwargs.get('red')
        proxymng.red = self.red
        
        if self.is_record_page:
            utils.make_dir(self.log_dir)

        self.init() 
Example #3
Source File: models.py    From deep-q-learning with MIT License
def load(self):
        '''
        Loads replay memory (attributes and arrays) into self, if possible.
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        try:
            print('Loading Memory data into Replay Memory Instance...')
            # Load property list (context manager ensures the file is closed)
            with open('{}/properties.json'.format(self.data_dir)) as f:
                d = json.load(f)
            # Load numpy arrays
            self._load_arrays(self.data_dir, d['saved_size'])

            print('Finished loading Memory data into Replay Memory Instance!')

        except IOError as e:
            self.__init__(self.memory_size,
                          data_dir=self.data_dir, load_existing=False)
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            print("Couldn't find initial values for Replay Memory, instance init as new.") 
Example #4
Source File: validator.py    From IPProxyTool with MIT License
def init(self):
        self.dir_log = 'log/validator/%s' % self.name
        utils.make_dir(self.dir_log)

        self.sql.init_proxy_table(self.name) 
Example #5
Source File: basespider.py    From IPProxyTool with MIT License
def init(self):
        self.meta = {
            'download_timeout': self.timeout,
        }

        self.dir_log = 'log/proxy/%s' % self.name
        utils.make_dir(self.dir_log)
        self.sql.init_proxy_table(config.free_ipproxy_table) 
Example #6
Source File: models.py    From deep-q-learning with MIT License
def save(self, size):
        '''
        Saves replay memory (attributes and arrays).
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        print('Saving Memory data into Replay Memory Instance...')
        # Save property dict
        with open('{}/properties.json'.format(self.data_dir), 'w') as f:
            json.dump(self.to_dict(size), f)
        # Save arrays
        self._save_arrays(self.data_dir, size) 
Example #7
Source File: meg_statistics.py    From mmvt with GNU General Public License v3.0
def plot_perm_ttest_results(events_id, inverse_method='dSPM', plot_type='scatter_plot'):
    print('plot_perm_ttest_results')
    all_data = defaultdict(dict)
    fsave_vertices = [np.arange(10242), np.arange(10242)]
    fs_pts = mne.vertex_to_mni(fsave_vertices, [0, 1], 'fsaverage', LOCAL_SUBJECTS_DIR) # 0 for lh
    for cond_id, cond_name, patient, hc, data in patients_hcs_conds_gen(events_id, True, inverse_method):
        all_data[patient][hc] = data[()]
    print(all_data.keys())
    for patient, pat_data in all_data.items():  # .iteritems() is Python 2 only
        print(patient)
        fol = op.join(LOCAL_ROOT_DIR, 'permutation_ttest_results', patient)
        utils.make_dir(fol)
        if op.isfile(op.join(fol, 'perm_ttest_points.npz')):
            d = np.load(op.join(fol, 'perm_ttest_points.npz'))
            if plot_type == 'scatter_plot':
                points, values = d['points'][()], d['values'][()]
            elif plot_type == 'pysurfer':
                vertices, vertives_values = d['vertices'][()], d['vertives_values'][()]
        else:
            points, values, vertices, vertives_values = calc_points(pat_data, fs_pts)
            np.savez(op.join(fol, 'perm_ttest_points'), points=points, values=values, vertices=vertices, vertives_values=vertives_values)
        max_vals = 8 # int(np.percentile([max(v) for v in values.values()], 70))
        print(max_vals)
        fol = op.join(fol, '{}_figures'.format(plot_type))
        utils.make_dir(fol)
        if plot_type == 'scatter_plot':
            scatter_plot_perm_ttest_results(points, values, fs_pts, max_vals, fol)
        elif plot_type == 'pysurfer':
            pysurfer_plot_perm_ttest_results(vertices, vertives_values, max_vals, fol) 
Example #8
Source File: meg_statistics.py    From mmvt with GNU General Public License v3.0
def create_local_folds():
    for fol in ['eop', 'evo', 'inv', 'stc', 'stc_epochs', 'stc_morphed',
                'stc_epochs_morphed', 'stc_clusters', 'clusters_results',
                'permutation_ttest_results', 'results_for_blender']:
        utils.make_dir(op.join(LOCAL_ROOT_DIR, fol)) 
Example #9
Source File: 11_char_rnn_gist.py    From stanford-tensorflow-tutorials with MIT License
def main():
    vocab = (
            " $%'()+,-./0123456789:;=?ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "\\^_abcdefghijklmnopqrstuvwxyz{|}")
    seq = tf.placeholder(tf.int32, [None, None])
    temp = tf.placeholder(tf.float32)
    loss, sample, in_state, out_state = create_model(seq, temp, vocab)
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    optimizer = tf.train.AdamOptimizer(LR).minimize(loss, global_step=global_step)
    utils.make_dir('checkpoints')
    utils.make_dir('checkpoints/arvix')
    training(vocab, seq, loss, optimizer, global_step, temp, sample, in_state, out_state) 
Example #10
Source File: process_data.py    From stanford-tensorflow-tutorials with MIT License
def build_vocab(words, vocab_size):
    """ Build vocabulary of VOCAB_SIZE most frequent words """
    dictionary = dict()
    count = [('UNK', -1)]
    count.extend(Counter(words).most_common(vocab_size - 1))
    index = 0
    utils.make_dir('processed')
    with open('processed/vocab_1000.tsv', "w") as f:
        for word, _ in count:
            dictionary[word] = index
            if index < 1000:
                f.write(word + "\n")
            index += 1
    index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, index_dictionary 
Example #11
Source File: style_transfer.py    From stanford-tensorflow-tutorials with MIT License
def main():
    with tf.variable_scope('input') as scope:
        # use a variable instead of a placeholder because we're training the initial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    
    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    ## model['optimizer'] = ...
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image) 
Example #12
Source File: style_transfer.py    From stanford-tensorflow-tutorials with MIT License
def main():
    with tf.variable_scope('input') as scope:
        # use a variable instead of a placeholder because we're training the initial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], 
                                                            global_step=model['global_step'])
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image) 
Example #13
Source File: 04_word2vec_visualize.py    From stanford-tensorflow-tutorials with MIT License
def train_model(model, batch_gen, num_train_steps, weights_fld):
    saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias

    initial_step = 0
    utils.make_dir('checkpoints')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
        # if that checkpoint exists, restore from checkpoint
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        total_loss = 0.0 # we use this to calculate the average loss over the last SKIP_STEP steps
        writer = tf.summary.FileWriter('improved_graph/lr' + str(LEARNING_RATE), sess.graph)
        initial_step = model.global_step.eval()
        for index in range(initial_step, initial_step + num_train_steps):
            centers, targets = next(batch_gen)
            feed_dict = {model.center_words: centers, model.target_words: targets}
            loss_batch, _, summary = sess.run([model.loss, model.optimizer, model.summary_op], 
                                              feed_dict=feed_dict)
            writer.add_summary(summary, global_step=index)
            total_loss += loss_batch
            if (index + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
                total_loss = 0.0
                saver.save(sess, 'checkpoints/skip-gram', index)
        
        ####################
        # code to visualize the embeddings. uncomment the lines below to enable it,
        # then run "tensorboard --logdir=processed" to see the embeddings
        # final_embed_matrix = sess.run(model.embed_matrix)
        
        # # it has to be a variable; constants don't work here. you can't reuse model.embed_matrix
        # embedding_var = tf.Variable(final_embed_matrix[:1000], name='embedding')
        # sess.run(embedding_var.initializer)

        # config = projector.ProjectorConfig()
        # summary_writer = tf.summary.FileWriter('processed')

        # # add embedding to the config file
        # embedding = config.embeddings.add()
        # embedding.tensor_name = embedding_var.name
        
        # # link this tensor to its metadata file, in this case the first 1000 words of vocab
        # embedding.metadata_path = 'processed/vocab_1000.tsv'

        # # saves a configuration file that TensorBoard will read during startup.
        # projector.visualize_embeddings(summary_writer, config)
        # saver_embed = tf.train.Saver([embedding_var])
        # saver_embed.save(sess, 'processed/model3.ckpt', 1)