Python tensorpack.utils.logger.info() Examples

The following are 30 code examples of tensorpack.utils.logger.info(), drawn from open-source projects. Each example lists its source file, project, and license. You may also want to check out the other available functions and classes of the module tensorpack.utils.logger.
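A minimal sketch of typical usage, for orientation (the directory name is an assumption, not taken from any project below):

from tensorpack.utils import logger

logger.set_logger_dir('train_log/my_experiment')  # also attaches a file handler under this directory
logger.info("Starting training with batch size {}".format(32))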
Example #1
Source File: common.py    From petridishnn with MIT License
def generate_regression_callbacks(layer_info_list, name_only=False):
    """
        A list of callbacks for getting validation errors.
    """
    vcs = []
    names = []
    for info in layer_info_list:
        if info.aux_weight > 0:
            scope_name = scope_prediction(info.id)
            name = scope_name + '/mean_square_error:0'
            vcs.append(ScalarStats(
                names=name,
                prefix='val_'))
            names.append(name)
    if name_only:
        return names
    return vcs 
Example #2
Source File: anytime_fcn.py    From petridishnn with MIT License
def __init__(self, args):
        super(AnytimeFCN, self).__init__(None, args)

        # Class weight for fully convolutional networks
        self.class_weight = None
        if hasattr(args, 'class_weight'):
            self.class_weight = args.class_weight
        if self.class_weight is None:
            self.class_weight = np.ones(self.num_classes, dtype=np.float32)
        logger.info('Class weights: {}'.format(self.class_weight))

        self.is_label_one_hot = args.is_label_one_hot
        self.eval_threshold = args.eval_threshold
        self.do_scale_feat_to_label = args.do_scale_feat_to_label
        self.n_pools = args.n_pools if not self.do_scale_feat_to_label else 0
        self.is_atrous = args.is_atrous
        self.output_stride = args.output_stride
        # Atrous rates scale as base_output_stride / output_stride * base_rate,
        # where base_output_stride == 16.
        self.atrous_rates = [6, 12, 18]
        self.atrous_rates_base_output_stride = 16

        self.input_height = args.input_height
        self.input_width = args.input_width 
Example #3
Source File: anytime_fcn.py    From petridishnn with MIT License
def __init__(self, args):
        super(FCDensenet, self).__init__(args)
        self.reduction_ratio = self.options.reduction_ratio
        self.growth_rate = self.options.growth_rate

        # Class weight for fully convolutional networks
        self.class_weight = None
        if hasattr(args, 'class_weight'):
            self.class_weight = args.class_weight
        if self.class_weight is None:
            self.class_weight = np.ones(self.num_classes, dtype=np.float32)
        logger.info('Class weights: {}'.format(self.class_weight))

        # Other formats are not supported yet.
        assert self.n_pools * 2 + 1 == self.n_blocks

        # FC-Dense does not work with the ImageNet-style initial conv/pool stem.
        assert self.network_config.s_type == 'basic'

        # TODO: this version doesn't support anytime prediction (yet).
        assert self.options.func_type == FUNC_TYPE_OPT 
Example #4
Source File: critic.py    From petridishnn with MIT License
def critic_predictor(ctrl, model_dir, vs_name):
    """
    Create an OfflinePredictorWithSaver for test-time use.
    """
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]
    session_config = None
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count={'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config)
    )
    if model_dir:
        ckpt = tf.train.latest_checkpoint(model_dir)
        # Only log and restore when a checkpoint actually exists.
        if ckpt:
            logger.info("Loading {} predictor from {}".format(vs_name, ckpt))
            pred_config.session_init = SaverRestore(ckpt)
    predictor = OfflinePredictorWithSaver(pred_config)
    return predictor 
Example #5
Source File: layer_info.py    From petridishnn with MIT License
def _remove_connection_from_id(info, id_to_remove):
        if id_to_remove not in info.inputs:
            return info
        if isinstance(info.stop_gradient, list):
            assert len(info.stop_gradient) == len(info.inputs), \
                "Invalid info {}".format(info)
        if isinstance(info.down_sampling, list):
            assert len(info.down_sampling) == len(info.inputs), \
                "Invalid info {}".format(info)
        assert len(info.operations) == len(info.inputs) + 1, \
            "Invalid info {}".format(info)

        idx = 0
        while idx < len(info.inputs):
            if info.inputs[idx] == id_to_remove:
                del info.inputs[idx]
                del info.operations[idx]
                if isinstance(info.stop_gradient, list):
                    del info.stop_gradient[idx]
                if isinstance(info.down_sampling, list):
                    del info.down_sampling[idx]
                # Do not advance idx after a deletion: the next element shifts
                # into slot idx, and advancing would skip adjacent matches.
            else:
                idx += 1
        return info 
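As noted in the comment above, the index must stand still after a deletion. A minimal, self-contained sketch of the same pattern on plain lists (hypothetical values, not from petridishnn):

# Remove every 7 from a list in place.
inputs = [3, 7, 7, 5]
idx = 0
while idx < len(inputs):
    if inputs[idx] == 7:
        del inputs[idx]  # the next element slides into slot idx
    else:
        idx += 1
print(inputs)  # [3, 5]; incrementing after the delete would leave [3, 7, 5]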
Example #6
Source File: layer_info.py    From petridishnn with MIT License
def _create_info_merge(next_id, h_id, o_id, aux_weight, is_candidate,
            final_merge_op=LayerTypes.MERGE_WITH_SUM,
            hallu_gate_layer=LayerTypes.NO_FORWARD_LAYER):
        """
        Form the LayerInfo for the merge operation between hallu of id h_id and the original
        tensor of id o_id (out_id). The new LayerInfo will have info.id == next_id.
        Return a list of layers used for merging
        Note any change to this function need to be mirrored in _finalize_info_merge
        """
        inputs = [None] * 2
        inputs[LayerInfoList.ORIG_IDX_IN_MERGE_HALLU] = o_id
        inputs[LayerInfoList.HALLU_IDX_IN_MERGE_HALLU] = h_id
        operations = [LayerTypes.IDENTITY] * 2 + [final_merge_op]
        operations[LayerInfoList.HALLU_IDX_IN_MERGE_HALLU] = hallu_gate_layer
        info = LayerInfo(next_id, inputs=inputs, operations=operations,
            aux_weight=aux_weight, is_candidate=is_candidate)
        return [info] 
Example #7
Source File: critic.py    From petridishnn with MIT License
def crawl_ve_from_remote_logs(mi_info, dn):
    """
    deprecated do not use

    Args:
    mi_info : a dict mapping from model iter to ModelSearchInfo
    dn : directory path of the one that directly contains the server log.log, i.e.,
    the remote logs are in {dn}/{model_iter}/log.log
    """
    for mi in mi_info:
        info = mi_info[mi]
        if info.ve is None or info.ve > 1.0:
            log_fn = os.path.join(dn, str(mi), 'log.log')
            if os.path.exists(log_fn):
                ve = grep_val_err_from_log(log_fn)
                mi_info[mi].ve = ve
    return mi_info 
Example #8
Source File: pointnet.py    From tensorflow-recipes with Apache License 2.0
def input_transform(self, points, k=3):
        # Estimate an input transform from the points: [B,N,3] --> [3, k]
        num_point = points.get_shape()[1]
        points = tf.expand_dims(points, -1)
        with argscope(Conv2D, nl=BNReLU, padding='VALID'), \
                argscope(FullyConnected, nl=BNReLU):
            transmat = (LinearWrap(points)
                        .Conv2D('tconv0', 64, kernel_shape=[1, 3])
                        .Conv2D('tconv1', 128, kernel_shape=1)
                        .Conv2D('tconv2', 1024, kernel_shape=1)
                        .MaxPooling('tpool0', [num_point, 1])
                        .FullyConnected('tfc0', 512, nl=BNReLU)
                        .FullyConnected('tfc1', 256, nl=BNReLU)
                        .TransformPoints('transf_xyz', 3, in_dim=3)())
        logger.info('transformation matrix: {}\n\n'.format(transmat.get_shape()))
        return transmat 
Example #9
Source File: steering-filter.py    From tensorflow-recipes with Apache License 2.0
def _parameter_net(self, theta, kernel_shape=9):
        """Estimate filters for convolution layers

        Args:
            theta: angle of filter
            kernel_shape: size of each filter

        Returns:
            learned filter as [B, k, k, 1]
        """
        with argscope(LeakyReLU, alpha=0.2), \
                argscope(FullyConnected, nl=LeakyReLU):
            net = FullyConnected('fc1', theta, 64)
            net = FullyConnected('fc2', net, 128)

        pred_filter = FullyConnected('fc3', net, kernel_shape ** 2, nl=tf.identity)
        pred_filter = tf.reshape(pred_filter, [BATCH, kernel_shape, kernel_shape, 1], name="pred_filter")
        logger.info('Parameter net output: {}'.format(pred_filter.get_shape().as_list()))
        return pred_filter 
Example #10
Source File: controller.py    From petridishnn with MIT License
def _log_convex_hull_parent_choice(self, q_parent, mi_info, e_idx):
        l_pqef = list(q_parent.all_as_generator(full_info=True))
        l_mi, l_ve, l_fp, l_cnt = [], [], [], []
        for pqef in l_pqef:
            mi = pqef[IDX_PQE].model_iter
            l_mi.append(mi)
            l_ve.append(mi_info[mi].ve)
            l_fp.append(mi_info[mi].fp)
            l_cnt.append(pqef[IDX_CNT])
        logger.info(
            "CONVEX HULL info:\nl_fp={}\nl_ve={}\nl_cnt={}\nl_mi={}".format(
                l_fp, l_ve, l_cnt, l_mi))
        logger.info("Chose parent e_idx={} mi={}".format(e_idx, l_mi[e_idx])) 
Example #11
Source File: imagenet_utils.py    From GroupNorm-reproduce with Apache License 2.0
def build_graph(self, image, label):
        image = self.image_preprocess(image)
        assert self.data_format in ['NCHW', 'NHWC']
        if self.data_format == 'NCHW':
            image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        loss = ImageNetModel.compute_loss_and_error(
            logits, label, label_smoothing=self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost 
Example #12
Source File: utils_tp.py    From imgclsmob with MIT License
def calc_flops(model):
    # manually build the graph with batch=1
    input_desc = [
        InputDesc(tf.float32, [1, model.image_size, model.image_size, 3], "input"),
        InputDesc(tf.int32, [1], "label")
    ]
    input = PlaceholderInput()
    input.setup(input_desc)
    with TowerContext("", is_training=False):
        model.build_graph(*input.get_input_tensors())
    model_utils.describe_trainable_vars()

    tf.profiler.profile(
        tf.get_default_graph(),
        cmd="op",
        options=tf.profiler.ProfileOptionBuilder.float_operation())
    logger.info("Note that TensorFlow counts flops in a different way from the paper.")
    logger.info("TensorFlow counts multiply+add as two flops, however the paper counts them "
                "as 1 flop because it can be executed in one instruction.") 
Example #13
Source File: feedforward.py    From petridishnn with MIT License
def __init__(self, options):
        super(PetridishModel, self).__init__()
        self.options = options

        # Classification info
        self.prediction_feature = options.prediction_feature
        self.out_filters = options.init_channel
        self.stem_channel_rate = options.stem_channel_rate
        self.data_format = options.data_format

        # LayerInfoList as a record of the mathematical graph
        self.net_info = options.net_info
        self.master = self.net_info.master
        self.is_cell_based = self.net_info.is_cell_based()
        self.n_layers = len(self.master)
        self.n_aux_preds = sum([int(x.aux_weight > 0) for x in self.master])

        self.ch_dim = _data_format_to_ch_dim(self.data_format)
        self.params_to_regularize = None

        self.compute_hallu_stats = False
        if hasattr(options, 'compute_hallu_stats'):
            self.compute_hallu_stats = options.compute_hallu_stats 
Example #14
Source File: petridish_main.py    From petridishnn with MIT License
def fork_and_train_model(ipc, options, log_dir, child_dir, prev_dir,
        model_str, model_iter, parent_iter, search_depth, job_type):
    """
    Spawn a process to write a script for the crawler. then
    wait for the crawler to finish. Aftewards, report to the
    main process.
    """
    entry_func = partial(
        train_child_remotely,
        model_options=options, log_dir=log_dir,
        child_dir=child_dir, prev_dir=prev_dir,
        curr_iter=model_iter)
    #logger.info('Remote child {} will check finish in dir {}'.format(
    #   model_iter, log_dir))
    stop_func = partial(has_stopped, log_dir=log_dir)
    msg_func = lambda model_str=model_str, \
        model_iter=model_iter, parent_iter=parent_iter, \
        search_depth=search_depth, job_type=job_type \
        : [ model_str, model_iter, parent_iter, search_depth, job_type ]
    ipc.spawn(job_type, entry_func, stop_func, msg_func, sleep_time=1) 
Example #15
Source File: imagenet_utils.py    From ghostnet with Apache License 2.0
def build_graph(self, image, label):
        image = ImageNetModel.image_preprocess(image, bgr=self.image_bgr)
        assert self.data_format in ['NCHW', 'NHWC']
        if self.data_format == 'NCHW':
            image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        logger.info("label_smoothing: {}".format(self.label_smoothing))
        loss = ImageNetModel.compute_loss_and_error(logits, label, self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost 
Example #16
Source File: common.py    From rl-medical with Apache License 2.0
def play_n_episodes(player, predfunc, nr, render=False):
    """wraps play_one_episode, playing a single episode at a time and logs results
    used when playing demos."""
    logger.info("Start Playing ... ")
    for k in range(nr):
        # if k != 0:
        #     player.restart_episode()
        score, filename, ditance_error, q_values = play_one_episode(player,
                                                                    predfunc,
                                                                    render=render)
        logger.info(
            "{}/{} - {} - score {} - distError {} - q_values {}".format(k + 1, nr, filename, score, ditance_error,
                                                                        q_values))


Example #17
Source File: imagenet_utils.py    From benchmarks with The Unlicense
def build_graph(self, image, label):
        image = self.image_preprocess(image)
        assert self.data_format == 'NCHW'
        image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        loss = ImageNetModel.compute_loss_and_error(
            logits, label, label_smoothing=self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost 
Example #18
Source File: fcdense-ann.py    From petridishnn with MIT License
def get_config(ds_train, ds_val, model_cls):
    # prepare dataset
    steps_per_epoch = ds_train.size() // args.nr_gpu
    starting_epoch = ann_app_utils.grep_starting_epoch(args.load, steps_per_epoch)
    logger.info("The starting epoch is {}".format(starting_epoch))
    args.init_lr = ann_app_utils.grep_init_lr(starting_epoch, lr_schedule)
    logger.info("The starting learning rate is {}".format(args.init_lr))
    model = model_cls(args)
    classification_cbs = model.compute_classification_callbacks()
    loss_select_cbs = model.compute_loss_select_callbacks()

    return TrainConfig(
        dataflow=ds_train,
        callbacks=[
            ModelSaver(checkpoint_dir=args.model_dir, max_to_keep=2, keep_checkpoint_every_n_hours=12),
            InferenceRunner(ds_val,
                            [ScalarStats('cost')] + classification_cbs),
            ScheduledHyperParamSetter('learning_rate', lr_schedule),
            HumanHyperParamSetter('learning_rate')
        ] + loss_select_cbs,
        model=model,
        monitors=[JSONWriter(), ScalarPrinter()],
        steps_per_epoch=steps_per_epoch,
        max_epoch=max_epoch,
        starting_epoch=starting_epoch,
    ) 
Example #19
Source File: logger.py    From ADL with MIT License
def _set_file(path):
    global _FILE_HANDLER
    if os.path.isfile(path):
        backup_name = path + '.' + _get_time_str()
        shutil.move(path, backup_name)
        _logger.info("Existing log file '{}' backuped to '{}'".format(path, backup_name))  # noqa: F821
    hdl = logging.FileHandler(
        filename=path, encoding='utf-8', mode='w')
    hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))

    _FILE_HANDLER = hdl
    _logger.addHandler(hdl)
    _logger.info("Argv: " + ' '.join(sys.argv)) 
Example #20
Source File: DQNModel.py    From rl-medical with Apache License 2.0
def update_target_param():
        all_vars = tf.global_variables()
        ops = []
        G = tf.get_default_graph()
        for v in all_vars:
            target_name = v.op.name
            if target_name.startswith('target'):
                new_name = target_name.replace('target/', '')
                logger.info("{} <- {}".format(target_name, new_name))
                ops.append(v.assign(G.get_tensor_by_name(new_name + ':0')))
        return tf.group(*ops, name='update_target_network') 
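The copy is purely name-based: each variable under the target scope is assigned from the tensor whose name is the same minus the 'target/' prefix. A tiny standalone sketch of that mapping (hypothetical variable names):

target_name = 'target/conv0/W'
new_name = target_name.replace('target/', '')
print('{} <- {}'.format(target_name, new_name))  # target/conv0/W <- conv0/W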
Example #21
Source File: experiments.py    From TGAN with MIT License
def run_experiment(
    name, epoch, steps_per_epoch, sample_rows, train_csv, continuous_cols,
    num_random_search, store_samples=True, force=False
):
    """Run experiment using the given params and collect the results.

    The experiment runs the following steps:

    1. We fetch and split our data between test and train.

    2. We first train a TGAN data synthesizer using the real training data T and generate a
       synthetic training dataset Tsynth.

    3. We then train machine learning models on both the real and synthetic datasets.

    4. We use these trained models on real test data and see how well they perform.

    """
    if os.path.isdir(name):
        if force:
            logger.info('Folder "{}" exists, and force=True. Deleting folder.'.format(name))
            shutil.rmtree(name)  # os.rmdir only removes empty dirs; requires `import shutil`

        else:
            raise ValueError(
                'Folder "{}" already exists. Please use force=True to force deletion '
                'or use a different name.'.format(name))

    # Load and split data
    data = pd.read_csv(train_csv, header=None)  # header=-1 is deprecated; the data has no header row
    train_data, test_data = train_test_split(data, train_size=0.8)

    # Prepare hyperparameter search
    model_kwargs = prepare_hyperparameter_search(epoch, steps_per_epoch, num_random_search)

    return fit_score_model(
        name, model_kwargs, train_data, test_data,
        continuous_cols, sample_rows, store_samples
    ) 
Example #22
Source File: experiments.py    From TGAN with MIT License
def fit_score_model(
        name, model_kwargs, train_data, test_data, continuous_columns,
        sample_rows, store_samples
):
    """Fit and score models using given params."""
    for index, kwargs in enumerate(model_kwargs):
        logger.info('Training TGAN Model %d/%d', index + 1, len(model_kwargs))

        tf.reset_default_graph()
        base_dir = os.path.join('experiments', name)
        output = os.path.join(base_dir, 'model_{}'.format(index))
        model = TGANModel(continuous_columns, output=output, **kwargs)
        model.fit(train_data)
        sampled_data = model.sample(sample_rows)

        if store_samples:
            dir_name = os.path.join(base_dir, 'data')
            if not os.path.isdir(dir_name):
                os.mkdir(dir_name)

            file_name = os.path.join(dir_name, 'model_{}.csv'.format(index))
            sampled_data.to_csv(file_name, index=False, header=True)

        score = evaluate_classification(sampled_data, test_data, continuous_columns)
        model_kwargs[index]['score'] = score

    return model_kwargs 
Example #23
Source File: anytime_network.py    From petridishnn with MIT License
def __init__(self, input_size, args):
        super(DenseNet, self).__init__(input_size, args)
        self.reduction_ratio = self.options.reduction_ratio
        self.growth_rate = self.options.growth_rate
        self.bottleneck_width = self.options.bottleneck_width
        self.dropout_kp = self.options.dropout_kp

        if not self.options.use_init_ch:
            default_ch = self.growth_rate * 2
            if self.init_channel != default_ch:
                self.init_channel = default_ch
                logger.info("Densenet sets the init_channel to be " \
                    + "2*growth_rate by default. " \
                    + "I'm setting this automatically!") 
Example #24
Source File: options.py    From petridishnn with MIT License
def model_options_processing(options):
    """
    Populate some complicated default arguments, and parse comma-separated int lists to int lists.
    """
    if options.net_info_str is None:
        options.net_info = None
        return options
    if isinstance(options.net_info_str, str):
        try:
            options.net_info = net_info_from_str(options.net_info_str)
        except Exception:
            logger.info("Failed info str is:\n{}".format(options.net_info_str))
            raise
    return options 
Example #25
Source File: petridish_main.py    From petridishnn with MIT License
def server_exit(log_dir, companion_pids=None):
    """
    At server exit (or interruption), kill companion crawlers
    to release resources.
    """
    mark_stopped(log_dir, is_interrupted=True)
    # kill companion processes.
    if companion_pids is not None:
        for pid in companion_pids.strip().split(','):
            cmd = 'kill -9 {}'.format(pid)
            logger.info('Exiting. killing process {}...'.format(pid))
            subprocess.call(cmd, shell=True) 
Example #26
Source File: ann_app_utils.py    From petridishnn with MIT License
def grep_starting_epoch(load, steps_per_epoch):
    """
    load : the checkpoint to load from 
    steps_per_epoch : number of batches per epoch

    return:
    starting_epoch : the starting epoch number for the main_loop
    """
    starting_epoch = 1
    if load:
        dir_name, ckpt = os.path.split(load)
        logger.info("{} exists for loading".format(load))
        if ckpt != "checkpoint":
            file_names = [ckpt]
        else:
            file_names = os.listdir(dir_name)
        logger.info("The files we are checking are {}".format(file_names))
        max_step = 0
        for fn in file_names:
            name, ext = os.path.splitext(fn)
            if name[:5] == 'model':
                try:
                    step = int(name[name.rfind('-') + 1:])
                    max_step = max(max_step, step)
                    logger.info("{} is at step {}".format(fn, step))
                except ValueError:
                    continue
        # Integer division keeps starting_epoch an int under Python 3.
        starting_epoch = 1 + max_step // steps_per_epoch
    return starting_epoch 
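For example (hypothetical path and numbers), resuming from a checkpoint named model-15000 with 5000 steps per epoch:

starting_epoch = grep_starting_epoch('train_log/run1/model-15000', 5000)
# 1 + 15000 // 5000 == 4, so the main loop resumes at epoch 4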
Example #27
Source File: ann_app_utils.py    From petridishnn with MIT License
def log_init(args, model_cls):
    """
        Set the log root according to the args.log_dir and 
        log run info
    """
    logger.set_log_root(log_root=args.log_dir)
    logger.auto_set_dir(action='k')
    logger.info("Arguments: {}".format(args))
    logger.info("Model class is {}".format(model_cls))
    logger.info("TF version: {}".format(tf.__version__)) 
Example #28
Source File: logger.py    From petridishnn with MIT License
def _set_file(path):
    global _FILE_HANDLER
    if os.path.isfile(path):
        backup_name = path + '.' + _get_time_str()
        shutil.move(path, backup_name)
        _logger.info("Existing log file '{}' backuped to '{}'".format(path, backup_name))  # noqa: F821
    hdl = logging.FileHandler(
        filename=path, encoding='utf-8', mode='w')
    hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))

    _FILE_HANDLER = hdl
    _logger.addHandler(hdl)
    _logger.info("Save logs to file: {}".format(path))
    _logger.info("Argv: " + ' '.join(sys.argv)) 
Example #29
Source File: logger.py    From petridishnn with MIT License
def set_log_root(log_root):
    global LOG_ROOT
    _logger.info("The log directory root is set to be '{}'".format(log_root))
    LOG_ROOT = log_root 
Example #30
Source File: petridish_main.py    From petridishnn with MIT License
def server_handle_critic_message(
        msg_output, controller, mi_info, options):
    """
    Petridish server handles the return message of a forked
    process that watches over a critic job.
    """
    log_dir_root = logger.get_logger_dir()
    model_dir_root = options.model_dir
    queues = controller.queues
    queue_name, new_ci = msg_output
    is_fail, _ = is_mark_failure(
        _ci_to_dn(log_dir_root, new_ci, queue_name))
    if is_fail:
        logger.info('Failed {} ci={}'.format(queue_name, new_ci))
        return
    logger.info('Updating w/ msg of CRITIC {} ci={}'.format(
        queue_name, new_ci))
    # load the new critic
    ctrl_dn = _ci_to_dn(model_dir_root, new_ci, queue_name)
    controller.update_predictor(ctrl_dn, queue_name)
    # Since we have a new critic model, remove old ones if they exist.
    ctrl_dns = [_ci_to_dn(model_dir_root, ci, queue_name)
                for ci in range(new_ci + 1 - controller.n_critic_procs)]
    for ctrl_dn in filter(os.path.exists, ctrl_dns):
        logger.info('rm -rf {}'.format(ctrl_dn))
        _ = subprocess.check_output(
            'rm -rf {} &'.format(ctrl_dn), shell=True)
    # Sort the affected queue.
    logger.info('Ordering queue {}...'.format(queue_name))
    queue = queues[queue_name]
    controller.update_queue(queue, mi_info)
    logger.info('... done ordering')