Python visualizer.Visualizer() Examples
The following are 3 code examples of visualizer.Visualizer(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out the other functions and classes of the visualizer module.
Example #1
Source File: solvers.py From ganimation_replicate with MIT License
def initialize(self, opt):
    self.opt = opt
    self.visual = Visualizer()
    self.visual.initialize(self.opt)
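For context, here is a minimal, self-contained sketch of how this two-step construction (instantiate, then initialize with options) might be driven. The Visualizer stub and the display_id option are hypothetical stand-ins for the real module; opt is typically an argparse namespace:

from types import SimpleNamespace

class Visualizer:
    # hypothetical stub mirroring the initialize(opt) pattern used above
    def initialize(self, opt):
        self.opt = opt
        print('visualizer ready, display_id=%s' % opt.display_id)

class Solver:
    def initialize(self, opt):
        self.opt = opt
        self.visual = Visualizer()
        self.visual.initialize(self.opt)

# opt is usually parsed from the command line; SimpleNamespace behaves the same
opt = SimpleNamespace(display_id=1)
solver = Solver()
solver.initialize(opt)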
Example #2
Source File: gtsrb_visualize_example.py From backdoor with MIT License
def gtsrb_visualize_label_scan_bottom_right_white_4():
    print('loading dataset')
    X_test, Y_test = load_dataset()
    # transform numpy arrays into data generator
    test_generator = build_data_loader(X_test, Y_test)

    print('loading model')
    model_file = '%s/%s' % (MODEL_DIR, MODEL_FILENAME)
    model = load_model(model_file)

    # initialize visualizer
    visualizer = Visualizer(
        model,
        intensity_range=INTENSITY_RANGE,
        regularization=REGULARIZATION,
        input_shape=INPUT_SHAPE,
        init_cost=INIT_COST,
        steps=STEPS,
        lr=LR,
        num_classes=NUM_CLASSES,
        mini_batch=MINI_BATCH,
        upsample_size=UPSAMPLE_SIZE,
        attack_succ_threshold=ATTACK_SUCC_THRESHOLD,
        patience=PATIENCE,
        cost_multiplier=COST_MULTIPLIER,
        img_color=IMG_COLOR,
        batch_size=BATCH_SIZE,
        verbose=2,
        save_last=SAVE_LAST,
        early_stop=EARLY_STOP,
        early_stop_threshold=EARLY_STOP_THRESHOLD,
        early_stop_patience=EARLY_STOP_PATIENCE)

    log_mapping = {}

    # y_label list to analyze
    y_target_list = list(range(NUM_CLASSES))
    y_target_list.remove(Y_TARGET)
    y_target_list = [Y_TARGET] + y_target_list

    for y_target in y_target_list:
        print('processing label %d' % y_target)
        _, _, logs = visualize_trigger_w_mask(
            visualizer, test_generator, y_target=y_target,
            save_pattern_flag=True)
        log_mapping[y_target] = logs
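One detail worth noting is the label ordering: the scan covers every class, but the list is rebuilt so that the suspected target label Y_TARGET is visualized first. A minimal illustration of that reordering, using the hypothetical values NUM_CLASSES = 5 and Y_TARGET = 3:

# hypothetical values; the GTSRB example defines these as module constants
NUM_CLASSES = 5
Y_TARGET = 3

y_target_list = list(range(NUM_CLASSES))    # [0, 1, 2, 3, 4]
y_target_list.remove(Y_TARGET)              # [0, 1, 2, 4]
y_target_list = [Y_TARGET] + y_target_list  # suspected label moves to the front
print(y_target_list)                        # [3, 0, 1, 2, 4]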
Example #3
Source File: learners.py From rltrader with MIT License
def __init__(self, rl_method='rl', stock_code=None,
             chart_data=None, training_data=None,
             min_trading_unit=1, max_trading_unit=2,
             delayed_reward_threshold=.05,
             net='dnn', num_steps=1, lr=0.001,
             value_network=None, policy_network=None,
             output_path='', reuse_models=True):
    # validate arguments
    assert min_trading_unit > 0
    assert max_trading_unit > 0
    assert max_trading_unit >= min_trading_unit
    assert num_steps > 0
    assert lr > 0
    # reinforcement learning method
    self.rl_method = rl_method
    # environment
    self.stock_code = stock_code
    self.chart_data = chart_data
    self.environment = Environment(chart_data)
    # agent
    self.agent = Agent(self.environment,
                       min_trading_unit=min_trading_unit,
                       max_trading_unit=max_trading_unit,
                       delayed_reward_threshold=delayed_reward_threshold)
    # training data
    self.training_data = training_data
    self.sample = None
    self.training_data_idx = -1
    # feature vector size = training data vector size + agent state size
    self.num_features = self.agent.STATE_DIM
    if self.training_data is not None:
        self.num_features += self.training_data.shape[1]
    # neural network settings
    self.net = net
    self.num_steps = num_steps
    self.lr = lr
    self.value_network = value_network
    self.policy_network = policy_network
    self.reuse_models = reuse_models
    # visualization module
    self.visualizer = Visualizer()
    # memory buffers
    self.memory_sample = []
    self.memory_action = []
    self.memory_reward = []
    self.memory_value = []
    self.memory_policy = []
    self.memory_pv = []
    self.memory_num_stocks = []
    self.memory_exp_idx = []
    self.memory_learning_idx = []
    # epoch-related bookkeeping
    self.loss = 0.
    self.itr_cnt = 0
    self.exploration_cnt = 0
    self.batch_size = 0
    self.learning_cnt = 0
    # output path for logs etc.
    self.output_path = output_path
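The sizing of the network input is the one non-obvious computation here: the feature count is the agent's state dimension plus the number of columns in the training data. A minimal sketch under assumed values (STATE_DIM = 2 and 26 data columns are hypothetical; the real rltrader classes may differ):

import numpy as np

class Agent:
    # hypothetical: the real Agent exposes STATE_DIM as a class attribute
    STATE_DIM = 2

training_data = np.zeros((100, 26))  # assumed shape: 100 samples x 26 features

num_features = Agent.STATE_DIM
if training_data is not None:
    num_features += training_data.shape[1]
print(num_features)  # 28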