Python worker.Worker() Examples

The following are 12 code examples of worker.Worker(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module worker, or try the search function.
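Most of the examples below share one pattern: construct a Worker with its collaborators (a crawler, an environment, a shared model, queues), then drive it either by calling a step method such as iterate() in a loop or by handing its work method to a thread. Here is a minimal sketch of that pattern; the bare Worker() constructor, iterate(), and work() are hypothetical stand-ins, since each project defines its own signatures, as the examples show:

import threading

from worker import Worker  # module layout assumed; it varies by project

worker = Worker()  # real projects pass crawlers, models, queues, etc.
worker.iterate()   # single step, as in the crawler tests below

# or run the worker on its own thread, as in the RL examples below
thread = threading.Thread(target=worker.work)  # work() is a hypothetical method
thread.start()
thread.join()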
Example #1
Source File: test_functional_containers_crawler.py    From agentless-system-crawler with Apache License 2.0
def testCrawlContainerKafka2(self):
        emitters = EmittersManager(urls=['kafka://localhost:9092/test'])
        crawler = ContainersCrawler(
            features=['os', 'process'],
            user_list=self.container['Id'])
        worker = Worker(emitters=emitters, frequency=-1,
                        crawler=crawler)
        worker.iterate()
        kafka = pykafka.KafkaClient(hosts='localhost:9092')
        topic = kafka.topics['test']
        consumer = topic.get_simple_consumer()
        message = consumer.consume()
        assert '"cmd":"/bin/sleep 60"' in message.value

        for i in range(1, 5):
            worker.iterate()
            message = consumer.consume()
            assert '"cmd":"/bin/sleep 60"' in message.value 
Example #2
Source File: master.py    From ppo-lstm-parallel with MIT License
def make_worker(env_producer, i, q, w_in_queue):
    return Worker(env_producer, i, q, w_in_queue) 
Example #3
Source File: mountaincar_a3c.py    From reinforcement_learning with MIT License
def main():
  '''Example of A3C running on MountainCar environment'''
  tf.reset_default_graph()

  history = []

  with tf.device('/{}:0'.format(DEVICE)):
    sess = tf.Session()
    global_model = ac_net.AC_Net(
        STATE_SIZE,
        ACTION_SIZE,
        LEARNING_RATE,
        'global',
        n_h1=N_H1,
        n_h2=N_H2)
    workers = []
    for i in range(NUM_WORKERS):
      env = gym.make(ENV_NAME)
      env._max_episode_steps = MAX_STEPS
      workers.append(worker.Worker(env,
                                   state_size=STATE_SIZE, action_size=ACTION_SIZE,
                                   worker_name='worker_{}'.format(i), global_name='global',
                                   lr=LEARNING_RATE, gamma=GAMMA, t_max=T_MAX, sess=sess,
                                   history=history, n_h1=N_H1, n_h2=N_H2, logdir=LOG_DIR))

    sess.run(tf.global_variables_initializer())

    for workeri in workers:
      # bind the current worker as a default argument so each thread
      # runs its own worker (a bare lambda would late-bind the loop variable)
      worker_work = lambda w=workeri: w.work(NUM_EPISODES)
      thread = threading.Thread(target=worker_work)
      thread.start()
Example #4
Source File: cartpole_a3c.py    From reinforcement_learning with MIT License
def main():
  '''Example of A3C running on Cartpole environment'''
  tf.reset_default_graph()

  history = []

  with tf.device('/{}:0'.format(DEVICE)):
    sess = tf.Session()
    global_model = ac_net.AC_Net(
        STATE_SIZE,
        ACTION_SIZE,
        LEARNING_RATE,
        'global',
        n_h1=N_H1,
        n_h2=N_H2)
    workers = []
    for i in range(NUM_WORKERS):
      env = gym.make('CartPole-v0')
      env._max_episode_steps = 200
      workers.append(worker.Worker(env,
                                   state_size=STATE_SIZE, action_size=ACTION_SIZE,
                                   worker_name='worker_{}'.format(i), global_name='global',
                                   lr=LEARNING_RATE, gamma=GAMMA, t_max=T_MAX, sess=sess,
                                   history=history, n_h1=N_H1, n_h2=N_H2, logdir=LOG_DIR))

    sess.run(tf.global_variables_initializer())

    for workeri in workers:
      # bind the current worker as a default argument so each thread
      # runs its own worker (a bare lambda would late-bind the loop variable)
      worker_work = lambda w=workeri: w.work(NUM_EPISODES)
      thread = threading.Thread(target=worker_work)
      thread.start()
Example #5
Source File: acrobot_a3c.py    From reinforcement_learning with MIT License
def main():
  '''Example of A3C running on Acrobot environment'''
  tf.reset_default_graph()

  history = []

  with tf.device('/{}:0'.format(DEVICE)):
    sess = tf.Session()
    global_model = ac_net.AC_Net(
        STATE_SIZE,
        ACTION_SIZE,
        LEARNING_RATE,
        'global',
        n_h1=N_H1,
        n_h2=N_H2)
    workers = []
    for i in range(NUM_WORKERS):
      env = gym.make('Acrobot-v1')
      env._max_episode_steps = 3000
      workers.append(worker.Worker(env,
                                   state_size=STATE_SIZE, action_size=ACTION_SIZE,
                                   worker_name='worker_{}'.format(i), global_name='global',
                                   lr=LEARNING_RATE, gamma=GAMMA, t_max=T_MAX, sess=sess,
                                   history=history, n_h1=N_H1, n_h2=N_H2, logdir=LOG_DIR))

    sess.run(tf.global_variables_initializer())

    for workeri in workers:
      # bind the current worker as a default argument so each thread
      # runs its own worker (a bare lambda would late-bind the loop variable)
      worker_work = lambda w=workeri: w.work(NUM_EPISODES)
      thread = threading.Thread(target=worker_work)
      thread.start()
Example #6
Source File: test_functional_logs_linker.py    From agentless-system-crawler with Apache License 2.0
def testLinkUnlinkContainer(self):
        docker_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace,
                                  self.container_name, 'docker.log')
        messages_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace,
                                    self.container_name, 'var/log/messages')
        crawler = DockerContainersLogsLinker(
            environment='cloudsight',
            user_list='ALL',
            host_namespace=self.host_namespace)
        worker = Worker(crawler=crawler)

        self.startContainer()
        worker.iterate()
        with open(docker_log, 'r') as log:
            assert 'hi' in log.read()
        with open(messages_log, 'r') as log:
            assert 'hi' in log.read()
        assert os.path.exists(docker_log)
        assert os.path.exists(messages_log)
        assert os.path.islink(docker_log)
        assert os.path.islink(messages_log)

        self.removeContainer()
        worker.iterate()
        assert not os.path.exists(docker_log)
        assert not os.path.exists(messages_log)
        assert not os.path.islink(docker_log)
        assert not os.path.islink(messages_log)

        self.startContainer()
        worker.iterate()
        assert os.path.exists(docker_log)
        with open(docker_log, 'r') as log:
            assert 'hi' in log.read()
        with open(messages_log, 'r') as log:
            assert 'hi' in log.read()
        assert os.path.exists(messages_log)
        assert os.path.islink(docker_log)
        assert os.path.islink(messages_log)

        self.removeContainer() 
Example #7
Source File: bilibili_member_crawler.py    From bilibili_member_crawler with MIT License
def start(cls):
        cls.init()
        # start the task-dispatching thread
        Distributor(FETCH_MID_FROM, FETCH_MID_TO + 1).start()
        # start the crawler threads
        for i in range(0, THREADS_NUM):
            Worker(f'Worker-{i}').start() 
Example #8
Source File: train.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    online_net = QNet(num_inputs, num_actions)
    target_net = QNet(num_inputs, num_actions)
    target_net.load_state_dict(online_net.state_dict())
    online_net.share_memory()
    target_net.share_memory()

    optimizer = SharedAdam(online_net.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    writer = SummaryWriter('logs')

    online_net.to(device)
    target_net.to(device)
    online_net.train()
    target_net.train()

    workers = [Worker(online_net, target_net, optimizer, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers] 
Example #9
Source File: train.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    env.close()

    global_model = Model(num_inputs, num_actions)
    global_average_model = Model(num_inputs, num_actions)
    global_model.share_memory()
    global_average_model.share_memory()
    global_optimizer = SharedAdam(global_model.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    writer = SummaryWriter('logs')

    n = mp.cpu_count()
    workers = [Worker(global_model, global_average_model, global_optimizer, global_ep, global_ep_r, res_queue, i) for i in range(n)]
    [w.start() for w in workers]
    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers] 
Example #10
Source File: train.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    global_model = Model(num_inputs, num_actions)
    global_model.share_memory()
    global_optimizer = SharedAdam(global_model.parameters(), lr=lr)
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    writer = SummaryWriter('logs')

    workers = [Worker(global_model, global_optimizer, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, ep_r, loss] = r
            writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in workers] 
Example #11
Source File: train_A3C.py    From reinforce_py with MIT License
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
    global_steps_counter = itertools.count()  # thread-safe

    global_net = Net(S_DIM, A_DIM, 'global', args)
    num_workers = args.threads
    workers = []

    # create workers
    for i in range(1, num_workers + 1):
        # worker ids start at 1; only the first worker gets the summary writer
        worker_summary_writer = summary_writer if i == 1 else None
        worker = Worker(i, make_env(args), global_steps_counter,
                        worker_summary_writer, args)
        workers.append(worker)

    saver = tf.train.Saver(max_to_keep=5)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...\n')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...\n')
            sess.run(tf.global_variables_initializer())
        print_params_nums()
        # Start the work loop for each worker in a separate thread
        worker_threads = []
        for worker in workers:
            # bind the current worker as a default argument so each thread runs its own worker
            t = threading.Thread(target=lambda w=worker: w.run(sess, coord, saver))
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)

        if args.eval_every > 0:
            evaluator = Evaluate(
                global_net, summary_writer, global_steps_counter, args)
            evaluate_thread = threading.Thread(
                target=lambda: evaluator.run(sess, coord))
            evaluate_thread.start()

        coord.join(worker_threads) 
Example #12
Source File: train_A3C.py    From reinforce_py with MIT License
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    tf.reset_default_graph()

    global_ep = tf.Variable(
        0, dtype=tf.int32, name='global_ep', trainable=False)
    
    env = Doom(visiable=False)  # spelling follows this project's Doom wrapper
    Net(env.state_dim, env.action_dim, 'global', None)
    num_workers = args.parallel
    workers = []

    # create workers
    for i in range(num_workers):
        w = Worker(i, Doom(), global_ep, args)
        workers.append(w)

    print('%d workers in total.\n' % num_workers)
    saver = tf.train.Saver(max_to_keep=3)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...')
            sess.run(tf.global_variables_initializer())
        print_net_params_number()

        # Start the work loop for each worker in a separate thread
        worker_threads = []
        for w in workers:
            # bind w as a default argument so each thread runs its own worker
            run_fn = lambda w=w: w.run(sess, coord, saver)
            t = threading.Thread(target=run_fn)
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)
        coord.join(worker_threads)