Python memory.Memory() Examples

The following are 19 code examples of memory.Memory(). Each example lists its source file, originating project, and license. You may also want to check out the other available functions and classes of the memory module.
Example #1
Source File: syscalls.py    From darkc0de-old-stuff with GNU General Public License v3.0
def __init__(self, mmemory, typeaccess=0):
        # re and sys are imported at the top of the original module.
        if not isinstance(mmemory, Memory):
            raise TypeError("ERROR: mmemory must be a Memory instance")

        self.mmemory = mmemory
        self.mmemory.open("r", typeaccess)

        # Build the syscall table by parsing the __NR_* defines in unistd.h.
        try:
            fichier = open("/usr/include/asm/unistd.h", "r")
        except IOError:
            print("No such file /usr/include/asm/unistd.h")
            sys.exit(-1)

        liste = fichier.readlines()
        fichier.close()

        count = 0
        self.lists_syscalls = []
        for i in liste:
            if re.match("#define __NR_", i):
                l = i.split()
                if l[2][0].isdigit():
                    # Explicit numeric value: record it with the name minus the __NR_ prefix.
                    count = int(l[2], 10)
                    self.lists_syscalls.append([count, l[1][5:]])
                else:
                    # Symbolic value: assume it continues from the previous number.
                    count = count + 1
                    self.lists_syscalls.append([count, l[1][5:]])
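The constructor above builds its syscall table by scanning the __NR_* defines in unistd.h. A standalone sketch of just that parsing step is shown below; the helper name parse_syscall_table is ours, not the project's, and this is an illustration rather than the original module's code.

import re

def parse_syscall_table(path="/usr/include/asm/unistd.h"):
    """Return [number, name] pairs for every '#define __NR_<name> <value>' line."""
    syscalls = []
    count = 0
    with open(path, "r") as header:
        for line in header:
            if re.match(r"#define __NR_", line):
                parts = line.split()
                if parts[2][0].isdigit():
                    # Explicit numeric value: take it directly.
                    count = int(parts[2], 10)
                else:
                    # Symbolic value: assume it follows the previous number.
                    count += 1
                syscalls.append([count, parts[1][5:]])  # strip the '__NR_' prefix
    return syscalls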
Example #2
Source File: model.py    From hands-detection with MIT License
def get_memory(self):
    # Use the LSH-based memory when use_lsh is set, otherwise the plain module;
    # both classes share the same constructor arguments.
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size)
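get_memory simply selects a class and calls one shared constructor signature. A self-contained sketch of that pattern is below; the Memory and LSHMemory classes here are hypothetical stand-ins, not the real module's API, and the parameter names are only inferred from the attributes used above.

class Memory:
    def __init__(self, key_dim, memory_size, vocab_size):
        self.key_dim = key_dim          # dimensionality of stored keys (rep_dim above)
        self.memory_size = memory_size  # number of memory slots
        self.vocab_size = vocab_size    # number of output classes

class LSHMemory(Memory):
    """Variant that would add locality-sensitive hashing for approximate lookup."""

def get_memory(rep_dim, memory_size, vocab_size, use_lsh=False):
    # Pick the class first, then call the shared constructor once.
    cls = LSHMemory if use_lsh else Memory
    return cls(rep_dim, memory_size, vocab_size)

mem = get_memory(rep_dim=128, memory_size=8192, vocab_size=10)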
Example #3
Source File: model.py    From multilabel-image-classification-tensorflow with MIT License
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #4
Source File: model.py    From models with Apache License 2.0
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #5
Source File: model.py    From g-tensorflow-models with Apache License 2.0
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #6
Source File: model.py    From HumanRecognition with MIT License
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #7
Source File: model.py    From object_detection_with_tensorflow with MIT License
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #8
Source File: model.py    From object_detection_kitti with Apache License 2.0
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #9
Source File: tasks.py    From darkc0de-old-stuff with GNU General Public License v3.0
def __init__(self, mmemory, typeaccess=0):
        if not isinstance(mmemory, Memory):
            raise TypeError("ERROR: mmemory must be a Memory instance")

        self.mmemory = mmemory
        self.symbols = Symbols(self.mmemory)
        self.typeaccess = typeaccess
Example #10
Source File: networks.py    From darkc0de-old-stuff with GNU General Public License v3.0
def __init__(self, mmemory, typeaccess=0):
        if not isinstance(mmemory, Memory):
            raise TypeError("ERROR: mmemory must be a Memory instance")

        self.mmemory = mmemory
        self.mmemory.open("r", typeaccess)
Example #11
Source File: model.py    From DOTA_models with Apache License 2.0
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #12
Source File: worker.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def __init__(self, global_model, global_average_model, global_optimizer, global_ep, global_ep_r, res_queue, name):
        super(Worker, self).__init__()

        self.env = gym.make(env_name)
        self.env.seed(500)

        self.name = 'w%i' % name
        self.global_ep, self.global_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        self.global_model, self.global_average_model, self.global_optimizer = global_model, global_average_model, global_optimizer
        self.local_model = LocalModel(self.env.observation_space.shape[0], self.env.action_space.n)
        self.num_actions = self.env.action_space.n

        self.memory = Memory(replay_memory_capacity) 
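This worker, and the training loops in the following examples, only rely on a handful of Memory operations: construction with a capacity, push(state, next_state, action, reward, mask), sample(), and len(). A minimal replay-buffer sketch compatible with those calls is given below; it illustrates the interface and is not the project's actual Memory implementation (the Transition namedtuple and the drop-oldest behaviour are our assumptions).

import random
from collections import deque, namedtuple

Transition = namedtuple('Transition', ('state', 'next_state', 'action', 'reward', 'mask'))

class Memory:
    def __init__(self, capacity):
        # Bounded buffer: once full, the oldest transitions are dropped first.
        self.memory = deque(maxlen=capacity)

    def push(self, state, next_state, action, reward, mask):
        self.memory.append(Transition(state, next_state, action, reward, mask))

    def sample(self, batch_size=None):
        # Whole-buffer sample for on-policy rollouts; random mini-batch otherwise.
        if batch_size is None:
            batch = list(self.memory)
        else:
            batch = random.sample(self.memory, batch_size)
        # Transpose the list of transitions into a Transition of lists.
        return Transition(*zip(*batch))

    def __len__(self):
        return len(self.memory)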
Example #13
Source File: model.py    From Gun-Detector with Apache License 2.0
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #14
Source File: model.py    From yolo_v2 with Apache License 2.0
def get_memory(self):
    cls = memory.LSHMemory if self.use_lsh else memory.Memory
    return cls(self.rep_dim, self.memory_size, self.vocab_size) 
Example #15
Source File: train.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    net = TRPO(num_inputs, num_actions)
    writer = SummaryWriter('logs')

    net.to(device)
    net.train()
    running_score = 0
    steps = 0
    loss = 0
    for e in range(30000):
        done = False
        memory = Memory()

        score = 0
        state = env.reset()
        state = torch.Tensor(state).to(device)
        state = state.unsqueeze(0)

        while not done:
            steps += 1

            action = net.get_action(state)
            next_state, reward, done, _ = env.step(action)

            next_state = torch.Tensor(next_state)
            next_state = next_state.unsqueeze(0)

            # mask marks non-terminal transitions; ending the episode before the
            # 500-step cap is penalised with a reward of -1.
            mask = 0 if done else 1
            reward = reward if not done or score == 499 else -1

            action_one_hot = torch.zeros(2)
            action_one_hot[action] = 1
            memory.push(state, next_state, action_one_hot, reward, mask)

            score += reward
            state = next_state

        loss = TRPO.train_model(net, memory.sample())

        score = score if score == 500.0 else score + 1
        running_score = 0.99 * running_score + 0.01 * score
        if e % log_interval == 0:
            print('{} episode | score: {:.2f}'.format(
                e, running_score))
            writer.add_scalar('log/score', float(running_score), e)
            writer.add_scalar('log/loss', float(loss), e)

        if running_score > goal_score:
            break 
Example #16
Source File: train.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    print('state size:', num_inputs)
    print('action size:', num_actions)

    net = TNPG(num_inputs, num_actions)
    writer = SummaryWriter('logs')

    net.to(device)
    net.train()
    running_score = 0
    steps = 0
    loss = 0
    for e in range(30000):
        done = False
        memory = Memory()

        score = 0
        state = env.reset()
        state = torch.Tensor(state).to(device)
        state = state.unsqueeze(0)

        while not done:
            steps += 1

            action = net.get_action(state)
            next_state, reward, done, _ = env.step(action)

            next_state = torch.Tensor(next_state)
            next_state = next_state.unsqueeze(0)

            mask = 0 if done else 1
            reward = reward if not done or score == 499 else -1

            action_one_hot = torch.zeros(2)
            action_one_hot[action] = 1
            memory.push(state, next_state, action_one_hot, reward, mask)

            score += reward
            state = next_state

        loss = TNPG.train_model(net, memory.sample())

        score = score if score == 500.0 else score + 1
        running_score = 0.99 * running_score + 0.01 * score
        if e % log_interval == 0:
            print('{} episode | score: {:.2f}'.format(
                e, running_score))
            writer.add_scalar('log/score', float(running_score), e)
            writer.add_scalar('log/loss', float(loss), e)

        if running_score > goal_score:
            break 
Example #17
Source File: worker.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def run(self):

        while self.global_ep.value < max_episode:
            self.local_model.pull_from_global_model(self.global_model)
            done = False
            score = 0
            steps = 0

            state = self.env.reset()
            state = torch.Tensor(state)
            state = state.unsqueeze(0)
            memory = Memory(n_step)

            while True:
                policy, value = self.local_model(state)
                action = self.get_action(policy, self.num_actions)

                next_state, reward, done, _ = self.env.step(action)
                next_state = torch.Tensor(next_state)
                next_state = next_state.unsqueeze(0)

                mask = 0 if done else 1
                reward = reward if not done or score == 499 else -1
                action_one_hot = torch.zeros(2)
                action_one_hot[action] = 1
                memory.push(state, next_state, action_one_hot, reward, mask)

                score += reward
                state = next_state

                if len(memory) == n_step or done:
                    batch = memory.sample()
                    loss = self.local_model.push_to_global_model(batch, self.global_model, self.global_optimizer)
                    self.local_model.pull_from_global_model(self.global_model)
                    memory = Memory(n_step)

                    if done:
                        running_score = self.record(score, loss)
                        break


        self.res_queue.put(None) 
Example #18
Source File: train.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def main():
    env = gym.make(env_name)
    env.seed(500)
    torch.manual_seed(500)

    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.n
    env.close()

    global_target_model = Model(num_inputs, num_actions)
    global_online_model = Model(num_inputs, num_actions)
    global_target_model.train()
    global_online_model.train()
    
    global_target_model.load_state_dict(global_online_model.state_dict())
    global_target_model.share_memory()
    global_online_model.share_memory()
    
    global_memory = Memory(replay_memory_capacity)
    
    
    global_ep, global_ep_r, res_queue, global_memory_pipe = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue(), mp.Queue()

    writer = SummaryWriter('logs')

    n = 2 
    epsilons = [(i * 0.05 + 0.1) for i in range(n)]

    actors = [Actor(global_target_model, global_memory_pipe, global_ep, global_ep_r, epsilons[i], i) for i in range(n)]
    [w.start() for w in actors]
    learner = Learner(global_online_model, global_target_model, global_memory, global_memory_pipe, res_queue)
    learner.start()

    res = []
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
            [ep, loss] = r
            # writer.add_scalar('log/score', float(ep_r), ep)
            writer.add_scalar('log/loss', float(loss), ep)
        else:
            break
    [w.join() for w in actors] 
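The main() above coordinates actor and learner processes through multiprocessing queues and stops draining res_queue when it receives a None sentinel. A stripped-down sketch of that queue-with-sentinel pattern, independent of the project's Actor and Learner classes, is shown below; the worker function is purely illustrative.

import multiprocessing as mp

def worker(res_queue, n_items):
    # Produce a few results, then signal completion with a None sentinel.
    for i in range(n_items):
        res_queue.put([i, float(i) * 0.5])  # e.g. [episode, loss]
    res_queue.put(None)

if __name__ == '__main__':
    res_queue = mp.Queue()
    p = mp.Process(target=worker, args=(res_queue, 5))
    p.start()

    results = []
    while True:
        r = res_queue.get()
        if r is None:        # sentinel: the producer is done
            break
        results.append(r)

    p.join()
    print(results)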
Example #19
Source File: worker.py    From Reinforcement-Learning-Pytorch-Cartpole with MIT License
def run(self):
        epsilon = 1.0
        steps = 0
        while self.global_ep.value < max_episode:
            if self.global_ep_r.value > goal_score:
                break
            done = False

            score = 0
            state = self.env.reset()
            state = torch.Tensor(state).to(device)
            state = state.unsqueeze(0)

            memory = Memory(async_update_step)

            while not done:
                steps += 1

                action = self.get_action(state, epsilon)
                next_state, reward, done, _ = self.env.step(action)

                next_state = torch.Tensor(next_state)
                next_state = next_state.unsqueeze(0)

                mask = 0 if done else 1
                reward = reward if not done or score == 499 else -1
                action_one_hot = np.zeros(2)
                action_one_hot[action] = 1
                memory.push(state, next_state, action_one_hot, reward, mask)

                score += reward
                state = next_state

                epsilon -= 0.00001
                epsilon = max(epsilon, 0.1)

                if len(memory) == async_update_step or done:
                    batch = memory.sample()
                    loss = QNet.train_model(self.online_net, self.target_net, self.optimizer, batch)
                    memory = Memory(async_update_step)
                    if done:
                        self.record(score, epsilon, loss)
                        break
                if steps % update_target == 0:
                    self.update_target_model()

            score = score if score == 500.0 else score + 1

        self.res_queue.put(None)