Python ale_python_interface.ALEInterface() Examples
The following are 19 code examples of ale_python_interface.ALEInterface().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module ale_python_interface.
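
Most of the examples below follow the same lifecycle: construct the interface, set options, load a ROM, query an action set, then step the emulator with act(). A minimal sketch of that pattern (the ROM path is a placeholder, not taken from any example):

import numpy as np
from ale_python_interface import ALEInterface

ale = ALEInterface()
ale.setInt(b'random_seed', 123)                  # all set* calls must precede loadROM
ale.setFloat(b'repeat_action_probability', 0.0)
ale.loadROM(b'breakout.bin')                     # placeholder ROM path

actions = ale.getMinimalActionSet()              # game-specific subset of the 18 legal actions
total_reward = 0
while not ale.game_over():
    a = actions[np.random.randint(len(actions))]
    total_reward += ale.act(a)                   # act() returns the reward for one step
ale.reset_game()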
Example #1
Source File: atari_game.py From SNIPER-mxnet with Apache License 2.0 | 6 votes |
def ale_load_from_rom(rom_path, display_screen):
    rng = get_numpy_rng()
    try:
        from ale_python_interface import ALEInterface
    except ImportError as e:
        raise ImportError('Unable to import the python package of Arcade Learning Environment. '
                          'ALE may not have been installed correctly. Refer to '
                          '`https://github.com/mgbellemare/Arcade-Learning-Environment` for some '
                          'installation guidance')

    ale = ALEInterface()
    ale.setInt(b'random_seed', rng.randint(1000))
    if display_screen:
        import sys
        if sys.platform == 'darwin':
            import pygame
            pygame.init()
            ale.setBool(b'sound', False)  # Sound doesn't work on OSX
        ale.setBool(b'display_screen', True)
    else:
        ale.setBool(b'display_screen', False)
    ale.setFloat(b'repeat_action_probability', 0)
    ale.loadROM(str.encode(rom_path))
    return ale
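
A hedged usage sketch of the function above (the ROM path is a placeholder, and get_numpy_rng is assumed to come from the surrounding project's utilities):

ale = ale_load_from_rom('roms/breakout.bin', display_screen=False)
actions = ale.getMinimalActionSet()
reward = ale.act(actions[0])  # take one action from the minimal set and collect the reward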
Example #2
Source File: game_state.py From pathnet with MIT License | 6 votes |
def __init__(self, rand_seed, display=False, no_op_max=7):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', rand_seed)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 4)
    self._no_op_max = no_op_max

    if display:
        self._setup_display()

    self.ale.loadROM(ROM.encode('ascii'))

    # collect minimal action set
    self.real_actions = self.ale.getMinimalActionSet()

    # height=210, width=160
    self._screen = np.empty((210, 160, 1), dtype=np.uint8)

    self.reset()
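
This constructor leaves the actual stepping to other methods. A plausible sketch of how the attributes above are typically consumed, assuming up to _no_op_max random no-ops on reset and grayscale reads into the preallocated buffer (illustrative only, not the project's actual methods):

def reset(self):
    self.ale.reset_game()
    # randomize the start state with up to _no_op_max no-op actions (action 0)
    for _ in range(np.random.randint(0, self._no_op_max + 1)):
        self.ale.act(0)

def step(self, action_index):
    reward = self.ale.act(self.real_actions[action_index])
    self.ale.getScreenGrayscale(self._screen)  # fills the 210x160x1 buffer in place
    return self._screen, reward, self.ale.game_over()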
Example #3
Source File: game_state.py From async_deep_reinforce with Apache License 2.0 | 6 votes |
def __init__(self, rand_seed, display=False, no_op_max=7):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', rand_seed)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 4)
    self._no_op_max = no_op_max

    if display:
        self._setup_display()

    self.ale.loadROM(ROM.encode('ascii'))

    # collect minimal action set
    self.real_actions = self.ale.getMinimalActionSet()

    # height=210, width=160
    self._screen = np.empty((210, 160, 1), dtype=np.uint8)

    self.reset()
Example #4
Source File: atari_game.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def ale_load_from_rom(rom_path, display_screen):
    rng = get_numpy_rng()
    try:
        from ale_python_interface import ALEInterface
    except ImportError as e:
        raise ImportError('Unable to import the python package of Arcade Learning Environment. '
                          'ALE may not have been installed correctly. Refer to '
                          '`https://github.com/mgbellemare/Arcade-Learning-Environment` for some '
                          'installation guidance')

    ale = ALEInterface()
    ale.setInt(b'random_seed', rng.randint(1000))
    if display_screen:
        import sys
        if sys.platform == 'darwin':
            import pygame
            pygame.init()
            ale.setBool(b'sound', False)  # Sound doesn't work on OSX
        ale.setBool(b'display_screen', True)
    else:
        ale.setBool(b'display_screen', False)
    ale.setFloat(b'repeat_action_probability', 0)
    ale.loadROM(str.encode(rom_path))
    return ale
Example #5
Source File: emulator.py From fathom with Apache License 2.0 | 6 votes |
def __init__(self, rom_name, vis, frameskip=1, windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", frameskip)
    romfile = str(ROM_PATH) + str(rom_name)
    if not os.path.exists(romfile):
        print('No ROM file found at "' + romfile + '".\nAdjust ROM_PATH or double-check the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
        self.action_map[self.legal_actions[i]] = i
    # print(self.legal_actions)
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)  # permit manual resizing
Example #6
Source File: game_state.py From a3c-distributed_tensorflow with MIT License | 6 votes |
def __init__(self, rand_seed, display=False, no_op_max=7):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', rand_seed)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 4)
    self._no_op_max = no_op_max

    if display:
        self._setup_display()

    self.ale.loadROM(ROM.encode('ascii'))

    # collect minimal action set
    self.real_actions = self.ale.getMinimalActionSet()

    # height=210, width=160
    self._screen = np.empty((210, 160, 1), dtype=np.uint8)

    self.reset()
Example #7
Source File: atari_game.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def ale_load_from_rom(rom_path, display_screen):
    rng = get_numpy_rng()
    try:
        from ale_python_interface import ALEInterface
    except ImportError as e:
        raise ImportError('Unable to import the python package of Arcade Learning Environment. '
                          'ALE may not have been installed correctly. Refer to '
                          '`https://github.com/mgbellemare/Arcade-Learning-Environment` for some '
                          'installation guidance')

    ale = ALEInterface()
    ale.setInt(b'random_seed', rng.randint(1000))
    if display_screen:
        import sys
        if sys.platform == 'darwin':
            import pygame
            pygame.init()
            ale.setBool(b'sound', False)  # Sound doesn't work on OSX
        ale.setBool(b'display_screen', True)
    else:
        ale.setBool(b'display_screen', False)
    ale.setFloat(b'repeat_action_probability', 0)
    ale.loadROM(str.encode(rom_path))
    return ale
Example #8
Source File: Environment.py From Deep-RL-agents with MIT License | 5 votes |
def __init__(self, render=False):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', 0)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 4)
    self.ale.setBool(b'display_screen', render)
    self.ale.loadROM(ENV.encode('ascii'))

    self._screen = np.empty((250, 160, 1), dtype=np.uint8)
    self._no_op_max = 7
    self.img_buffer = []
Example #9
Source File: main.py From tensorflow-rl with Apache License 2.0 | 5 votes |
def get_num_actions(rom_path, rom_name):
    from ale_python_interface import ALEInterface
    filename = '{0}/{1}.bin'.format(rom_path, rom_name)
    ale = ALEInterface()
    ale.loadROM(filename)
    return len(ale.getMinimalActionSet())
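
A hypothetical call of the helper above (the ROM directory and name are placeholders):

n = get_num_actions('/path/to/roms', 'pong')
print(n)  # e.g. 6, the size of Pong's minimal action set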
Example #10
Source File: Environment.py From Deep-RL-agents with MIT License | 5 votes |
def __init__(self, render=False):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', 0)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 4)
    self.ale.setBool(b'display_screen', render)
    self.ale.loadROM(ENV.encode('ascii'))

    self._screen = np.empty((210, 160, 1), dtype=np.uint8)
    self._no_op_max = 7
Example #11
Source File: Environment.py From Deep-RL-agents with MIT License | 5 votes |
def __init__(self, render=False):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', 0)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 3)
    self.ale.setBool(b'display_screen', render)
    self.ale.loadROM(ENV.encode('ascii'))

    self._screen = np.empty((210, 160, 1), dtype=np.uint8)
    self._no_op_max = 7
    self.img_buffer = []
Example #12
Source File: Environment.py From Deep-RL-agents with MIT License | 5 votes |
def __init__(self, render=False):
    self.ale = ALEInterface()
    self.ale.setInt(b'random_seed', 0)
    self.ale.setFloat(b'repeat_action_probability', 0.0)
    self.ale.setBool(b'color_averaging', True)
    self.ale.setInt(b'frame_skip', 4)
    self.ale.setBool(b'display_screen', render)
    self.ale.loadROM(ENV.encode('ascii'))

    self._screen = np.empty((210, 160, 1), dtype=np.uint8)
    self._no_op_max = 7
    self.img_buffer = []
Example #13
Source File: arcade_learning_environment.py From tensorforce with Apache License 2.0 | 5 votes |
def __init__(
    self, level, life_loss_terminal=False, life_loss_punishment=0.0,
    repeat_action_probability=0.0, visualize=False, frame_skip=1, seed=None
):
    super().__init__()

    from ale_python_interface import ALEInterface

    self.environment = ALEInterface()
    self.rom_file = level

    self.life_loss_terminal = life_loss_terminal
    self.life_loss_punishment = life_loss_punishment

    self.environment.setFloat(b'repeat_action_probability', repeat_action_probability)
    self.environment.setBool(b'display_screen', visualize)
    self.environment.setInt(b'frame_skip', frame_skip)
    if seed is not None:
        self.environment.setInt(b'random_seed', seed)

    # All set commands must be done before loading the ROM.
    self.environment.loadROM(rom_file=self.rom_file.encode())
    self.available_actions = tuple(self.environment.getLegalActionSet())

    # Full list of actions:
    # No-Op, Fire, Up, Right, Left, Down, Up Right, Up Left, Down Right, Down Left, Up Fire,
    # Right Fire, Left Fire, Down Fire, Up Right Fire, Up Left Fire, Down Right Fire,
    # Down Left Fire
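
Unlike most examples on this page, this one uses getLegalActionSet(), which always returns the full 18 actions enumerated in the comment, rather than the game-specific minimal set. A quick sketch of the difference (the ROM path is a placeholder):

from ale_python_interface import ALEInterface

ale = ALEInterface()
ale.loadROM(b'breakout.bin')           # placeholder ROM
print(len(ale.getLegalActionSet()))    # 18 for every game
print(len(ale.getMinimalActionSet()))  # e.g. 4 for Breakout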
Example #14
Source File: atari.py From Chimp with Apache License 2.0 | 5 votes |
def __init__(self, settings):
    '''Initialize the Arcade Learning Environment (ALE) using the Python interface
    https://github.com/bbitmaster/ale_python_interface/wiki

    - Set the number of frames to be skipped, the random seed, the ROM, and the title for display.
    - Retrieve the set of legal actions and their number.
    - Retrieve the dimensions of the original screen (width/height), and set the
      dimensions of the cropped screen, together with the padding used to crop
      the screen rectangle.
    - Set the dimensions of the pygame display that will visualize the simulation.
      (May be cropped, showing what the learner sees, or not, showing the full Atari screen.)
    - Allocate memory for generated grayscale screenshots; accepts dims in
      (height/width) format.
    '''
    self.ale = ALEInterface()
    self.ale.setInt("frame_skip", settings["frame_skip"])
    self.ale.setInt("random_seed", settings["seed_simulator"])
    self.ale.loadROM(settings["rom_dir"] + '/' + settings["rom"])

    self.title = "ALE Simulator: " + str(settings["rom"])
    self.actions = self.ale.getLegalActionSet()
    self.n_actions = self.actions.size

    self.screen_dims = self.ale.getScreenDims()
    self.model_dims = settings['model_dims']
    self.pad = settings['pad']
    print("Original screen width/height: " + str(self.screen_dims[0]) + "/" + str(self.screen_dims[1]))
    print("Cropped screen width/height: " + str(self.model_dims[0]) + "/" + str(self.model_dims[1]))

    self.viz_cropped = settings['viz_cropped']
    if self.viz_cropped:
        self.display_dims = (int(self.model_dims[0] * 2), int(self.model_dims[1] * 2))
    else:
        self.display_dims = (int(self.screen_dims[0] * 2), int(self.screen_dims[1] * 2))

    # preallocate an array to accept ALE screen data (height/width)!
    self.screen_data = np.empty((self.screen_dims[1], self.screen_dims[0]), dtype=np.uint8)
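
The cropping described in the docstring might look like the sketch below, assuming pad is the number of border pixels trimmed before resizing to model_dims; this helper is illustrative and not part of Chimp:

import cv2

def crop_screen(screen_data, pad, model_dims):
    # Illustrative only: trim `pad` pixels from each border of the
    # (height, width) grayscale frame, then resize to the model's input size.
    h, w = screen_data.shape
    cropped = screen_data[pad:h - pad, pad:w - pad]
    return cv2.resize(cropped, (model_dims[0], model_dims[1]))  # dsize is (width, height)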
Example #15
Source File: main.py From async-deep-rl with Apache License 2.0 | 5 votes |
def get_num_actions(rom_path, rom_name):
    from ale_python_interface import ALEInterface
    filename = rom_path + "/" + rom_name + ".bin"
    ale = ALEInterface()
    ale.loadROM(filename)
    return len(ale.getMinimalActionSet())
Example #16
Source File: emulator.py From tensorflow-rl with Apache License 2.0 | 4 votes |
def __init__(self, rom_path, rom_name, visualize, actor_id, rseed, single_life_episodes=False):
    self.ale = ALEInterface()

    self.ale.setInt("random_seed", rseed * (actor_id + 1))

    # For fuller control on explicit action repeat (>= ALE 0.5.0)
    self.ale.setFloat("repeat_action_probability", 0.0)

    # Disable frame_skip and color_averaging
    # See: http://is.gd/tYzVpj
    self.ale.setInt("frame_skip", 1)
    self.ale.setBool("color_averaging", False)
    self.ale.loadROM(rom_path + "/" + rom_name + ".bin")
    self.legal_actions = self.ale.getMinimalActionSet()
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    # self.ale.setBool('display_screen', True)

    # Processed historical frames that will be fed in to the network
    # (i.e., four 84x84 images)
    self.screen_images_processed = np.zeros((IMG_SIZE_X, IMG_SIZE_Y, NR_IMAGES))
    self.rgb_screen = np.zeros((self.screen_height, self.screen_width, 3), dtype=np.uint8)
    self.gray_screen = np.zeros((self.screen_height, self.screen_width, 1), dtype=np.uint8)

    self.frame_pool = np.empty((2, self.screen_height, self.screen_width))
    self.current = 0
    self.lives = self.ale.lives()

    self.visualize = visualize
    self.visualize_processed = False
    self.windowname = rom_name + ' ' + str(actor_id)
    if self.visualize:
        logger.debug("Opening emulator window...")
        # from skimage import io
        # io.use_plugin('qt')
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname)
        logger.debug("Emulator window opened")

    if self.visualize_processed:
        logger.debug("Opening processed frame window...")
        cv2.startWindowThread()
        logger.debug("Processed frame window opened")
        cv2.namedWindow(self.windowname + "_processed")

    self.single_life_episodes = single_life_episodes
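
The two-slot frame_pool hints at the standard ALE flicker workaround: max-pooling over the two most recent raw frames. A sketch of how such a pool is typically consumed (illustrative, not this emulator's code):

def get_pooled_frame(frame_pool):
    # Max over the two most recent raw frames to suppress Atari sprite flicker.
    return np.max(frame_pool, axis=0)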
Example #17
Source File: emulator.py From async-deep-rl with Apache License 2.0 | 4 votes |
def __init__(self, rom_path, rom_name, visualize, actor_id, rseed, single_life_episodes=False):
    self.ale = ALEInterface()

    self.ale.setInt("random_seed", rseed * (actor_id + 1))

    # For fuller control on explicit action repeat (>= ALE 0.5.0)
    self.ale.setFloat("repeat_action_probability", 0.0)

    # Disable frame_skip and color_averaging
    # See: http://is.gd/tYzVpj
    self.ale.setInt("frame_skip", 1)
    self.ale.setBool("color_averaging", False)
    self.ale.loadROM(rom_path + "/" + rom_name + ".bin")
    self.legal_actions = self.ale.getMinimalActionSet()
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    # self.ale.setBool('display_screen', True)

    # Processed historical frames that will be fed in to the network
    # (i.e., four 84x84 images)
    self.screen_images_processed = np.zeros((IMG_SIZE_X, IMG_SIZE_Y, NR_IMAGES))
    self.rgb_screen = np.zeros((self.screen_height, self.screen_width, 3), dtype=np.uint8)
    self.gray_screen = np.zeros((self.screen_height, self.screen_width, 1), dtype=np.uint8)

    self.frame_pool = np.empty((2, self.screen_height, self.screen_width))
    self.current = 0
    self.lives = self.ale.lives()

    self.visualize = visualize
    self.visualize_processed = False
    self.windowname = rom_name + ' ' + str(actor_id)
    if self.visualize:
        logger.debug("Opening emulator window...")
        # from skimage import io
        # io.use_plugin('qt')
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname)
        logger.debug("Emulator window opened")

    if self.visualize_processed:
        logger.debug("Opening processed frame window...")
        cv2.startWindowThread()
        logger.debug("Processed frame window opened")
        cv2.namedWindow(self.windowname + "_processed")

    self.single_life_episodes = single_life_episodes
Example #18
Source File: pacman.py From hpg with MIT License | 4 votes |
def __init__(self, rom_path, seed=123, frameskip=4, show_display=False,
             stack_num_states=4, concatenate_state_every=4):
    """
    Parameters:
        frameskip: Either a tuple (indicating a random range to choose from,
            with the top value excluded) or an int; also known as action repeat.
        stack_num_states: Number of dimensions/channels the state should have.
        concatenate_state_every: After how many frames one channel should be
            appended to the state. The number is in absolute frames,
            independent of frameskip.
    """
    self.stack_num_states = stack_num_states
    self.concatenate_state_every = concatenate_state_every

    self.game_path = rom_path
    if not os.path.exists(self.game_path):
        raise IOError('Path %s does not exist' % (self.game_path))
    self.frameskip = frameskip

    try:
        self.ale = ALEInterface()
    except Exception as e:
        print("ALEInterface could not be loaded. ale_python_interface import failed")
        raise e

    # Set some default options
    self.ale.setInt(b'random_seed', seed)
    self.ale.setBool(b'sound', False)
    self.ale.setBool(b'display_screen', show_display)
    self.ale.setFloat(b'repeat_action_probability', 0.)

    # Load the rom
    self.ale.loadROM(self.game_path)
    (self.screen_width, self.screen_height) = self.ale.getScreenDims()

    # Holds the two closest frames to max.
    self.latest_frame_fifo = deque(maxlen=2)
    self.state_fifo = deque(maxlen=stack_num_states)
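
The docstring allows frameskip to be either an int or a random range; a helper honoring that contract might sample the action repeat per step like this (illustrative, not the project's code):

def sample_frameskip(frameskip):
    # An int means a fixed action repeat; a tuple (low, high) means a
    # repeat drawn uniformly from [low, high) on every step.
    if isinstance(frameskip, int):
        return frameskip
    low, high = frameskip
    return np.random.randint(low, high)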
Example #19
Source File: ale.py From async-rl with MIT License | 4 votes |
def __init__(self, rom_filename, seed=None, use_sdl=False, n_last_screens=4,
             frame_skip=4, treat_life_lost_as_terminal=True,
             crop_or_scale='scale', max_start_nullops=30,
             record_screen_dir=None):
    self.n_last_screens = n_last_screens
    self.treat_life_lost_as_terminal = treat_life_lost_as_terminal
    self.crop_or_scale = crop_or_scale
    self.max_start_nullops = max_start_nullops

    ale = ALEInterface()
    if seed is not None:
        assert seed >= 0 and seed < 2 ** 16, \
            "ALE's random seed must be represented by unsigned int"
    else:
        # Use numpy's random state
        seed = np.random.randint(0, 2 ** 16)
    ale.setInt(b'random_seed', seed)
    ale.setFloat(b'repeat_action_probability', 0.0)
    ale.setBool(b'color_averaging', False)
    if record_screen_dir is not None:
        ale.setString(b'record_screen_dir', str.encode(record_screen_dir))
    self.frame_skip = frame_skip
    if use_sdl:
        if 'DISPLAY' not in os.environ:
            raise RuntimeError(
                'Please set DISPLAY environment variable for use_sdl=True')
        # SDL settings below are from the ALE python example
        if sys.platform == 'darwin':
            import pygame
            pygame.init()
            ale.setBool(b'sound', False)  # Sound doesn't work on OSX
        elif sys.platform.startswith('linux'):
            ale.setBool(b'sound', True)
        ale.setBool(b'display_screen', True)
    ale.loadROM(str.encode(rom_filename))

    assert ale.getFrameNumber() == 0

    self.ale = ale
    self.legal_actions = ale.getMinimalActionSet()
    self.initialize()
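
The crop_or_scale option suggests two DQN-style ways of turning the raw 210x160 grayscale frame into an 84x84 network input; the exact numbers below are assumptions based on the parameter name, not taken from the project:

import cv2

def preprocess(raw_screen, crop_or_scale='scale'):
    # raw_screen: 210x160 grayscale ALE frame.
    if crop_or_scale == 'scale':
        return cv2.resize(raw_screen, (84, 84))   # squash the full frame
    # 'crop': resize so the width is 84, then keep the bottom 84 rows
    resized = cv2.resize(raw_screen, (84, 110))   # dsize is (width, height)
    return resized[110 - 84:, :]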