Python theano.gof.local_optimizer() Examples
The following are 12 code examples of theano.gof.local_optimizer().
You may also want to check out all available functions and classes of the module theano.gof.
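Before the examples, here is the usual shape of a local optimizer, pieced together from the examples on this page: theano.gof.local_optimizer() is used as a decorator that lists the ops whose Apply nodes the rewrite should visit, and the decorated function returns a list of replacement outputs (or False/None to leave the node unchanged). This is only a minimal sketch, assuming an environment where the legacy Theano package is importable; the rewrite name remove_double_negation is hypothetical and chosen purely for illustration.

import theano.tensor as T
from theano import gof


# Hypothetical rewrite: only Apply nodes whose op is T.neg are visited.
@gof.local_optimizer([T.neg])
def remove_double_negation(node):
    # Rewrite neg(neg(x)) -> x.  Return a list of replacement outputs,
    # or False to leave the node untouched.
    if (node.op == T.neg and
            node.inputs[0].owner is not None and
            node.inputs[0].owner.op == T.neg):
        return [node.inputs[0].owner.inputs[0]]
    return False


# Register the rewrite and build an optimizer from it, following the
# EquilibriumDB pattern used in the test examples below.
edb = gof.EquilibriumDB()
edb.register('remove_double_negation', remove_double_negation, 'all')
opt = edb.query('+all')

The examples below follow this same pattern: the decorated function inspects node.op and node.inputs, and either proposes replacements or returns False.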
Example #1
Source File: test_debugmode.py From D-VAE with MIT License
def test_badoptimization():
    @gof.local_optimizer([theano.tensor.add])
    def insert_broken_add(node):
        if node.op == theano.tensor.add:
            return [off_by_half(*node.inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_broken_add', insert_broken_add, 'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b,
                        mode=debugmode.DebugMode(optimizer=opt))

    try:
        f([1.0, 2.0, 3.0], [2, 3, 4])
    except debugmode.BadOptimization as e:
        assert str(e.reason) == 'insert_broken_add'
        return  # TEST PASS

    assert False
Example #2
Source File: test_debugmode.py From attention-lvcsr with MIT License
def test_badoptimization():
    @gof.local_optimizer([theano.tensor.add])
    def insert_broken_add(node):
        if node.op == theano.tensor.add:
            return [off_by_half(*node.inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_broken_add', insert_broken_add, 'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b,
                        mode=debugmode.DebugMode(optimizer=opt))

    try:
        f([1.0, 2.0, 3.0], [2, 3, 4])
    except debugmode.BadOptimization as e:
        assert str(e.reason) == 'insert_broken_add'
        return  # TEST PASS

    assert False
Example #3
Source File: test_debugmode.py From D-VAE with MIT License
def test_badoptimization_opt_err():
    """This variant of test_badoptimization() replaces the working code
    with a new apply node that will raise an error.
    """
    @gof.local_optimizer([theano.tensor.add])
    def insert_bigger_b_add(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = theano.tensor.concatenate((inputs[-1],
                                                        inputs[-1]))
                return [node.op(*inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_bigger_b_add', insert_bigger_b_add, 'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b,
                        mode=debugmode.DebugMode(optimizer=opt))

    try:
        f([1.0, 2.0, 3.0], [2, 3, 4])
    except Exception as e:
        assert 'insert_bigger_b_add' in exc_message(e)
        return  # TEST PASS

    assert False
Example #4
Source File: test_debugmode.py From D-VAE with MIT License
def test_stochasticoptimization():
    # this optimization alternates between triggering and not triggering.
    last_time_replaced = [False]

    @gof.local_optimizer([theano.tensor.add])
    def insert_broken_add_sometimes(node):
        if node.op == theano.tensor.add:
            last_time_replaced[0] = not last_time_replaced[0]
            if last_time_replaced[0]:
                return [off_by_half(*node.inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register(
        'insert_broken_add_sometimes',
        insert_broken_add_sometimes,
        'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    try:
        theano.function([a, b],
                        theano.tensor.add(a, b),
                        mode=debugmode.DebugMode(
                            optimizer=opt,
                            check_c_code=True,
                            stability_patience=max(2, config.DebugMode.patience)))
    except debugmode.StochasticOrder:
        return  # TEST PASS

    assert False
Example #5
Source File: opt_util.py From D-VAE with MIT License
def alpha_merge(cls, alpha_in, beta_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.mul and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    if targ is None:
                        return
                    lr = grab_cpu_scalar(node.inputs[0],
                                         nd=targ.outputs[0].ndim)
                else:
                    lr = grab_cpu_scalar(node.inputs[1],
                                         nd=targ.outputs[0].ndim)
                if lr is None or targ is None:
                    return None
                inputs = list(targ.inputs)
                try:
                    c = get_scalar_constant_value(lr)
                    if c == 0:
                        inputs[alpha_in] = lr
                        inputs[beta_in] = lr
                    elif c == 1:
                        inputs[alpha_in] = targ.inputs[alpha_in]
                        inputs[beta_in] = targ.inputs[beta_in]
                    else:
                        inputs[alpha_in] = lr * targ.inputs[alpha_in]
                        inputs[beta_in] = lr * targ.inputs[beta_in]
                except NotScalarConstantError:
                    inputs[alpha_in] = lr * targ.inputs[alpha_in]
                    inputs[beta_in] = lr * targ.inputs[beta_in]
                return maker(targ, *inputs)
        return opt
    return wrapper
Example #6
Source File: opt_util.py From D-VAE with MIT License
def output_merge(cls, alpha_in, beta_in, out_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.add and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                W = node.inputs[1]
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    W = node.inputs[0]
                if targ is None:
                    return None
                if not is_equal(targ.inputs[beta_in], 0.0):
                    # other cases are too complex for now
                    return None
                if W.broadcastable != targ.inputs[out_in].broadcastable:
                    # May change later to do the broadcast, but it's
                    # under discussion.
                    return None
                inputs = list(targ.inputs)
                inputs[out_in] = W
                inputs[beta_in] = _one.clone()
                return maker(targ, *inputs)
        return opt
    return wrapper
Example #7
Source File: test_debugmode.py From attention-lvcsr with MIT License
def test_badoptimization_opt_err():
    """This variant of test_badoptimization() replaces the working code
    with a new apply node that will raise an error.
    """
    @gof.local_optimizer([theano.tensor.add])
    def insert_bigger_b_add(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = theano.tensor.concatenate((inputs[-1],
                                                        inputs[-1]))
                return [node.op(*inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_bigger_b_add', insert_bigger_b_add, 'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b,
                        mode=debugmode.DebugMode(optimizer=opt))

    try:
        f([1.0, 2.0, 3.0], [2, 3, 4])
    except Exception as e:
        assert 'insert_bigger_b_add' in exc_message(e)
        return  # TEST PASS

    assert False
Example #8
Source File: test_debugmode.py From attention-lvcsr with MIT License
def test_stochasticoptimization():
    # this optimization alternates between triggering and not triggering.
    last_time_replaced = [False]

    @gof.local_optimizer([theano.tensor.add])
    def insert_broken_add_sometimes(node):
        if node.op == theano.tensor.add:
            last_time_replaced[0] = not last_time_replaced[0]
            if last_time_replaced[0]:
                return [off_by_half(*node.inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register(
        'insert_broken_add_sometimes',
        insert_broken_add_sometimes,
        'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    try:
        theano.function([a, b],
                        theano.tensor.add(a, b),
                        mode=debugmode.DebugMode(
                            optimizer=opt,
                            check_c_code=True,
                            stability_patience=max(2, config.DebugMode.patience)))
    except debugmode.StochasticOrder:
        return  # TEST PASS

    assert False
Example #9
Source File: opt_util.py From attention-lvcsr with MIT License
def alpha_merge(cls, alpha_in, beta_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.mul and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    if targ is None:
                        return
                    lr = grab_cpu_scalar(node.inputs[0],
                                         nd=targ.outputs[0].ndim)
                else:
                    lr = grab_cpu_scalar(node.inputs[1],
                                         nd=targ.outputs[0].ndim)
                if lr is None or targ is None:
                    return None
                inputs = list(targ.inputs)
                try:
                    c = get_scalar_constant_value(lr)
                    if c == 0:
                        inputs[alpha_in] = lr
                        inputs[beta_in] = lr
                    elif c == 1:
                        inputs[alpha_in] = targ.inputs[alpha_in]
                        inputs[beta_in] = targ.inputs[beta_in]
                    else:
                        inputs[alpha_in] = lr * targ.inputs[alpha_in]
                        inputs[beta_in] = lr * targ.inputs[beta_in]
                except NotScalarConstantError:
                    inputs[alpha_in] = lr * targ.inputs[alpha_in]
                    inputs[beta_in] = lr * targ.inputs[beta_in]
                return maker(targ, *inputs)
        return opt
    return wrapper
Example #10
Source File: opt_util.py From attention-lvcsr with MIT License
def output_merge(cls, alpha_in, beta_in, out_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.add and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                W = node.inputs[1]
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    W = node.inputs[0]
                if targ is None:
                    return None
                if not is_equal(targ.inputs[beta_in], 0.0):
                    # other cases are too complex for now
                    return None
                if W.broadcastable != targ.inputs[out_in].broadcastable:
                    # May change later to do the broadcast, but it's
                    # under discussion.
                    return None
                inputs = list(targ.inputs)
                inputs[out_in] = W
                inputs[beta_in] = _one.clone()
                return maker(targ, *inputs)
        return opt
    return wrapper
Example #11
Source File: opt.py From D-VAE with MIT License
def op_lifter(OP, cuda_only=False):
    """
    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if i.owner and i.owner.op == host_from_gpu:
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break

                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name

                # Check if we should replace
                if (not replace or
                        (cuda_only and
                         get_context(context_name).kind != 'cuda')):
                    return False

                # tag the inputs with the context in case
                # the context was derived from the outputs
                for i in node.inputs:
                    i.tag.context_name = context_name

                new_op = maker(node, context_name)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        return [safe_to_cpu(o) for o in
                                new_op(*node.inputs, return_list=True)]
                    elif isinstance(new_op, (tuple, list)):
                        return [safe_to_cpu(o) for o in new_op]
                    else:  # suppose it is a variable on the GPU
                        return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f
Example #12
Source File: opt.py From attention-lvcsr with MIT License
def op_lifter(OP, cuda_only=False):
    """
    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if i.owner and i.owner.op == host_from_gpu:
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break

                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name

                # Check if we should replace
                if (not replace or
                        (cuda_only and
                         get_context(context_name).kind != 'cuda')):
                    return False

                # tag the inputs with the context in case
                # the context was derived from the outputs
                for i in node.inputs:
                    i.tag.context_name = context_name

                new_op = maker(node, context_name)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        return [safe_to_cpu(o) for o in
                                new_op(*node.inputs, return_list=True)]
                    elif isinstance(new_op, (tuple, list)):
                        return [safe_to_cpu(o) for o in new_op]
                    else:  # suppose it is a variable on the GPU
                        return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f