Python theano.compile.SharedVariable() Examples
The following are 23 code examples of theano.compile.SharedVariable(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.compile, or try the search function.
Example #1
Source File: sharedvar.py From DualLearning with MIT License | 6 votes |
def __init__(self, svobj):
    '''Constructor of the MVSharedVariable

    The constructor will create an ArrayTableHandler and associate the shared
    variable with it. The initial value of the ArrayTableHandler will be the
    same as the value of the SharedVariable. If a different initial value is
    used in different processes, the average of them will be used as the
    initial value.
    '''
    assert(isinstance(svobj, SharedVariable))
    self._svobj = svobj
    self._mv_array = mv.ArrayTableHandler(self._svobj.get_value().size,
                                          init_value=self._svobj.get_value().reshape((-1,)))

    mv.barrier()  # add barrier to make sure the initial values have taken effect

    # _last_mv_data stores a copy of the value. It is used to calculate
    # the update for multiverso when calling mv_sync
    self._last_mv_data = self._mv_array.get().reshape(self._svobj.get_value().shape)
    self._svobj.set_value(self._last_mv_data, borrow=False)
Example #2
Source File: sharedvar.py From attention-lvcsr with MIT License | 6 votes |
def sparse_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, format=None):
    """
    SharedVariable Constructor for SparseType.

    writeme

    """
    if not isinstance(value, scipy.sparse.spmatrix):
        raise TypeError("Expected a sparse matrix in the sparse shared variable "
                        "constructor. Received: ", value.__class__)

    if format is None:
        format = value.format
    type = SparseType(format=format, dtype=value.dtype)

    if not borrow:
        value = copy.deepcopy(value)

    return SparseTensorSharedVariable(type=type, value=value, name=name,
                                      strict=strict, allow_downcast=allow_downcast)
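In normal use this constructor is not called directly: it is registered with `theano.shared`, which dispatches on the type of the value. A minimal usage sketch, assuming SciPy is installed and `theano.sparse` is importable so the registration happens (the variable names are illustrative only):

import numpy as np
import scipy.sparse
import theano
import theano.sparse  # explicit import to make sure the sparse constructor is registered

# Wrap a small CSR matrix in a shared variable.
mat = scipy.sparse.csr_matrix(np.eye(3, dtype='float64'))
s = theano.shared(mat, name='sparse_weights')

print(type(s))        # expected: SparseTensorSharedVariable
print(s.get_value())  # a deep copy of the CSR matrix (borrow=False by default)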
Example #3
Source File: sharedvar.py From D-VAE with MIT License | 6 votes |
def sparse_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, format=None):
    """
    SharedVariable Constructor for SparseType.

    writeme

    """
    if not isinstance(value, scipy.sparse.spmatrix):
        raise TypeError("Expected a sparse matrix in the sparse shared variable "
                        "constructor. Received: ", value.__class__)

    if format is None:
        format = value.format
    type = SparseType(format=format, dtype=value.dtype)

    if not borrow:
        value = copy.deepcopy(value)

    return SparseTensorSharedVariable(type=type, value=value, name=name,
                                      strict=strict, allow_downcast=allow_downcast)
Example #4
Source File: sharedvar.py From attention-lvcsr with MIT License | 5 votes |
def tensor_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, broadcastable=None, target='cpu'):
    """
    SharedVariable Constructor for TensorType.

    Notes
    -----
    Regarding the inference of the broadcastable pattern...
    The default is to assume that the value might be resized in any
    dimension, so the default broadcastable is
    ``(False,)*len(value.shape)``. The optional `broadcastable` argument
    will override this default.

    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, numpy.ndarray):
        raise TypeError()

    # if no broadcastable is given, then the default is to assume that
    # the value might be resized in any dimension in the future.
    #
    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = TensorType(value.dtype, broadcastable=broadcastable)
    return TensorSharedVariable(type=type,
                                value=numpy.array(value, copy=(not borrow)),
                                name=name,
                                strict=strict,
                                allow_downcast=allow_downcast)


# TensorSharedVariable brings in the tensor operators, is not ideal, but works
# as long as we dont do purely scalar-scalar operations
# _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__
#
# N.B. THERE IS ANOTHER CLASS CALLED ScalarSharedVariable in the
# theano.scalar.sharedvar file. It is not registered as a shared_constructor,
# this one is.
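This is the constructor `theano.shared` uses for plain NumPy arrays on the CPU. A minimal sketch of the broadcastable behaviour described in the docstring, assuming a standard Theano installation (names are illustrative):

import numpy as np
import theano

value = np.zeros((3, 1), dtype='float64')

# Default: every dimension is assumed resizable, so broadcastable is all False.
a = theano.shared(value)
print(a.broadcastable)  # (False, False)

# Override the default so the second dimension is treated as broadcastable.
b = theano.shared(value, broadcastable=(False, True))
print(b.broadcastable)  # (False, True)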
Example #5
Source File: sharedvar.py From DualLearning with MIT License | 5 votes |
def mv_shared(*args, **kwargs):
    '''mv_shared works the same as `theano.shared`

    It calls `theano.shared` to create the SharedVariable and uses
    MVSharedVariable to wrap it.
    '''
    var = shared(*args, **kwargs)
    mv_shared.shared_vars.append(MVSharedVariable(var))
    return var
Example #6
Source File: sharedvar.py From DualLearning with MIT License | 5 votes |
def __getattribute__(self, attr):
    '''This function makes MVSharedVariable act the same as SharedVariable'''
    if attr in ['_svobj', '_mv_array', '_last_mv_data']:
        # If getting an attribute of self, use the parent __getattribute__
        # to fetch it from the object; otherwise it would fall into an
        # infinite loop
        return object.__getattribute__(self, attr)
    elif attr in ['mv_sync', "__getstate__"]:
        # If calling a method of MVSharedVariable, call the method directly
        # and bind it to the self object
        return getattr(MVSharedVariable, attr).__get__(self)
    else:
        # Otherwise get the attribute from the wrapped object
        return getattr(self._svobj, attr)
Example #7
Source File: sharedvar.py From DualLearning with MIT License | 5 votes |
def mv_sync(self):
    '''sync values with the multiverso server

    mv_sync will add the delta of the SharedVariable, which is usually
    the gradient in typical examples, to the parameter server and then
    fetch the latest value from multiverso.
    '''
    # because multiverso always uses the add method to sync values, the
    # delta will be the difference between the current value and the last
    # synced value
    self._mv_array.add(self._svobj.get_value() - self._last_mv_data)

    self._svobj.set_value(self._mv_array.get().reshape(self._svobj.get_value().shape))
    self._last_mv_data = self._svobj.get_value(borrow=False)
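Putting Examples #1, #5, #6 and #7 together, the intended workflow is to create parameters with `mv_shared` instead of `theano.shared`, and to call `mv_sync` after each local update so the delta is pushed to the parameter server and the latest global value pulled back. A rough sketch of that workflow; the `multiverso` import path and the `init`/`shutdown` calls are assumptions based on multiverso's Python bindings, and the training step is a placeholder:

import numpy as np
import multiverso as mv                         # assumed binding
from multiverso.theano_ext import sharedvar     # assumed module path (this example's sharedvar.py)

mv.init()

# Create parameters through the wrapper so they are tracked for syncing.
W = sharedvar.mv_shared(np.zeros((784, 10), dtype='float32'), name='W')

for step in range(100):
    # ... run a compiled Theano update that modifies W locally ...
    W.mv_sync()  # push the local delta, pull the latest global value

mv.shutdown()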
Example #8
Source File: sharedvar.py From attention-lvcsr with MIT License | 5 votes |
def shared(value, name=None, strict=False, allow_downcast=None):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    """
    if not isinstance(value, (numpy.number, float, int, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = getattr(numpy, dtype)(value)
    scalar_type = Scalar(dtype=dtype)
    rval = ScalarSharedVariable(type=scalar_type,
                                value=value,
                                name=name,
                                strict=strict,
                                allow_downcast=allow_downcast)
    return rval
Example #9
Source File: type.py From attention-lvcsr with MIT License | 5 votes |
def gpuarray_shared_constructor(value, name=None, strict=False,
                                allow_downcast=None, borrow=False,
                                broadcastable=None, target=None):
    """
    SharedVariable constructor for GpuArrayType.

    See :func:`theano.shared`.

    """
    if target == 'gpu' or target == 'cpu':
        raise TypeError('not for me')

    if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):
        raise TypeError('ndarray or GpuArray required')

    try:
        get_context(target)
    except ContextNotDefined:
        # Don't make this a hard error if we attempt to make a shared
        # variable while there is no default context.
        if target is None:
            raise TypeError('No default context and no context specified')
        raise

    if broadcastable is None:
        broadcastable = (False,) * value.ndim
    type = GpuArrayType(value.dtype, broadcastable, context_name=target)
    deviceval = pygpu.gpuarray.array(value, copy=(not borrow),
                                     context=type.context)
    return GpuArraySharedVariable(type=type, value=deviceval, name=name,
                                  strict=strict)
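With the libgpuarray backend this constructor is selected when `theano.shared` is given a context name as `target`. A hedged sketch, assuming a device has been mapped to the context name 'dev0' (for example via `THEANO_FLAGS=contexts=dev0->cuda0`); 'dev0' is a hypothetical name, not something taken from the example:

import numpy as np
import theano

value = np.ones((2, 2), dtype='float32')

# 'dev0' must match a configured gpuarray context, otherwise this raises.
W = theano.shared(value, target='dev0', name='W_gpu')
print(type(W))  # expected: GpuArraySharedVariable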
Example #10
Source File: var.py From attention-lvcsr with MIT License | 5 votes |
def cuda_shared_constructor(value, name=None, strict=False,
                            allow_downcast=None, borrow=False,
                            broadcastable=None, target='gpu'):
    """
    SharedVariable Constructor for CudaNdarrayType.

    """
    if target != 'gpu':
        raise TypeError('not for gpu')

    # THIS CONSTRUCTOR TRIES TO CAST VALUE TO A FLOAT32, WHICH THEN GOES ONTO THE CARD
    # SO INT shared vars, float64 shared vars, etc. all end up on the card.
    # THIS IS NOT THE DEFAULT BEHAVIOUR THAT WE WANT.
    # SEE float32_shared_constructor

    # TODO: what should strict mean in this context, since we always have to make a copy?
    if strict:
        _value = value
    else:
        _value = theano._asarray(value, dtype='float32')

    if not isinstance(_value, numpy.ndarray):
        raise TypeError('ndarray required')
    if _value.dtype.num != CudaNdarrayType.typenum:
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)
    print("trying to return?")
    try:
        rval = CudaNdarraySharedVariable(type=type, value=_value, name=name,
                                         strict=strict)
    except Exception as e:
        print("ERROR", e)
        raise
    return rval
Example #11
Source File: var.py From attention-lvcsr with MIT License | 5 votes |
def get_value(self, borrow=False, return_internal_type=False):
    """
    Return the value of this SharedVariable's internal array.

    Parameters
    ----------
    borrow
        Permit the return of internal storage, when used in conjunction
        with ``return_internal_type=True``.
    return_internal_type
        True to return the internal ``cuda_ndarray`` instance rather
        than a ``numpy.ndarray`` (Default False).

    By default ``get_value()`` copies from the GPU to a ``numpy.ndarray``
    and returns that host-allocated array.

    ``get_value(False,True)`` will return a GPU-allocated copy of the
    original GPU array.

    ``get_value(True,True)`` will return the original GPU-allocated array
    without any copying.

    """
    if return_internal_type or not self.get_value_return_ndarray:
        # return a cuda_ndarray
        if borrow:
            return self.container.value
        else:
            return copy.deepcopy(self.container.value)
    else:  # return an ndarray
        return numpy.asarray(self.container.value)
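The three calling patterns from the docstring look like this in practice. A minimal sketch, assuming the old CUDA backend is active (e.g. `THEANO_FLAGS=device=gpu,floatX=float32`) so that `W` is a CudaNdarraySharedVariable:

import numpy as np
import theano

W = theano.shared(np.zeros((4, 4), dtype='float32'), name='W')

host_copy = W.get_value()                                        # numpy.ndarray copy (default)
gpu_copy = W.get_value(borrow=False, return_internal_type=True)  # new GPU-allocated copy
gpu_alias = W.get_value(borrow=True, return_internal_type=True)  # the internal GPU array itself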
Example #12
Source File: builders.py From attention-lvcsr with MIT License | 5 votes |
def __init__(self, inputs, outputs, **kwargs):
    if not isinstance(outputs, list):
        raise TypeError('outputs must be list', outputs)
    for i in inputs + outputs:
        if not isinstance(i, gof.Variable):
            raise TypeError(
                'inputs and outputs must be Variable instances', i)
    if 'updates' in kwargs or 'givens' in kwargs:
        raise TypeError('updates and givens are not allowed in kwargs')

    # To correctly support shared variables, the inner function should
    # not see them. Otherwise there is a problem with the gradient.
    self.shared_inputs = [var for var in gof.graph.inputs(outputs)
                          if isinstance(var, SharedVariable)]
    shared_vars = [var.type() for var in self.shared_inputs]
    new = rebuild_collect_shared(outputs, inputs=inputs + shared_vars,
                                 replace=dict(izip(self.shared_inputs,
                                                   shared_vars)),
                                 copy_inputs_over=False)
    (new_inputs, new_outputs,
     [clone_d, update_d, update_expr, shared_inputs]) = new
    assert len(new_inputs) == len(inputs) + len(self.shared_inputs)
    assert len(new_outputs) == len(outputs)
    assert not update_d
    assert not update_expr
    assert not shared_inputs

    self.new_inputs = new_inputs
    self.new_outputs = new_outputs
    self.inputs = inputs
    self.outputs = outputs
    self.kwargs = kwargs
    self.input_types = [input.type for input in inputs]
    self.output_types = [output.type for output in outputs]
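This constructor backs `theano.OpFromGraph`, which packages a small graph, including any shared variables it references, into a reusable Op. A minimal sketch, assuming a standard Theano installation (the variable names and values are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
scale = theano.shared(np.asarray(2.0, dtype=theano.config.floatX), name='scale')

# The shared variable `scale` is collected automatically as a hidden input.
double = theano.OpFromGraph([x], [x * scale])

y = T.vector('y')
f = theano.function([y], double(y))
print(f(np.arange(3, dtype=theano.config.floatX)))  # [0. 2. 4.]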
Example #13
Source File: sharedvar.py From attention-lvcsr with MIT License | 5 votes |
def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, target='cpu'):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter as we convert ``value`` to an
    ndarray (this is a new object). This respects the semantic of
    borrow, as it is a hint to Theano that we can reuse it.

    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, (numpy.number, float, int, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except Exception:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = theano._asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(type=tensor_type,
                                    value=numpy.array(value, copy=True),
                                    name=name,
                                    strict=strict,
                                    allow_downcast=allow_downcast)
        return rval
    except Exception:
        traceback.print_exc()
        raise
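This is the registered constructor that `theano.shared` falls back to for plain Python scalars, producing a 0-d tensor shared variable. A minimal sketch, assuming a standard Theano installation:

import theano

lr = theano.shared(0.01, name='learning_rate')  # float -> float64, 0-d tensor
step = theano.shared(5, name='step')            # int -> int64, 0-d tensor

print(lr.type)               # TensorType(float64, scalar)
print(step.get_value() + 1)  # 6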
Example #14
Source File: input.py From gelato with MIT License | 5 votes |
def __init__(self, shape, input_var=None, name=None, testval=None, **kwargs):
    _InputLayer.__init__(self, shape, input_var=input_var,
                         name=name, **kwargs)
    if testval is not None:
        self.input_var.tag.test_value = testval
    if (not isinstance(self.input_var, SharedVariable) and
            not hasattr(self.input_var.tag, 'test_value')):
        shape = [s if s is not None else 2 for s in self.shape]
        dtype = self.input_var.dtype
        self.input_var.tag.test_value = np.random.uniform(size=shape).astype(dtype)
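A hedged usage sketch of this Lasagne-style layer; the import path, shapes, and test value below are assumptions for illustration, not taken from the gelato example itself:

import numpy as np
from gelato.layers import InputLayer  # import path assumed

# Unknown batch dimension is fine; here an explicit test value is supplied,
# so compute_test_value debugging can run through the graph.
l_in = InputLayer((None, 784), name='input',
                  testval=np.zeros((2, 784), dtype='float32'))
print(l_in.input_var.tag.test_value.shape)  # (2, 784)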
Example #15
Source File: sharedvar.py From D-VAE with MIT License | 5 votes |
def shared(value, name=None, strict=False, allow_downcast=None):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    """
    if not isinstance(value, (numpy.number, float, integer_types, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = getattr(numpy, dtype)(value)
    scalar_type = Scalar(dtype=dtype)
    rval = ScalarSharedVariable(type=scalar_type,
                                value=value,
                                name=name,
                                strict=strict,
                                allow_downcast=allow_downcast)
    return rval
Example #16
Source File: type.py From D-VAE with MIT License | 5 votes |
def gpuarray_shared_constructor(value, name=None, strict=False,
                                allow_downcast=None, borrow=False,
                                broadcastable=None, target=None):
    """
    SharedVariable constructor for GpuArrayType.

    See :func:`theano.shared`.

    """
    if target == 'gpu' or target == 'cpu':
        raise TypeError('not for me')

    if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):
        raise TypeError('ndarray or GpuArray required')

    try:
        get_context(target)
    except ContextNotDefined:
        # Don't make this a hard error if we attempt to make a shared
        # variable while there is no default context.
        if target is None:
            raise TypeError('No default context and no context specified')
        raise

    if broadcastable is None:
        broadcastable = (False,) * value.ndim
    type = GpuArrayType(value.dtype, broadcastable, context_name=target)
    deviceval = pygpu.gpuarray.array(value, copy=(not borrow),
                                     context=type.context)
    return GpuArraySharedVariable(type=type, value=deviceval, name=name,
                                  strict=strict)
Example #17
Source File: var.py From D-VAE with MIT License | 5 votes |
def cuda_shared_constructor(value, name=None, strict=False,
                            allow_downcast=None, borrow=False,
                            broadcastable=None, target='gpu'):
    """
    SharedVariable Constructor for CudaNdarrayType.

    """
    if target != 'gpu':
        raise TypeError('not for gpu')

    # THIS CONSTRUCTOR TRIES TO CAST VALUE TO A FLOAT32, WHICH THEN GOES ONTO THE CARD
    # SO INT shared vars, float64 shared vars, etc. all end up on the card.
    # THIS IS NOT THE DEFAULT BEHAVIOUR THAT WE WANT.
    # SEE float32_shared_constructor

    # TODO: what should strict mean in this context, since we always have to make a copy?
    if strict:
        _value = value
    else:
        _value = theano._asarray(value, dtype='float32')

    if not isinstance(_value, numpy.ndarray):
        raise TypeError('ndarray required')
    if _value.dtype.num != CudaNdarrayType.typenum:
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)
    print("trying to return?")
    try:
        rval = CudaNdarraySharedVariable(type=type, value=_value, name=name,
                                         strict=strict)
    except Exception as e:
        print("ERROR", e)
        raise
    return rval
Example #18
Source File: var.py From D-VAE with MIT License | 5 votes |
def get_value(self, borrow=False, return_internal_type=False):
    """
    Return the value of this SharedVariable's internal array.

    Parameters
    ----------
    borrow
        Permit the return of internal storage, when used in conjunction
        with ``return_internal_type=True``.
    return_internal_type
        True to return the internal ``cuda_ndarray`` instance rather
        than a ``numpy.ndarray`` (Default False).

    By default ``get_value()`` copies from the GPU to a ``numpy.ndarray``
    and returns that host-allocated array.

    ``get_value(False,True)`` will return a GPU-allocated copy of the
    original GPU array.

    ``get_value(True,True)`` will return the original GPU-allocated array
    without any copying.

    """
    if return_internal_type or not self.get_value_return_ndarray:
        # return a cuda_ndarray
        if borrow:
            return self.container.value
        else:
            return copy.deepcopy(self.container.value)
    else:  # return an ndarray
        return numpy.asarray(self.container.value)
Example #19
Source File: builders.py From D-VAE with MIT License | 5 votes |
def __init__(self, inputs, outputs, **kwargs):
    if not isinstance(outputs, list):
        raise TypeError('outputs must be list', outputs)
    for i in inputs + outputs:
        if not isinstance(i, gof.Variable):
            raise TypeError(
                'inputs and outputs must be Variable instances', i)
    if 'updates' in kwargs or 'givens' in kwargs:
        raise TypeError('updates and givens are not allowed in kwargs')

    # To correctly support shared variables, the inner function should
    # not see them. Otherwise there is a problem with the gradient.
    self.shared_inputs = [var for var in gof.graph.inputs(outputs)
                          if isinstance(var, SharedVariable)]
    shared_vars = [var.type() for var in self.shared_inputs]
    new = rebuild_collect_shared(outputs, inputs=inputs + shared_vars,
                                 replace=dict(izip(self.shared_inputs,
                                                   shared_vars)),
                                 copy_inputs_over=False)
    (new_inputs, new_outputs,
     [clone_d, update_d, update_expr, shared_inputs]) = new
    assert len(new_inputs) == len(inputs) + len(self.shared_inputs)
    assert len(new_outputs) == len(outputs)
    assert not update_d
    assert not update_expr
    assert not shared_inputs

    self.new_inputs = new_inputs
    self.new_outputs = new_outputs
    self.inputs = inputs
    self.outputs = outputs
    self.kwargs = kwargs
    self.input_types = [input.type for input in inputs]
    self.output_types = [output.type for output in outputs]
Example #20
Source File: sharedvar.py From D-VAE with MIT License | 5 votes |
def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, target='cpu'):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter as we convert ``value`` to an
    ndarray (this is a new object). This respects the semantic of
    borrow, as it is a hint to Theano that we can reuse it.

    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, (numpy.number, float, integer_types, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except Exception:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = theano._asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(type=tensor_type,
                                    value=numpy.array(value, copy=True),
                                    name=name,
                                    strict=strict,
                                    allow_downcast=allow_downcast)
        return rval
    except Exception:
        traceback.print_exc()
        raise
Example #21
Source File: sharedvar.py From D-VAE with MIT License | 5 votes |
def tensor_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, broadcastable=None, target='cpu'):
    """
    SharedVariable Constructor for TensorType.

    Notes
    -----
    Regarding the inference of the broadcastable pattern...
    The default is to assume that the value might be resized in any
    dimension, so the default broadcastable is
    ``(False,)*len(value.shape)``. The optional `broadcastable` argument
    will override this default.

    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, numpy.ndarray):
        raise TypeError()

    # if no broadcastable is given, then the default is to assume that
    # the value might be resized in any dimension in the future.
    #
    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = TensorType(value.dtype, broadcastable=broadcastable)
    return TensorSharedVariable(type=type,
                                value=numpy.array(value, copy=(not borrow)),
                                name=name,
                                strict=strict,
                                allow_downcast=allow_downcast)


# TensorSharedVariable brings in the tensor operators, is not ideal, but works
# as long as we dont do purely scalar-scalar operations
# _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__
#
# N.B. THERE IS ANOTHER CLASS CALLED ScalarSharedVariable in the
# theano.scalar.sharedvar file. It is not registered as a shared_constructor,
# this one is.
Example #22
Source File: var.py From attention-lvcsr with MIT License | 4 votes |
def float32_shared_constructor(value, name=None, strict=False,
                               allow_downcast=None, borrow=False,
                               broadcastable=None, target='gpu'):
    """
    SharedVariable Constructor for CudaNdarrayType from numpy.ndarray or
    CudaNdarray.

    """
    if target != 'gpu':
        raise TypeError('not for gpu')

    if theano.sandbox.cuda.use.device_number is None:
        theano.sandbox.cuda.use("gpu",
                                force=True,
                                default_to_move_computation_to_gpu=False,
                                move_shared_float32_to_gpu=False,
                                enable_cuda=False)

    # if value isn't a float32 ndarray, or a CudaNdarray then raise
    if not isinstance(value, (numpy.ndarray, theano.sandbox.cuda.CudaNdarray)):
        raise TypeError('ndarray or CudaNdarray required')
    if isinstance(value, numpy.ndarray) and value.dtype.num != CudaNdarrayType.typenum:
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)
    get_value_return_ndarray = True
    if isinstance(value, theano.sandbox.cuda.CudaNdarray):
        get_value_return_ndarray = False
        if borrow:
            deviceval = value
        else:
            deviceval = value.copy()
    else:
        # type.broadcastable is guaranteed to be a tuple, which this next
        # function requires
        deviceval = type_support_filter(value, type.broadcastable, False, None)

    try:
        rval = CudaNdarraySharedVariable(type=type, value=deviceval, name=name,
                                         strict=strict)
    except Exception as e:
        print("ERROR", e)
        raise

    rval.get_value_return_ndarray = get_value_return_ndarray

    return rval
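On the old CUDA backend this is the constructor that moves float32 parameters onto the card when the GPU device is active. A hedged sketch of the typical pattern, assuming a configuration like `THEANO_FLAGS=device=gpu,floatX=float32`; on a CPU-only setup the same call simply produces an ordinary TensorSharedVariable:

import numpy as np
import theano

# A float32 ndarray is required here; float64 would fall through to the CPU constructor.
W = theano.shared(np.zeros((1024, 1024), dtype='float32'), name='W', borrow=True)
print(type(W))  # expected: CudaNdarraySharedVariable when the GPU device is active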
Example #23
Source File: var.py From D-VAE with MIT License | 4 votes |
def float32_shared_constructor(value, name=None, strict=False,
                               allow_downcast=None, borrow=False,
                               broadcastable=None, target='gpu'):
    """
    SharedVariable Constructor for CudaNdarrayType from numpy.ndarray or
    CudaNdarray.

    """
    if target != 'gpu':
        raise TypeError('not for gpu')

    if theano.sandbox.cuda.use.device_number is None:
        theano.sandbox.cuda.use("gpu",
                                force=True,
                                default_to_move_computation_to_gpu=False,
                                move_shared_float32_to_gpu=False,
                                enable_cuda=False)

    # if value isn't a float32 ndarray, or a CudaNdarray then raise
    if not isinstance(value, (numpy.ndarray, theano.sandbox.cuda.CudaNdarray)):
        raise TypeError('ndarray or CudaNdarray required')
    if isinstance(value, numpy.ndarray) and value.dtype.num != CudaNdarrayType.typenum:
        raise TypeError('float32 ndarray required')

    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)
    get_value_return_ndarray = True
    if isinstance(value, theano.sandbox.cuda.CudaNdarray):
        get_value_return_ndarray = False
        if borrow:
            deviceval = value
        else:
            deviceval = value.copy()
    else:
        # type.broadcastable is guaranteed to be a tuple, which this next
        # function requires
        deviceval = type_support_filter(value, type.broadcastable, False, None)

    try:
        rval = CudaNdarraySharedVariable(type=type, value=deviceval, name=name,
                                         strict=strict)
    except Exception as e:
        print("ERROR", e)
        raise

    rval.get_value_return_ndarray = get_value_return_ndarray

    return rval