Python mxnet.ndarray.NDArray() Examples
The following are 30 code examples of mxnet.ndarray.NDArray().
You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module mxnet.ndarray.
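As a quick orientation before the examples (this snippet is not from any of the projects below, only a minimal sketch), an NDArray is usually created through the mx.nd factory functions rather than by calling NDArray() directly, which expects a low-level handle:

import mxnet as mx
from mxnet.ndarray import NDArray

a = mx.nd.array([[1.0, 2.0], [3.0, 4.0]])  # build from a Python list
print(isinstance(a, NDArray))              # True
print(a.shape, a.dtype)                    # (2, 2) <class 'numpy.float32'>
b = a.copyto(mx.cpu())                     # explicit copy onto a context
print((a + b).asnumpy())                   # convert back to NumPy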
Example #1
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        # Average the per-device copies on the CPU (`ctx` is assumed to be
        # the mxnet context module, e.g. `from mxnet import context as ctx`).
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
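The heart of get_params is the averaging loop over per-device parameter copies. A standalone sketch of that pattern, simulating two device copies of a single parameter block (the names here are illustrative, not from the project):

import mxnet as mx

block = [mx.nd.ones((2, 2)), 3 * mx.nd.ones((2, 2))]  # copies on two "devices"
target = mx.nd.zeros((2, 2))

weight = sum(w.copyto(mx.cpu()) for w in block) / len(block)  # average on CPU
weight.astype(target.dtype).copyto(target)                    # in-place update
print(target.asnumpy())  # every element is 2.0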
Example #2
Source File: DataParallelExecutorGroup.py From MANet_for_Video_Object_Detection with Apache License 2.0
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        # Note: this variant passes an empty gradient list to every executor.
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
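The docstring's point about out_grads can be seen with a single executor: when the bound output is not a loss, backward needs a seed gradient on the output. A minimal sketch using the classic Symbol/Executor API:

import mxnet as mx

x = mx.sym.Variable('x')
y = x * x                                   # output is not a scalar loss
ex = y.simple_bind(ctx=mx.cpu(), x=(3,))
ex.forward(is_train=True, x=mx.nd.array([1.0, 2.0, 3.0]))
ex.backward(out_grads=[mx.nd.ones((3,))])   # seed gradient on the output
print(ex.grad_dict['x'].asnumpy())          # dy/dx = 2*x -> [2. 4. 6.]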
Example #3
Source File: DataParallelExecutorGroup.py From MANet_for_Video_Object_Detection with Apache License 2.0
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #4
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_outputs(self, merge_multi_context=True):
    """Get outputs of the previous forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    outputs = [[exec_.outputs[i] for exec_ in self.execs]
               for i in range(len(self.execs[0].outputs))]
    if merge_multi_context:
        outputs = _merge_multi_context(outputs, self.output_layouts)
    return outputs
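get_outputs relies on the helper _merge_multi_context to fuse per-device results, which is not shown on this page. A simplified stand-in that assumes the batch axis is axis 0 (the real helper consults the recorded output layouts):

import mxnet as mx

def merge_multi_context(outputs, axis=0):
    # outputs is [[out_i_dev1, out_i_dev2, ...], ...]; concatenate each
    # output's device pieces along the batch axis.
    return [mx.nd.concat(*per_dev, dim=axis) if len(per_dev) > 1 else per_dev[0]
            for per_dev in outputs]

out1 = [mx.nd.ones((2, 3)), mx.nd.zeros((2, 3))]  # out1 from dev1 and dev2
out2 = [mx.nd.ones((2, 1)), mx.nd.ones((2, 1))]
print([o.shape for o in merge_multi_context([out1, out2])])  # [(4, 3), (4, 1)]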
Example #5
Source File: DataParallelExecutorGroup.py From MANet_for_Video_Object_Detection with Apache License 2.0
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Example #6
Source File: MxnetA.py From nn_tools with MIT License
def __init__(self, interval=1, pattern='.*', sort=False):
    # Assumes module-level imports: `import re`, `import ctypes`,
    # `from mxnet.base import NDArrayHandle, py_str` and
    # `from mxnet.ndarray import NDArray`.
    def stat(x):
        return x.shape
    self.stat_func = stat
    self.interval = interval
    self.activated = False
    self.queue = []
    self.step = 0
    self.exes = []
    self.re_prog = re.compile(pattern)
    self.sort = sort

    def stat_helper(name, array):
        # The monitor callback receives a raw C handle; wrap it as a
        # read-only NDArray before recording its statistic.
        array = ctypes.cast(array, NDArrayHandle)
        array = NDArray(array, writable=False)
        if not self.activated or not self.re_prog.match(py_str(name)):
            return
        self.queue.append((self.step, py_str(name), stat(array)))
    self.stat_helper = stat_helper
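stat_helper has the two-argument signature expected by Executor.set_monitor_callback, which delivers the tensor name as raw bytes and the tensor as a C handle, hence the ctypes cast. A self-contained sketch of that wiring, independent of the class above:

import ctypes
import mxnet as mx
from mxnet.base import NDArrayHandle, py_str
from mxnet.ndarray import NDArray

def stat_helper(name, array):
    array = NDArray(ctypes.cast(array, NDArrayHandle), writable=False)
    print(py_str(name), array.shape)  # report each monitored tensor's shape

x = mx.sym.Variable('x')
ex = mx.sym.relu(x).simple_bind(ctx=mx.cpu(), x=(2, 2))
ex.set_monitor_callback(stat_helper)
ex.forward(is_train=True, x=mx.nd.ones((2, 2)))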
Example #7
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example #8
Source File: DataParallelExecutorGroup.py From MANet_for_Video_Object_Detection with Apache License 2.0
def get_outputs(self, merge_multi_context=True):
    """Get outputs of the previous forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    outputs = [[exec_.outputs[i] for exec_ in self.execs]
               for i in range(len(self.execs[0].outputs))]
    if merge_multi_context:
        outputs = _merge_multi_context(outputs, self.output_layouts)
    return outputs
Example #9
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def get_outputs(self, merge_multi_context=True):
    """Get outputs of the previous forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    outputs = [[exec_.outputs[i] for exec_ in self.execs]
               for i in range(len(self.execs[0].outputs))]
    if merge_multi_context:
        outputs = _merge_multi_context(outputs, self.output_layouts)
    return outputs
Example #10
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #11
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example #12
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Example #13
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #14
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def get_input_grads(self, merge_multi_context=True):
    """Get the gradients with respect to the inputs of the module.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[grad1, grad2]`.
    Otherwise, it is like
    `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert self.inputs_need_grad
    if merge_multi_context:
        return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
    return self.input_grad_arrays
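The single-executor analogue of an input gradient: bind with a gradient request, run forward then backward, and read the gradient for the input. A minimal sketch:

import mxnet as mx

x = mx.sym.Variable('x')
loss = mx.sym.sum(2 * x)                    # scalar loss, no out_grads needed
ex = loss.simple_bind(ctx=mx.cpu(), x=(3,), grad_req='write')
ex.forward(is_train=True, x=mx.nd.array([1.0, 2.0, 3.0]))
ex.backward()
print(ex.grad_dict['x'].asnumpy())          # d(sum(2x))/dx -> [2. 2. 2.]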
Example #15
Source File: DataParallelExecutorGroup.py From Deep-Feature-Flow with MIT License
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example #16
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_input_grads(self, merge_multi_context=True):
    """Get the gradients with respect to the inputs of the module.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[grad1, grad2]`.
    Otherwise, it is like
    `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert self.inputs_need_grad
    if merge_multi_context:
        return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
    return self.input_grad_arrays
Example #17
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #18
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Example #19
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example #20
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #21
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_outputs(self, merge_multi_context=True):
    """Get outputs of the previous forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    outputs = [[exec_.outputs[i] for exec_ in self.execs]
               for i in range(len(self.execs[0].outputs))]
    if merge_multi_context:
        outputs = _merge_multi_context(outputs, self.output_layouts)
    return outputs
Example #22
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Example #23
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example #24
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_input_grads(self, merge_multi_context=True):
    """Get the gradients with respect to the inputs of the module.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[grad1, grad2]`.
    Otherwise, it is like
    `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert self.inputs_need_grad
    if merge_multi_context:
        return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
    return self.input_grad_arrays
Example #25
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #26
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Example #27
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def backward(self, out_grads=None):
    """Run backward on all devices. A backward should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    # for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
    for i, exec_ in enumerate(self.execs):
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example #28
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_input_grads(self, merge_multi_context=True):
    """Get the gradients with respect to the inputs of the module.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the outputs will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[grad1, grad2]`.
    Otherwise, it is like
    `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert self.inputs_need_grad
    if merge_multi_context:
        return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
    return self.input_grad_arrays
Example #29
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. In the case when data-parallelism is used,
        the states will be collected from multiple devices. A `True`
        value indicates that we should merge the collected results so
        that they look like results from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`.
    Otherwise, it is like
    `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`.
    All the output elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example #30
Source File: DataParallelExecutorGroup.py From kaggle-rsna18 with MIT License
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and
      `aux_params` in place.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])