Python cntk.constant() Examples
The following are 17 code examples of cntk.constant(), collected from open-source projects. Each example notes its original project, source file, and license, so you can refer back to the full code. You may also want to check out all available functions and classes of the cntk module.
Example #1
Source File: cntk_backend.py From keras-lambda with MIT License
def _padding(x, pattern, axis):
    base_shape = x.shape
    if b_any([dim < 0 for dim in base_shape]):
        raise ValueError('CNTK Backend: padding input tensor with '
                         'shape `%s` contains non-specified dimension, '
                         'which is not supported. Please give fixed '
                         'dimension to enable padding.' % base_shape)
    if pattern[0] > 0:
        prefix_shape = list(base_shape)
        prefix_shape[axis] = pattern[0]
        prefix_shape = tuple(prefix_shape)
        x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
        base_shape = x.shape
    if pattern[1] > 0:
        postfix_shape = list(base_shape)
        postfix_shape[axis] = pattern[1]
        postfix_shape = tuple(postfix_shape)
        x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
    return x
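As a quick illustration, here is a minimal sketch of calling the _padding helper above, assuming a standard cntk install; b_any is the Keras backend's alias for Python's built-in any(), so a stand-in is defined here:

import numpy as np
import cntk as C

b_any = any  # stand-in for the Keras backend alias used above

# Pad a 2x3 constant with one column of zeros before and two after, along axis 1.
x = C.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
padded = _padding(x, (1, 2), axis=1)
print(padded.shape)   # (2, 6)
print(padded.eval())  # zeros in columns 0, 4 and 5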
Example #2
Source File: cntk_backend.py From DeepLearning_Wavelet-LSTM with MIT License
def _padding(x, pattern, axis):
    base_shape = x.shape
    if b_any([dim < 0 for dim in base_shape]):
        raise ValueError('CNTK Backend: padding input tensor with '
                         'shape `%s` contains non-specified dimension, '
                         'which is not supported. Please give fixed '
                         'dimension to enable padding.' % base_shape)
    if pattern[0] > 0:
        prefix_shape = list(base_shape)
        prefix_shape[axis] = pattern[0]
        prefix_shape = tuple(prefix_shape)
        x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
        base_shape = x.shape
    if pattern[1] > 0:
        postfix_shape = list(base_shape)
        postfix_shape[axis] = pattern[1]
        postfix_shape = tuple(postfix_shape)
        x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
    return x
Example #3
Source File: cntk_backend.py From GraphicDesignPatternByPython with MIT License
def _padding(x, pattern, axis):  # pragma: no cover
    base_shape = x.shape
    if b_any([dim < 0 for dim in base_shape]):
        raise ValueError('CNTK Backend: padding input tensor with '
                         'shape `%s` contains non-specified dimension, '
                         'which is not supported. Please give fixed '
                         'dimension to enable padding.' % base_shape)
    if pattern[0] > 0:
        prefix_shape = list(base_shape)
        prefix_shape[axis] = pattern[0]
        prefix_shape = tuple(prefix_shape)
        x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
        base_shape = x.shape
    if pattern[1] > 0:
        postfix_shape = list(base_shape)
        postfix_shape[axis] = pattern[1]
        postfix_shape = tuple(postfix_shape)
        x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
    return x
Example #4
Source File: cntk_backend.py From deepQuest with BSD 3-Clause "New" or "Revised" License
def _padding(x, pattern, axis):
    base_shape = x.shape
    if b_any([dim < 0 for dim in base_shape]):
        raise ValueError('CNTK Backend: padding input tensor with '
                         'shape `%s` contains non-specified dimension, '
                         'which is not supported. Please give fixed '
                         'dimension to enable padding.' % base_shape)
    if pattern[0] > 0:
        prefix_shape = list(base_shape)
        prefix_shape[axis] = pattern[0]
        prefix_shape = tuple(prefix_shape)
        x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
        base_shape = x.shape
    if pattern[1] > 0:
        postfix_shape = list(base_shape)
        postfix_shape[axis] = pattern[1]
        postfix_shape = tuple(postfix_shape)
        x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
    return x
Example #5
Source File: cntk_backend.py From DeepLearning_Wavelet-LSTM with MIT License
def gradients(loss, variables):
    # cntk does not support gradients as symbolic op, to hook up with keras
    # model we will return a constant as place holder, the cntk learner will
    # apply the gradient during training.
    global grad_parameter_dict
    if isinstance(variables, list) is False:
        variables = [variables]
    grads = []
    for v in variables:
        g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
        grads.append(g)
        grad_parameter_dict[g] = v
    return grads
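To see the placeholder trick in isolation, here is a minimal sketch, assuming grad_parameter_dict has been initialized as an empty module-level dict (as the backend does):

import cntk as C

grad_parameter_dict = {}  # the backend defines this at module level

w = C.parameter(shape=(3,), init=0.1)
g, = gradients(loss=None, variables=w)   # loss is never read; a zero constant comes back
print(g.shape)                           # (3,)
print(grad_parameter_dict[g] is w)       # True: the CNTK learner later pairs g with w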
Example #6
Source File: cntk_backend.py From DeepLearning_Wavelet-LSTM with MIT License
def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const
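A short usage sketch; floatx() and _prepare_name() normally come from the Keras backend, so hypothetical stand-ins are supplied here:

import numpy as np
import cntk as C

def floatx():                      # stand-in for keras.backend.floatx()
    return 'float32'

def _prepare_name(name, default):  # stand-in; the real helper also namespaces names
    return name if name else default

c = constant(3.0, shape=(2, 2), name='threes')
print(c.value)          # [[3. 3.] [3. 3.]]
print(c._keras_shape)   # (2, 2)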
Example #7
Source File: cntk_backend.py From deepQuest with BSD 3-Clause "New" or "Revised" License
def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const
Example #8
Source File: cntk_backend.py From deepQuest with BSD 3-Clause "New" or "Revised" License
def gradients(loss, variables):
    # cntk does not support gradients as symbolic op, to hook up with keras
    # model we will return a constant as place holder, the cntk learner will
    # apply the gradient during training.
    global grad_parameter_dict
    if isinstance(variables, list) is False:
        variables = [variables]
    grads = []
    for v in variables:
        g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
        grads.append(g)
        grad_parameter_dict[g] = v
    return grads
Example #9
Source File: cntk_backend.py From keras-lambda with MIT License
def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = _FLOATX
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = shape
    const._uses_learning_phase = False
    return const
Example #10
Source File: cntk_backend.py From keras-lambda with MIT License
def gradients(loss, variables):
    # cntk does not support gradients as symbolic op, to hook up with keras
    # model we will return a constant as place holder, the cntk learner will
    # apply the gradient during training.
    global grad_parameter_dict
    if isinstance(variables, list) is False:
        variables = [variables]
    grads = []
    for v in variables:
        g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
        grads.append(g)
        grad_parameter_dict[g] = v
    return grads
Example #11
Source File: helpers_cntk.py From MachineLearningSamples-ImageClassificationUsingCntk with MIT License
def create_model(base_model_file, input_features, num_classes, dropout_rate=0.5,
                 freeze_weights=False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    # graph.plot(base_model, filename="base_model.pdf")  # Write graph visualization

    # Clone model until right before the pooling layer, ie. until including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction")(avgPoolDrop)
    return finalModel

# Trains a transfer learning model
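The cntk.constant() call here is the scalar constant(114), which CNTK broadcasts over every pixel to roughly center ImageNet-range inputs around zero. A standalone sketch of just that normalization step:

import numpy as np
import cntk as C

img = C.input_variable((3, 4, 4))      # toy 3-channel image
centered = img - C.constant(114)       # scalar broadcasts over the whole tensor
sample = np.full((3, 4, 4), 120, dtype=np.float32)
print(centered.eval({img: [sample]}))  # every entry becomes 6.0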
Example #12
Source File: cntk_backend.py From GraphicDesignPatternByPython with MIT License
def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const
Example #13
Source File: cntk_backend.py From GraphicDesignPatternByPython with MIT License
def gradients(loss, variables):
    # cntk does not support gradients as symbolic op, to hook up with keras
    # model we will return a constant as place holder, the cntk learner will
    # apply the gradient during training.
    global grad_parameter_dict
    if isinstance(variables, list) is False:
        variables = [variables]
    grads = []
    for v in variables:
        g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
        grads.append(g)
        grad_parameter_dict[g] = v
    return grads
Example #14
Source File: train_end2end.py From end2end_AU_speech with MIT License
def std_normalized_l2_loss(output, target):
    std_inv = np.array([6.6864805402, 5.2904440280, 3.7165409939, 4.1421640454, 8.1537399389,
                        7.0312877415, 2.6712380967, 2.6372177876, 8.4253649884, 6.7482162880,
                        9.0849960354, 10.2624412692, 3.1325531319, 3.1091179819, 2.7337937590,
                        2.7336441031, 4.3542467871, 5.4896293687, 6.2003761588, 3.1290341469,
                        5.7677042738, 11.5460919611, 9.9926451700, 5.4259818848, 20.5060642486,
                        4.7692101480, 3.1681517575, 3.8582905289, 3.4222250436, 4.6828286809,
                        3.0070785113, 2.8936539301, 4.0649030157, 25.3068458731, 6.0030623160,
                        3.1151977458, 7.7773542649, 6.2057372469, 9.9494258692, 4.6865422850,
                        5.3300697628, 2.7722027974, 4.0658663003, 18.1101618617, 3.5390113731,
                        2.7794520068], dtype=np.float32)
    weights = C.constant(value=std_inv)  # .reshape((1, label_dim)))
    dif = output - target
    ret = C.reduce_mean(C.square(C.element_times(dif, weights)))
    return ret
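A toy check of the loss, assuming 46-dimensional action-unit vectors to match the length of std_inv above:

import numpy as np
import cntk as C

target = C.constant(np.zeros(46, dtype=np.float32))
output = C.constant(np.ones(46, dtype=np.float32))
loss = std_normalized_l2_loss(output, target)
print(loss.eval())   # the mean of the squared inverse-std weights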
Example #15
Source File: LayerUtils.py From end2end_AU_speech with MIT License
def lrelu(input, leak=0.2, name=""):
    return C.param_relu(C.constant((np.ones(input.shape) * leak).astype(np.float32)),
                        input, name=name)
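param_relu() multiplies negative inputs by the per-element alpha tensor, so a constant alpha of 0.2 gives a leaky ReLU. A quick sketch using lrelu above:

import numpy as np
import cntk as C

x = C.constant(np.array([-2.0, -0.5, 0.0, 1.0], dtype=np.float32))
y = lrelu(x, leak=0.2)
print(y.eval())   # [-0.4 -0.1  0.   1. ]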
Example #16
Source File: LayerUtils.py From end2end_AU_speech with MIT License
def broadcast_xy(input_vec, h, w):
    """ broadcast input vector of length d to tensor (d x h x w) """
    assert(h > 0 and w > 0)
    d = input_vec.shape[0]
    # reshape vector to d x 1 x 1
    x = C.reshape(input_vec, (d, 1, 1))
    # create a zeros-like tensor of size (d x h x w)
    t = np.zeros((d, h, w), dtype=np.float32)
    y = C.constant(t)
    z = C.reconcile_dynamic_axes(y, x)
    z = z + x
    return z
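A sketch of the broadcast in action; reconcile_dynamic_axes() attaches the dynamic batch axis of x to the zeros constant so the two can be added:

import numpy as np
import cntk as C

vec = C.input_variable((4,))
tiled = broadcast_xy(vec, h=2, w=3)
print(tiled.shape)                                         # (4, 2, 3)
out = tiled.eval({vec: [np.arange(4, dtype=np.float32)]})
print(out[0, :, 0, 0])                                     # [0. 1. 2. 3.]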
Example #17
Source File: helpers_cntk.py From ImageSimilarityUsingCntk with MIT License
def create_model(base_model_file, input_features, num_classes, dropout_rate=0.5,
                 freeze_weights=False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    # graph.plot(base_model, filename="base_model.pdf")  # Write graph visualization

    # Clone model until right before the pooling layer, ie. until including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction")(avgPoolDrop)
    return finalModel

# Trains a transfer learning model