Python cntk.reduce_mean() Examples

The following are 15 code examples of cntk.reduce_mean(), drawn from open-source projects. Each example notes the source file, project, and license it comes from. You may also want to check out the other available functions and classes of the cntk module.
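Before the project examples, here is a minimal stand-alone sketch of the op itself (assuming CNTK 2.x and NumPy; the values are illustrative only). Note that CNTK's reduce ops keep the reduced axis with size 1, which is why several of the backend helpers below squeeze it away afterwards.

import numpy as np
import cntk as C

# A 2x3 constant to reduce over.
data = C.constant(value=np.array([[1., 2., 3.],
                                  [4., 5., 6.]], dtype=np.float32))

# Mean along axis 0; the reduced axis is kept with size 1: [[2.5, 3.5, 4.5]]
print(C.reduce_mean(data, axis=0).eval())

# Mean over all static axes: 3.5
print(C.reduce_mean(data, axis=C.Axis.all_static_axes()).eval())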
Example #1
Source File: cntk_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
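This Keras-backend helper returns the mean and variance of x over the given axes, using a stop-gradient shift for numerical stability. A minimal usage sketch, assuming C is the cntk module and _moments is defined as above; keep_dims=True sidesteps the backend-specific squeeze helper:

import numpy as np
import cntk as C

x = C.constant(value=np.arange(6, dtype=np.float32).reshape(2, 3))
mean, variance = _moments(x, axes=[0, 1], keep_dims=True)
print(mean.eval())      # overall mean of 0..5, kept as shape (1, 1): 2.5
print(variance.eval())  # population variance: ~2.917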
Example #2
Source File: cntk_backend.py    From keras-lambda with MIT License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
Example #3
Source File: cntk_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
Example #4
Source File: cntk_backend.py    From GraphicDesignPatternByPython with MIT License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
Example #5
Source File: cntk_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def mean(x, axis=None, keepdims=False):
    axis = _normalize_axis(axis, x)
    output = _reduce_on_axis(x, axis, 'reduce_mean')

    return _remove_dims(output, axis, keepdims) 
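The helpers _normalize_axis, _reduce_on_axis and _remove_dims are other backend utilities that are not shown here; in effect the call applies C.reduce_mean along each requested axis and then drops the size-1 reduced dimensions when keepdims is False. A rough stand-alone equivalent for a single axis (a sketch, not the backend's actual code):

import numpy as np
import cntk as C

data = C.constant(value=np.array([[1., 2.],
                                  [3., 4.]], dtype=np.float32))
# Roughly what mean(data, axis=1, keepdims=True) computes: [[1.5], [3.5]]
print(C.reduce_mean(data, axis=1).eval())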
Example #6
Source File: cntk_backend.py    From GraphicDesignPatternByPython with MIT License
def mean(x, axis=None, keepdims=False):
    axis = _normalize_axis(axis, x)
    output = _reduce_on_axis(x, axis, 'reduce_mean')

    return _remove_dims(output, axis, keepdims) 
Example #7
Source File: cntk_backend.py    From keras-lambda with MIT License
def classification_error(output, target, axis=-1):
    return C.ops.reduce_mean(
        C.equal(
            argmax(
                output,
                axis=-1),
            argmax(
                target,
                axis=-1)),
        axis=C.Axis.all_axes()) 
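Despite its name, this returns the mean of an element-wise equality test between the argmax of output and target, i.e. the fraction of samples whose predicted class matches the target class. A stand-alone sketch with plain CNTK ops, using C.argmax directly and axis 1 explicitly (the backend's own argmax wrapper, not shown here, handles the axis=-1 convention), and all_static_axes because the inputs here are constants with no dynamic axes:

import numpy as np
import cntk as C

output = C.constant(value=np.array([[0.1, 0.9],
                                    [0.8, 0.2]], dtype=np.float32))
target = C.constant(value=np.array([[0.0, 1.0],
                                    [0.0, 1.0]], dtype=np.float32))

match = C.equal(C.argmax(output, axis=1), C.argmax(target, axis=1))
# One of the two rows agrees on the predicted class: 0.5
print(C.reduce_mean(match, axis=C.Axis.all_static_axes()).eval())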
Example #8
Source File: cntk_backend.py    From keras-lambda with MIT License
def mean(x, axis=None, keepdims=False):
    axis = _normalize_axis(axis, x)
    output = _reduce_on_axis(x, axis, 'reduce_mean')

    return _remove_dims(output, axis, keepdims) 
Example #9
Source File: cntk_backend.py    From GraphicDesignPatternByPython with MIT License
def classification_error(target, output, axis=-1):
    return C.ops.reduce_mean(
        C.equal(
            argmax(
                output,
                axis=-1),
            argmax(
                target,
                axis=-1)),
        axis=C.Axis.all_axes()) 
Example #10
Source File: cntk_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def classification_error(target, output, axis=-1):
    return C.ops.reduce_mean(
        C.equal(
            argmax(
                output,
                axis=-1),
            argmax(
                target,
                axis=-1)),
        axis=C.Axis.all_axes()) 
Example #11
Source File: cntk_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def mean(x, axis=None, keepdims=False):
    axis = _normalize_axis(axis, x)
    output = _reduce_on_axis(x, axis, 'reduce_mean')

    return _remove_dims(output, axis, keepdims) 
Example #12
Source File: cntk_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def classification_error(target, output, axis=-1):
    return C.ops.reduce_mean(
        C.equal(
            argmax(
                output,
                axis=-1),
            argmax(
                target,
                axis=-1)),
        axis=C.Axis.all_axes()) 
Example #13
Source File: train_end2end.py    From end2end_AU_speech with MIT License
def l2_loss(output, target):
    return C.reduce_mean(C.square(output - target)) 
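A quick sanity check of l2_loss on constants (a sketch; assumes l2_loss is defined as above and CNTK/NumPy are importable):

import numpy as np
import cntk as C

pred = C.constant(value=np.array([1., 2., 3.], dtype=np.float32))
true = C.constant(value=np.array([1., 2., 5.], dtype=np.float32))
# Mean squared error: (0 + 0 + 4) / 3 ~= 1.333
print(l2_loss(pred, true).eval())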
Example #14
Source File: train_end2end.py    From end2end_AU_speech with MIT License
def l1_reg_loss(output):
    # don't need C.abs(output), because output is already non-negative
    # use abs() if your desired output could be negative
    return C.reduce_mean(output)


#----------------------------------------
# create computational graph and learner
#---------------------------------------- 
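As the comment in l1_reg_loss notes, taking the plain mean is a valid L1 penalty here only because the network's outputs are non-negative. For outputs that can be negative, the corresponding penalty would take an explicit absolute value first (a sketch using the standard C.abs op):

def l1_reg_loss_signed(output):
    # L1 penalty that also works for outputs that may be negative.
    return C.reduce_mean(C.abs(output))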
Example #15
Source File: train_end2end.py    From end2end_AU_speech with MIT License
def std_normalized_l2_loss(output, target):
    std_inv = np.array([6.6864805402, 5.2904440280, 3.7165409939, 4.1421640454, 8.1537399389, 7.0312877415, 2.6712380967,
                        2.6372177876, 8.4253649884, 6.7482162880, 9.0849960354, 10.2624412692, 3.1325531319, 3.1091179819,
                        2.7337937590, 2.7336441031, 4.3542467871, 5.4896293687, 6.2003761588, 3.1290341469, 5.7677042738,
                        11.5460919611, 9.9926451700, 5.4259818848, 20.5060642486, 4.7692101480, 3.1681517575, 3.8582905289,
                        3.4222250436, 4.6828286809, 3.0070785113, 2.8936539301, 4.0649030157, 25.3068458731, 6.0030623160,
                        3.1151977458, 7.7773542649, 6.2057372469, 9.9494258692, 4.6865422850, 5.3300697628, 2.7722027974,
                        4.0658663003, 18.1101618617, 3.5390113731, 2.7794520068], dtype=np.float32)
    weights = C.constant(value=std_inv) #.reshape((1, label_dim)))
    dif = output - target
    ret = C.reduce_mean(C.square(C.element_times(dif, weights)))
    return ret 
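This loss scales each output dimension by a precomputed inverse standard deviation before taking the mean of the squared, weighted differences, so dimensions with small natural variation still contribute to the gradient. A sketch of exercising it on random 46-dimensional vectors (46 matches the length of std_inv above; the data is illustrative only):

import numpy as np
import cntk as C

pred = C.constant(value=np.random.rand(46).astype(np.float32))
true = C.constant(value=np.random.rand(46).astype(np.float32))
# A single non-negative scalar: the inverse-std-weighted mean squared error.
print(std_normalized_l2_loss(pred, true).eval())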