Java Code Examples for org.nd4j.linalg.factory.Nd4j#dataType()
The following examples show how to use org.nd4j.linalg.factory.Nd4j#dataType().
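Before diving in, note the pattern that recurs throughout the test-suite examples below: capture the current global data type with Nd4j.dataType(), switch it for the duration of the test, and restore it afterwards. Here is a minimal, self-contained sketch of that pattern. It targets the older DataBuffer.Type enum used by most of the nd4j examples below (newer versions return org.nd4j.linalg.api.buffer.DataType instead), and the try/finally wrapper is our addition rather than something copied from any one example.

import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class DataTypeSwitchSketch {
    public static void main(String[] args) {
        // Capture the current global data type so it can be restored later.
        DataBuffer.Type initialType = Nd4j.dataType();
        try {
            // Switch the global type; arrays created after this use DOUBLE.
            Nd4j.setDataType(DataBuffer.Type.DOUBLE);
            INDArray x = Nd4j.create(3);
            System.out.println(x.data().getElementSize()); // 8 bytes per element
        } finally {
            // Always restore, so unrelated code is not affected by the switch.
            Nd4j.setDataType(initialType);
        }
    }
}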
Example 1
Source File: TestGraphNodes.java From deeplearning4j with Apache License 2.0
@Test
public void testReshapeNode() {
    Nd4j.getRandom().setSeed(12345);

    GraphVertex reshapeVertex = new ReshapeVertex(null, "", -1, 'c', new int[] {-1, 736}, null, Nd4j.dataType());

    val inputShape = new long[] {1, 1, 1, 736};
    INDArray input = Nd4j.create(inputShape);

    reshapeVertex.setInputs(input);
    INDArray out = reshapeVertex.doForward(false, LayerWorkspaceMgr.noWorkspaces());
    assertArrayEquals(new long[] {1, 736}, out.shape());

    reshapeVertex.setEpsilon(out);
    INDArray[] backward = reshapeVertex.doBackward(false, LayerWorkspaceMgr.noWorkspaces()).getSecond();
    assertTrue(Arrays.equals(backward[0].shape(), inputShape));
}
Example 2
Source File: JcublasLevel3.java From nd4j with Apache License 2.0
@Override
protected void dsymm(char Order, char Side, char Uplo, int M, int N, double alpha, INDArray A, int lda,
                INDArray B, int ldb, double beta, INDArray C, int ldc) {
    if (Nd4j.dataType() != DataBuffer.Type.DOUBLE)
        logger.warn("DOUBLE symm called");

    Nd4j.getExecutioner().push();

    CudaContext ctx = allocator.getFlowController().prepareAction(C, A, B);

    CublasPointer aPointer = new CublasPointer(A, ctx);
    CublasPointer bPointer = new CublasPointer(B, ctx);
    CublasPointer cPointer = new CublasPointer(C, ctx);

    cublasHandle_t handle = ctx.getHandle();
    synchronized (handle) {
        cublasSetStream_v2(new cublasContext(handle), new CUstream_st(ctx.getOldStream()));

        cublasDsymm_v2(new cublasContext(handle), convertSideMode(Side), convertUplo(Uplo), M, N,
                        new DoublePointer(alpha), (DoublePointer) aPointer.getDevicePointer(), lda,
                        (DoublePointer) bPointer.getDevicePointer(), ldb, new DoublePointer(beta),
                        (DoublePointer) cPointer.getDevicePointer(), ldc);
    }

    allocator.registerAction(ctx, C, A, B);

    OpExecutionerUtil.checkForAny(C);
}
Example 3
Source File: JcublasLevel3.java From nd4j with Apache License 2.0
@Override
protected void dsyr2k(char Order, char Uplo, char Trans, int N, int K, double alpha, INDArray A, int lda,
                INDArray B, int ldb, double beta, INDArray C, int ldc) {
    if (Nd4j.dataType() != DataBuffer.Type.DOUBLE)
        logger.warn("DOUBLE syr2k called");

    Nd4j.getExecutioner().push();

    CudaContext ctx = allocator.getFlowController().prepareAction(C, A, B);

    CublasPointer aPointer = new CublasPointer(A, ctx);
    CublasPointer bPointer = new CublasPointer(B, ctx);
    CublasPointer cPointer = new CublasPointer(C, ctx);

    cublasHandle_t handle = ctx.getHandle();
    synchronized (handle) {
        cublasSetStream_v2(new cublasContext(handle), new CUstream_st(ctx.getOldStream()));

        cublasDsyr2k_v2(new cublasContext(handle), convertUplo(Uplo), Trans, N, K, new DoublePointer(alpha),
                        (DoublePointer) aPointer.getDevicePointer(), lda,
                        (DoublePointer) bPointer.getDevicePointer(), ldb, new DoublePointer(beta),
                        (DoublePointer) cPointer.getDevicePointer(), ldc);
    }

    allocator.registerAction(ctx, C, A, B);

    OpExecutionerUtil.checkForAny(C);
}
Example 4
Source File: GradCheckTransforms.java From nd4j with Apache License 2.0
@Before
public void before() throws Exception {
    Nd4j.create(1);
    initialType = Nd4j.dataType();

    Nd4j.setDataType(DataBuffer.Type.DOUBLE);
    Nd4j.getRandom().setSeed(123);
}
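The setup above saves the previous global type in an initialType field, but the matching restore is not shown in the snippet. In test classes like this the captured type is typically put back in an @After method; here is a hedged sketch of the full capture/restore pair (the class name and the exact teardown body are our assumption, not copied from GradCheckTransforms):

import org.junit.After;
import org.junit.Before;
import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.factory.Nd4j;

public class DataTypeRestoreSketch {
    private DataBuffer.Type initialType;

    @Before
    public void before() {
        Nd4j.create(1);
        initialType = Nd4j.dataType();            // capture the current global type
        Nd4j.setDataType(DataBuffer.Type.DOUBLE); // gradient checks want double precision
    }

    @After
    public void after() {
        // Restore the captured type so the switch to DOUBLE does not
        // leak into other test classes running in the same JVM.
        Nd4j.setDataType(initialType);
    }
}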
Example 5
Source File: JcublasLevel1.java From nd4j with Apache License 2.0
@Override
protected void saxpy(long N, float alpha, INDArray X, int incX, INDArray Y, int incY) {
    if (Nd4j.dataType() != DataBuffer.Type.FLOAT)
        logger.warn("FLOAT axpy called");

    //CudaContext ctx = allocator.getFlowController().prepareAction(Y, X);

    Nd4j.getExecutioner().exec(new Axpy(X, Y, alpha, N));

    OpExecutionerUtil.checkForAny(Y);
    /*
    CublasPointer xAPointer = new CublasPointer(X, ctx);
    CublasPointer xBPointer = new CublasPointer(Y, ctx);

    cublasHandle_t handle = ctx.getHandle();
    synchronized (handle) {
        cublasSetStream_v2(new cublasContext(handle), new CUstream_st(ctx.getOldStream()));

        PointerPointer p = new cublasContext(handle);
        cublasSaxpy_v2(p, N, alpha, xAPointer.getDevicePointer(), incX, xBPointer.getDevicePointer(), incY);
    }
    */

    //allocator.registerAction(ctx, Y, X);
}
Example 6
Source File: GradCheckReductions.java From nd4j with Apache License 2.0
@Before
public void before() throws Exception {
    Nd4j.create(1);
    initialType = Nd4j.dataType();

    Nd4j.setDataType(DataBuffer.Type.DOUBLE);
    Nd4j.getRandom().setSeed(123);
}
Example 7
Source File: JCublasNDArrayFactory.java From nd4j with Apache License 2.0
public INDArray[] tear(INDArray tensor, int... dimensions) {
    if (tensor.isCompressed())
        Nd4j.getCompressor().decompressi(tensor);

    Arrays.sort(dimensions);

    Pair<DataBuffer, DataBuffer> tadBuffers = Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(tensor, dimensions);

    long tadLength = 1;
    val shape = new long[dimensions.length];
    for (int i = 0; i < dimensions.length; i++) {
        tadLength *= tensor.shape()[dimensions[i]];
        shape[i] = tensor.shape()[dimensions[i]];
    }

    int numTads = (int) (tensor.lengthLong() / tadLength);
    INDArray[] result = new INDArray[numTads];

    long[] xPointers = new long[numTads];

    CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(null, tensor);

    for (int x = 0; x < numTads; x++) {
        result[x] = Nd4j.createUninitialized(shape);
        context = AtomicAllocator.getInstance().getFlowController().prepareAction(result[x]);
        xPointers[x] = AtomicAllocator.getInstance().getPointer(result[x], context).address();
    }

    CudaDoubleDataBuffer tempX = new CudaDoubleDataBuffer(numTads);

    AtomicAllocator.getInstance().memcpyBlocking(tempX, new LongPointer(xPointers), xPointers.length * 8, 0);

    PointerPointer extraz = new PointerPointer(null, // not used
                    context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer());

    if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.tearDouble(extraz,
                (DoublePointer) AtomicAllocator.getInstance().getPointer(tensor, context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tensor.shapeInfoDataBuffer(), context),
                new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)),
                (LongPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
                new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context))
        );
    } else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.tearFloat(extraz,
                (FloatPointer) AtomicAllocator.getInstance().getPointer(tensor, context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tensor.shapeInfoDataBuffer(), context),
                new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)),
                (LongPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
                new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context))
        );
    } else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
        nativeOps.tearHalf(extraz,
                (ShortPointer) AtomicAllocator.getInstance().getPointer(tensor, context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tensor.shapeInfoDataBuffer(), context),
                new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)),
                (LongPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
                new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context))
        );
    }

    AtomicAllocator.getInstance().getFlowController().registerActionAllWrite(context, result);
    AtomicAllocator.getInstance().getFlowController().registerAction(context, null, result);

    return result;
}
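The tear() method above selects a typed native entry point by branching on the global data type. A reduced, self-contained sketch of that dispatch idiom follows; the doWork* methods are hypothetical placeholders standing in for the tearDouble/tearFloat/tearHalf calls, not part of nd4j.

import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.factory.Nd4j;

final class TypeDispatchSketch {
    // Hypothetical stand-ins for the typed native calls in tear() above.
    static void doWorkDouble() { System.out.println("double path"); }
    static void doWorkFloat()  { System.out.println("float path"); }
    static void doWorkHalf()   { System.out.println("half path"); }

    public static void main(String[] args) {
        // Branch once on the global type, exactly as tear() does.
        if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
            doWorkDouble();
        } else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
            doWorkFloat();
        } else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
            doWorkHalf();
        }
    }
}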
Example 8
Source File: AbstractCompressor.java From deeplearning4j with Apache License 2.0
protected DataTypeEx getGlobalTypeEx() {
    DataType type = Nd4j.dataType();

    return convertType(type);
}
Example 9
Source File: ConvolutionTests.java From nd4j with Apache License 2.0
@Test
@Ignore
public void testCompareIm2ColImpl() {

    int[] miniBatches = {1, 3, 5};
    int[] depths = {1, 3, 5};
    int[] inHeights = {5, 21};
    int[] inWidths = {5, 21};
    int[] strideH = {1, 2};
    int[] strideW = {1, 2};
    int[] sizeW = {1, 2, 3};
    int[] sizeH = {1, 2, 3};
    int[] padH = {0, 1, 2};
    int[] padW = {0, 1, 2};
    boolean[] coverall = {false, true};

    DataBuffer.Type[] types = new DataBuffer.Type[] {DataBuffer.Type.FLOAT, DataBuffer.Type.DOUBLE,
                    DataBuffer.Type.FLOAT, DataBuffer.Type.DOUBLE};
    DataBuffer.AllocationMode[] modes = new DataBuffer.AllocationMode[] {DataBuffer.AllocationMode.HEAP,
                    DataBuffer.AllocationMode.HEAP, DataBuffer.AllocationMode.DIRECT,
                    DataBuffer.AllocationMode.DIRECT};

    String factoryClassName = Nd4j.factory().getClass().toString().toLowerCase();
    if (factoryClassName.contains("jcublas") || factoryClassName.contains("cuda")) {
        //Only test direct for CUDA; test all for CPU
        types = new DataBuffer.Type[] {DataBuffer.Type.FLOAT, DataBuffer.Type.DOUBLE};
        modes = new DataBuffer.AllocationMode[] {DataBuffer.AllocationMode.DIRECT,
                        DataBuffer.AllocationMode.DIRECT};
    }

    DataBuffer.Type initialType = Nd4j.dataType();
    for (int i = 0; i < types.length; i++) {
        DataBuffer.Type type = types[i];
        DataBuffer.AllocationMode mode = modes[i];

        DataTypeUtil.setDTypeForContext(type);
        Nd4j.alloc = mode;
        AllocUtil.setAllocationModeForContext(mode);

        for (int m : miniBatches) {
            for (int d : depths) {
                for (int h : inHeights) {
                    for (int w : inWidths) {
                        for (int sh : strideH) {
                            for (int sw : strideW) {
                                for (int kh : sizeH) {
                                    for (int kw : sizeW) {
                                        for (int ph : padH) {
                                            for (int pw : padW) {
                                                if ((w - kw + 2 * pw) % sw != 0 || (h - kh + 2 * ph) % sh != 0)
                                                    continue; //(w - kw + 2*pw)/sw + 1 is not an integer, i.e., number of outputs doesn't fit

                                                System.out.println("Running " + m + " " + d + " " + h + " " + w);
                                                for (boolean cAll : coverall) {

                                                    INDArray in = Nd4j.rand(new int[] {m, d, h, w});
                                                    //assertEquals(in.data().allocationMode(), mode);
                                                    //assertEquals(in.data().dataType(), opType);

                                                    INDArray outOrig = OldConvolution.im2col(in, kh, kw, sh, sw, ph, pw, -1, cAll); //Old implementation
                                                    INDArray outNew = Convolution.im2col(in, kh, kw, sh, sw, ph, pw, cAll); //Current implementation

                                                    assertArrayEquals(outOrig.data().asFloat(), outNew.data().asFloat(), 0.01f);
                                                    assertEquals(outOrig, outNew);
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    DataTypeUtil.setDTypeForContext(initialType);
}
Example 10
Source File: TestGraphNodes.java From deeplearning4j with Apache License 2.0
@Test
public void testStackUnstackNodeVariableLength() {
    Nd4j.getRandom().setSeed(12345);
    GraphVertex stack = new StackVertex(null, "", -1, Nd4j.dataType());

    //Test stack with variable length + mask arrays
    INDArray in0 = Nd4j.rand(new int[] {5, 2, 5});
    INDArray in1 = Nd4j.rand(new int[] {5, 2, 6});
    INDArray in2 = Nd4j.rand(new int[] {5, 2, 7});

    INDArray mask0 = Nd4j.ones(5, 5);
    INDArray mask1 = Nd4j.ones(5, 6);
    INDArray mask2 = Nd4j.ones(5, 7);

    stack.setInputs(in0, in1, in2);
    Pair<INDArray, MaskState> p = stack.feedForwardMaskArrays(new INDArray[] {mask0, mask1, mask2}, MaskState.Active, 5);
    assertArrayEquals(new long[] {15, 7}, p.getFirst().shape());
    assertEquals(MaskState.Active, p.getSecond());

    INDArray out = stack.doForward(false, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(in0, out.get(NDArrayIndex.interval(0, 5), NDArrayIndex.all(), NDArrayIndex.interval(0, 5)));
    assertEquals(in1, out.get(NDArrayIndex.interval(5, 10), NDArrayIndex.all(), NDArrayIndex.interval(0, 6)));
    assertEquals(in2, out.get(NDArrayIndex.interval(10, 15), NDArrayIndex.all(), NDArrayIndex.interval(0, 7)));

    stack.setEpsilon(out);
    Pair<Gradient, INDArray[]> b = stack.doBackward(false, LayerWorkspaceMgr.noWorkspaces());

    assertEquals(in0, b.getSecond()[0]);
    assertEquals(in1, b.getSecond()[1]);
    assertEquals(in2, b.getSecond()[2]);

    //Test unstack with variable length + mask arrays
    //Note that we don't actually need changes here - unstack has a single input, and the unstacked mask
    //might be a bit longer than we really need, but it'll still be correct
    GraphVertex unstack0 = new UnstackVertex(null, "u0", 0, 0, 3, Nd4j.dataType());
    GraphVertex unstack1 = new UnstackVertex(null, "u1", 0, 1, 3, Nd4j.dataType());
    GraphVertex unstack2 = new UnstackVertex(null, "u2", 0, 2, 3, Nd4j.dataType());

    unstack0.setInputs(out);
    unstack1.setInputs(out);
    unstack2.setInputs(out);
    INDArray f0 = unstack0.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    INDArray f1 = unstack1.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    INDArray f2 = unstack2.doForward(true, LayerWorkspaceMgr.noWorkspaces());

    assertEquals(in0, f0.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 5)));
    assertEquals(in1, f1.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 6)));
    assertEquals(in2, f2.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 7)));

    Pair<INDArray, MaskState> p0 = unstack0.feedForwardMaskArrays(new INDArray[] {p.getFirst()}, MaskState.Active, 5);
    Pair<INDArray, MaskState> p1 = unstack1.feedForwardMaskArrays(new INDArray[] {p.getFirst()}, MaskState.Active, 5);
    Pair<INDArray, MaskState> p2 = unstack2.feedForwardMaskArrays(new INDArray[] {p.getFirst()}, MaskState.Active, 5);

    assertEquals(mask0, p0.getFirst().get(NDArrayIndex.all(), NDArrayIndex.interval(0, 5)));
    assertEquals(mask1, p1.getFirst().get(NDArrayIndex.all(), NDArrayIndex.interval(0, 6)));
    assertEquals(mask2, p2.getFirst().get(NDArrayIndex.all(), NDArrayIndex.interval(0, 7)));
}
Example 11
Source File: DataTypeValidationTests.java From nd4j with Apache License 2.0
@Before
public void setUp() {
    initialType = Nd4j.dataType();
    Nd4j.setDataType(DataBuffer.Type.FLOAT);
}
Example 12
Source File: ConvolutionTestsC.java From deeplearning4j with Apache License 2.0
@Test
@Ignore
public void testCompareIm2ColImpl() {

    int[] miniBatches = {1, 3, 5};
    int[] depths = {1, 3, 5};
    int[] inHeights = {5, 21};
    int[] inWidths = {5, 21};
    int[] strideH = {1, 2};
    int[] strideW = {1, 2};
    int[] sizeW = {1, 2, 3};
    int[] sizeH = {1, 2, 3};
    int[] padH = {0, 1, 2};
    int[] padW = {0, 1, 2};
    boolean[] coverall = {false, true};

    DataType[] types = new DataType[] {DataType.FLOAT, DataType.DOUBLE, DataType.FLOAT, DataType.DOUBLE};
    DataBuffer.AllocationMode[] modes = new DataBuffer.AllocationMode[] {DataBuffer.AllocationMode.HEAP,
                    DataBuffer.AllocationMode.HEAP, DataBuffer.AllocationMode.DIRECT,
                    DataBuffer.AllocationMode.DIRECT};

    String factoryClassName = Nd4j.factory().getClass().toString().toLowerCase();
    if (factoryClassName.contains("jcublas") || factoryClassName.contains("cuda")) {
        //Only test direct for CUDA; test all for CPU
        types = new DataType[] {DataType.FLOAT, DataType.DOUBLE};
        modes = new DataBuffer.AllocationMode[] {DataBuffer.AllocationMode.DIRECT,
                        DataBuffer.AllocationMode.DIRECT};
    }

    DataType initialType = Nd4j.dataType();
    for (int i = 0; i < types.length; i++) {
        DataType type = types[i];
        DataBuffer.AllocationMode mode = modes[i];

        DataTypeUtil.setDTypeForContext(type);
        Nd4j.alloc = mode;
        AllocUtil.setAllocationModeForContext(mode);

        for (int m : miniBatches) {
            for (int d : depths) {
                for (int h : inHeights) {
                    for (int w : inWidths) {
                        for (int sh : strideH) {
                            for (int sw : strideW) {
                                for (int kh : sizeH) {
                                    for (int kw : sizeW) {
                                        for (int ph : padH) {
                                            for (int pw : padW) {
                                                if ((w - kw + 2 * pw) % sw != 0 || (h - kh + 2 * ph) % sh != 0)
                                                    continue; //(w - kw + 2*pw)/sw + 1 is not an integer, i.e., number of outputs doesn't fit

                                                System.out.println("Running " + m + " " + d + " " + h + " " + w);
                                                for (boolean cAll : coverall) {

                                                    INDArray in = Nd4j.rand(new int[] {m, d, h, w});
                                                    //assertEquals(in.data().allocationMode(), mode);
                                                    //assertEquals(in.data().dataType(), opType);

                                                    INDArray outOrig = OldConvolution.im2col(in, kh, kw, sh, sw, ph, pw, -1, cAll); //Old implementation
                                                    INDArray outNew = Convolution.im2col(in, kh, kw, sh, sw, ph, pw, cAll); //Current implementation

                                                    assertEquals(outOrig, outNew);
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    DataTypeUtil.setDTypeForContext(initialType);
}
Example 13
Source File: LongTests.java From nd4j with Apache License 2.0
public LongTests(Nd4jBackend backend) {
    super(backend);
    this.initialType = Nd4j.dataType();
}
Example 14
Source File: SpecialWorkspaceTests.java From deeplearning4j with Apache License 2.0
public SpecialWorkspaceTests(Nd4jBackend backend) {
    super(backend);
    this.initialType = Nd4j.dataType();
}
Example 15
Source File: TestEigen.java From nd4j with Apache License 2.0
public TestEigen(Nd4jBackend backend) {
    super(backend);
    initialType = Nd4j.dataType();
}
Example 16
Source File: Nd4jTestsComparisonFortran.java From nd4j with Apache License 2.0
public Nd4jTestsComparisonFortran(Nd4jBackend backend) {
    super(backend);
    this.initialType = Nd4j.dataType();
}
Example 17
Source File: BaseCudaDataBuffer.java From nd4j with Apache License 2.0
public BaseCudaDataBuffer(long length) {
    this(length, Nd4j.dataType() == Type.DOUBLE ? 8 : Nd4j.dataType() == Type.FLOAT ? 4 : 2);
}
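The ternary in this constructor encodes the per-element byte width of the global type: 8 bytes when Nd4j.dataType() is DOUBLE, 4 for FLOAT, and 2 otherwise (i.e. HALF). A standalone sketch of the same mapping follows; the helper name is our own invention for illustration.

import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.factory.Nd4j;

final class ElementSizeSketch {
    // Hypothetical helper mirroring the ternary in the constructor above.
    static int elementSizeForGlobalType() {
        DataBuffer.Type t = Nd4j.dataType();
        if (t == DataBuffer.Type.DOUBLE) return 8; // 64-bit double
        if (t == DataBuffer.Type.FLOAT)  return 4; // 32-bit float
        return 2;                                  // assume HALF (16-bit)
    }

    public static void main(String[] args) {
        System.out.println(elementSizeForGlobalType()); // e.g. 4 under a FLOAT default
    }
}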
Example 18
Source File: TestEigen.java From deeplearning4j with Apache License 2.0
public TestEigen(Nd4jBackend backend) {
    super(backend);
    initialType = Nd4j.dataType();
}
Example 19
Source File: BasicSerDeTests.java From nd4j with Apache License 2.0
@Test
public void testBasicDataTypeSwitch1() throws Exception {
    DataBuffer.Type initialType = Nd4j.dataType();

    Nd4j.setDataType(DataBuffer.Type.FLOAT);

    INDArray array = Nd4j.create(new float[] {1, 2, 3, 4, 5, 6});

    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    Nd4j.write(bos, array);

    Nd4j.setDataType(DataBuffer.Type.DOUBLE);

    INDArray restored = Nd4j.read(new ByteArrayInputStream(bos.toByteArray()));

    assertEquals(Nd4j.create(new float[] {1, 2, 3, 4, 5, 6}), restored);
    assertEquals(8, restored.data().getElementSize());
    assertEquals(8, restored.shapeInfoDataBuffer().getElementSize());

    Nd4j.setDataType(initialType);
}
Example 20
Source File: CudaExecutioner.java From nd4j with Apache License 2.0
@Override
public INDArray thresholdDecode(INDArray encoded, INDArray target) {
    DataBuffer buffer = encoded.data();

    if (buffer.dataType() != DataBuffer.Type.INT)
        throw new UnsupportedOperationException();

    long compressedLength = buffer.getInt(0);
    long originalLength = buffer.getInt(1);

    if (target.lengthLong() != originalLength)
        throw new ND4JIllegalStateException("originalLength [" + originalLength
                        + "] stored in encoded array doesn't match target length [" + target.lengthLong() + "]");

    DataBuffer result = target.data();

    CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();

    //nativeOps.memsetAsync(AtomicAllocator.getInstance().getPointer(result), 0, result.length(), 0, context.getOldStream());

    if (extraz.get() == null)
        extraz.set(new PointerPointer(32));

    PointerPointer extras = extraz.get().put(1, context.getOldStream());

    //log.info("DEC Source length: {}", buffer.length());
    //log.info("DEC Source: {}", Arrays.toString(buffer.asInt()));

    if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.decodeThresholdFloat(extras, AtomicAllocator.getInstance().getPointer(buffer), compressedLength,
                        (FloatPointer) AtomicAllocator.getInstance().getPointer(result));
    } else if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.decodeThresholdDouble(extras, AtomicAllocator.getInstance().getPointer(buffer), compressedLength,
                        (DoublePointer) AtomicAllocator.getInstance().getPointer(result));
    } else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
        nativeOps.decodeThresholdHalf(extras, AtomicAllocator.getInstance().getPointer(buffer), compressedLength,
                        (ShortPointer) AtomicAllocator.getInstance().getPointer(result));
    }

    AtomicAllocator.getInstance().getAllocationPoint(result).tickDeviceWrite();

    //DataBuffer result = Nd4j.getNDArrayFactory().convertDataEx(DataBuffer.TypeEx.THRESHOLD, buffer, getGlobalTypeEx());

    return target;
}