Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#shapeInfoDataBuffer()
The following examples show how to use
org.nd4j.linalg.api.ndarray.INDArray#shapeInfoDataBuffer().
Each example is taken from an open-source project; the project, source file, and license are noted above each snippet.
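Before the project examples, here is a minimal standalone sketch of what the shape-info buffer holds and how the static Shape helpers decode it. It is not taken from any of the projects below; the class name ShapeInfoDemo is illustrative, and the sketch assumes an nd4j backend (for example nd4j-native) is on the classpath.

import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.shape.Shape;
import org.nd4j.linalg.factory.Nd4j;

import java.util.Arrays;

public class ShapeInfoDemo {
    public static void main(String[] args) {
        INDArray arr = Nd4j.create(3, 4);                  // 3 x 4 matrix
        DataBuffer shapeInfo = arr.shapeInfoDataBuffer();  // packed rank/shape/stride/order metadata

        // Raw view of the packed buffer
        System.out.println("raw shapeInfo = " + Arrays.toString(shapeInfo.asInt()));

        // Decoded via the static Shape helpers (the same calls exercised in Examples 8 and 9)
        System.out.println("rank  = " + Shape.rank(shapeInfo));
        System.out.println("shape = " + Arrays.toString(Shape.shape(shapeInfo)));
        for (int j = 0; j < Shape.rank(shapeInfo); j++) {
            System.out.println("stride[" + j + "] = " + Shape.stride(shapeInfo, j));
        }
    }
}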
Example 1
Source File: SynchronousFlowController.java From nd4j with Apache License 2.0
protected void prepareDelayedMemory(INDArray array) {
    if (configuration.getMemoryModel() == Configuration.MemoryModel.DELAYED) {
        AllocationPoint pointData = allocator.getAllocationPoint(array.shapeInfoDataBuffer());
        AllocationPoint pointShape = allocator.getAllocationPoint(array.shapeInfoDataBuffer());

        if (pointData.getAllocationStatus() != AllocationStatus.DEVICE)
            prepareDelayedMemory(array.data());

        if (pointShape.getAllocationStatus() == AllocationStatus.HOST) {
            DataBuffer oShape = array.shapeInfoDataBuffer();
            DataBuffer nShape = Nd4j.getConstantHandler().relocateConstantSpace(oShape);
            if (nShape == oShape)
                Nd4j.getConstantHandler().moveToConstantSpace(nShape);

            ((JCublasNDArray) array).setShapeInfoDataBuffer(nShape);
        }
    }
}
Example 2
Source File: SynchronousFlowController.java From deeplearning4j with Apache License 2.0
protected void prepareDelayedMemory(INDArray array) {
    if (configuration.getMemoryModel() == Configuration.MemoryModel.DELAYED) {
        val pointData = allocator.getAllocationPoint(array.shapeInfoDataBuffer());
        val pointShape = allocator.getAllocationPoint(array.shapeInfoDataBuffer());

        if (pointData.getAllocationStatus() != AllocationStatus.DEVICE)
            prepareDelayedMemory(array.data());

        if (pointShape.getAllocationStatus() == AllocationStatus.HOST) {
            val oShape = array.shapeInfoDataBuffer();
            val nShape = Nd4j.getConstantHandler().relocateConstantSpace(oShape);
            if (nShape == oShape)
                Nd4j.getConstantHandler().moveToConstantSpace(nShape);

            ((JCublasNDArray) array).setShapeInfoDataBuffer(nShape);
        }
    }
}
Example 3
Source File: AbstractCompressor.java From nd4j with Apache License 2.0
@Override
public INDArray decompress(INDArray array) {
    DataBuffer buffer = decompress(array.data());
    DataBuffer shapeInfo = array.shapeInfoDataBuffer();
    INDArray rest = Nd4j.createArrayFromShapeBuffer(buffer, shapeInfo);

    return rest;
}
Example 4
Source File: AbstractCompressor.java From deeplearning4j with Apache License 2.0
@Override
public INDArray decompress(INDArray array) {
    if (!array.isCompressed())
        return array;

    val descriptor = ((CompressedDataBuffer) array.data()).getCompressionDescriptor();
    val buffer = decompress(array.data(), descriptor.getOriginalDataType());
    val shapeInfo = array.shapeInfoDataBuffer();
    INDArray rest = Nd4j.createArrayFromShapeBuffer(buffer, shapeInfo);

    return rest;
}
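Examples 3 and 4 both rebuild an INDArray by pairing a decompressed data buffer with the original array's shape-info buffer via Nd4j.createArrayFromShapeBuffer(). The fragment below is a minimal sketch of that pattern in isolation, using a copied data buffer instead of a decompressed one; the variable names are illustrative, and it assumes the same nd4j API used in the examples above.

// Pair a data buffer with an existing shape-info buffer to build a new INDArray
// that carries identical shape/stride/ordering metadata.
INDArray source = Nd4j.rand(3, 4);
DataBuffer dataCopy = source.data().dup();           // independent copy of the values
DataBuffer shapeInfo = source.shapeInfoDataBuffer(); // reuse the source's shape metadata
INDArray rebuilt = Nd4j.createArrayFromShapeBuffer(dataCopy, shapeInfo);
// rebuilt now has the same shape and strides as source, backed by the copied data.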
Example 5
Source File: DeviceTADManager.java From nd4j with Apache License 2.0
@Override
public Pair<DataBuffer, DataBuffer> getTADOnlyShapeInfo(INDArray array, int[] dimension) {
    /*
        so, we check, if we have things cached.
        If we don't - we just create new TAD shape, and push it to constant memory
    */
    if (dimension != null && dimension.length > 1)
        Arrays.sort(dimension);

    Integer deviceId = AtomicAllocator.getInstance().getDeviceId();

    //log.info("Requested TAD for device [{}], dimensions: [{}]", deviceId, Arrays.toString(dimension));

    //extract the dimensions and shape buffer for comparison
    TadDescriptor descriptor = new TadDescriptor(array, dimension);

    if (!tadCache.get(deviceId).containsKey(descriptor)) {
        log.trace("Creating new TAD...");
        //create the TAD with the shape information and corresponding offsets
        //note that we use native code to get access to the shape information.
        Pair<DataBuffer, DataBuffer> buffers = super.getTADOnlyShapeInfo(array, dimension);

        /**
         * Store the buffers in constant memory.
         * The main implementation of this is cuda right now.
         *
         * Explanation from: http://cuda-programming.blogspot.jp/2013/01/what-is-constant-memory-in-cuda.html
         * The CUDA language makes available another kind of memory known as constant memory.
         * As the opName may indicate, we use constant memory for data that will not change over
         * the course of a kernel execution.
         *
         * Why Constant Memory?
         * NVIDIA hardware provides 64KB of constant memory that it treats differently than it
         * treats standard global memory. In some situations, using constant memory rather than
         * global memory will reduce the required memory bandwidth.
         *
         * NOTE HERE FOR US: We use 48kb of it using these methods.
         *
         * Note also that we use the {@link AtomicAllocator} which is the cuda memory manager
         * for moving the current host space data buffer to constant memory.
         *
         * We do this for device access to shape information.
         */
        if (buffers.getFirst() != array.shapeInfoDataBuffer())
            AtomicAllocator.getInstance().moveToConstant(buffers.getFirst());
        /**
         * @see {@link org.nd4j.jita.constant.ProtectedCudaConstantHandler}
         */
        if (buffers.getSecond() != null)
            AtomicAllocator.getInstance().moveToConstant(buffers.getSecond());

        // so, at this point we have buffer valid on host side.
        // And we just need to replace DevicePointer with constant pointer
        tadCache.get(deviceId).put(descriptor, buffers);

        bytes.addAndGet((buffers.getFirst().length() * 4));

        if (buffers.getSecond() != null)
            bytes.addAndGet(buffers.getSecond().length() * 8);

        log.trace("Using TAD from cache...");
    }

    return tadCache.get(deviceId).get(descriptor);
}
Example 6
Source File: ProtectedCudaShapeInfoProviderTest.java From nd4j with Apache License 2.0
@Test
public void testPurge3() throws Exception {
    INDArray arrayA = Nd4j.create(10, 10);
    DataBuffer shapeInfoA = arrayA.shapeInfoDataBuffer();

    int[] shapeA = shapeInfoA.asInt();

    log.info("ShapeA: {}", shapeA);

    Nd4j.getMemoryManager().purgeCaches();

    INDArray arrayB = Nd4j.create(20, 20);
    DataBuffer shapeInfoB = arrayB.shapeInfoDataBuffer();

    int[] shapeB = shapeInfoB.asInt();

    log.info("ShapeB: {}", shapeB);
}
Example 7
Source File: CpuTADManager.java From nd4j with Apache License 2.0
@Override
public Pair<DataBuffer, DataBuffer> getTADOnlyShapeInfo(INDArray array, int[] dimension) {
    if (dimension != null && dimension.length > 1)
        Arrays.sort(dimension);

    if (dimension == null || dimension.length >= 1 && dimension[0] == Integer.MAX_VALUE) {
        return new Pair<>(array.shapeInfoDataBuffer(), null);
    } else {
        TadDescriptor descriptor = new TadDescriptor(array, dimension);

        if (!cache.containsKey(descriptor)) {
            int dimensionLength = dimension.length;

            // FIXME: this is fast triage, remove it later
            int targetRank = array.rank(); //dimensionLength <= 1 ? 2 : dimensionLength;
            long offsetLength;
            long tadLength = 1;
            for (int i = 0; i < dimensionLength; i++) {
                tadLength *= array.shape()[dimension[i]];
            }

            offsetLength = array.lengthLong() / tadLength;

            DataBuffer outputBuffer = new LongBuffer(targetRank * 2 + 4);
            DataBuffer offsetsBuffer = new LongBuffer(offsetLength);

            DataBuffer dimensionBuffer = constantHandler.getConstantBuffer(dimension);

            Pointer dimensionPointer = dimensionBuffer.addressPointer();
            Pointer xShapeInfo = array.shapeInfoDataBuffer().addressPointer();
            Pointer targetPointer = outputBuffer.addressPointer();
            Pointer offsetsPointer = offsetsBuffer.addressPointer();

            nativeOps.tadOnlyShapeInfo((LongPointer) xShapeInfo, (IntPointer) dimensionPointer, dimension.length,
                            (LongPointer) targetPointer, new LongPointerWrapper(offsetsPointer));

            // If the line below will be uncommented, shapes from JVM will be used on native side
            //outputBuffer = array.tensorAlongDimension(0, dimension).shapeInfoDataBuffer();
            Pair<DataBuffer, DataBuffer> pair = new Pair<>(outputBuffer, offsetsBuffer);
            if (counter.get() < MAX_ENTRIES) {
                counter.incrementAndGet();
                cache.put(descriptor, pair);

                bytes.addAndGet((outputBuffer.length() * 4) + (offsetsBuffer.length() * 8));
            }
            return pair;
        }

        return cache.get(descriptor);
    }
}
Example 8
Source File: StaticShapeTests.java From nd4j with Apache License 2.0
@Test
public void testBufferToIntShapeStrideMethods() {
    //Specifically: Shape.shape(IntBuffer), Shape.shape(DataBuffer)
    //.isRowVectorShape(DataBuffer), .isRowVectorShape(IntBuffer)
    //Shape.size(DataBuffer,int), Shape.size(IntBuffer,int)
    //Also: Shape.stride(IntBuffer), Shape.stride(DataBuffer)
    //Shape.stride(DataBuffer,int), Shape.stride(IntBuffer,int)

    List<List<Pair<INDArray, String>>> lists = new ArrayList<>();
    lists.add(NDArrayCreationUtil.getAllTestMatricesWithShape(3, 4, 12345));
    lists.add(NDArrayCreationUtil.getAllTestMatricesWithShape(1, 4, 12345));
    lists.add(NDArrayCreationUtil.getAllTestMatricesWithShape(3, 1, 12345));
    lists.add(NDArrayCreationUtil.getAll3dTestArraysWithShape(12345, 3, 4, 5));
    lists.add(NDArrayCreationUtil.getAll4dTestArraysWithShape(12345, 3, 4, 5, 6));
    lists.add(NDArrayCreationUtil.getAll4dTestArraysWithShape(12345, 3, 1, 5, 1));
    lists.add(NDArrayCreationUtil.getAll5dTestArraysWithShape(12345, 3, 4, 5, 6, 7));
    lists.add(NDArrayCreationUtil.getAll6dTestArraysWithShape(12345, 3, 4, 5, 6, 7, 8));

    val shapes = new long[][] {{3, 4}, {1, 4}, {3, 1}, {3, 4, 5}, {3, 4, 5, 6}, {3, 1, 5, 1}, {3, 4, 5, 6, 7},
                    {3, 4, 5, 6, 7, 8}};

    for (int i = 0; i < shapes.length; i++) {
        List<Pair<INDArray, String>> list = lists.get(i);
        val shape = shapes[i];

        for (Pair<INDArray, String> p : list) {
            INDArray arr = p.getFirst();

            assertArrayEquals(shape, arr.shape());

            val thisStride = arr.stride();

            val ib = arr.shapeInfo();
            DataBuffer db = arr.shapeInfoDataBuffer();

            //Check shape calculation
            assertEquals(shape.length, Shape.rank(ib));
            assertEquals(shape.length, Shape.rank(db));

            assertArrayEquals(shape, Shape.shape(ib));
            assertArrayEquals(shape, Shape.shape(db));

            for (int j = 0; j < shape.length; j++) {
                assertEquals(shape[j], Shape.size(ib, j));
                assertEquals(shape[j], Shape.size(db, j));

                assertEquals(thisStride[j], Shape.stride(ib, j));
                assertEquals(thisStride[j], Shape.stride(db, j));
            }

            //Check base offset
            assertEquals(Shape.offset(ib), Shape.offset(db));

            //Check offset calculation:
            NdIndexIterator iter = new NdIndexIterator(shape);
            while (iter.hasNext()) {
                val next = iter.next();
                long offset1 = Shape.getOffset(ib, next);
                assertEquals(offset1, Shape.getOffset(db, next));

                switch (shape.length) {
                    case 2:
                        assertEquals(offset1, Shape.getOffset(ib, next[0], next[1]));
                        assertEquals(offset1, Shape.getOffset(db, next[0], next[1]));
                        break;
                    case 3:
                        assertEquals(offset1, Shape.getOffset(ib, next[0], next[1], next[2]));
                        assertEquals(offset1, Shape.getOffset(db, next[0], next[1], next[2]));
                        break;
                    case 4:
                        assertEquals(offset1, Shape.getOffset(ib, next[0], next[1], next[2], next[3]));
                        assertEquals(offset1, Shape.getOffset(db, next[0], next[1], next[2], next[3]));
                        break;
                    case 5:
                    case 6:
                        //No 5 and 6d getOffset overloads
                        break;
                    default:
                        throw new RuntimeException();
                }
            }
        }
    }
}
Example 9
Source File: StaticShapeTests.java From deeplearning4j with Apache License 2.0
@Test
public void testBufferToIntShapeStrideMethods() {
    //Specifically: Shape.shape(IntBuffer), Shape.shape(DataBuffer)
    //.isRowVectorShape(DataBuffer), .isRowVectorShape(IntBuffer)
    //Shape.size(DataBuffer,int), Shape.size(IntBuffer,int)
    //Also: Shape.stride(IntBuffer), Shape.stride(DataBuffer)
    //Shape.stride(DataBuffer,int), Shape.stride(IntBuffer,int)

    List<List<Pair<INDArray, String>>> lists = new ArrayList<>();
    lists.add(NDArrayCreationUtil.getAllTestMatricesWithShape(3, 4, 12345, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAllTestMatricesWithShape(1, 4, 12345, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAllTestMatricesWithShape(3, 1, 12345, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAll3dTestArraysWithShape(12345, new long[]{3, 4, 5}, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAll4dTestArraysWithShape(12345, new int[]{3, 4, 5, 6}, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAll4dTestArraysWithShape(12345, new int[]{3, 1, 5, 1}, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAll5dTestArraysWithShape(12345, new int[]{3, 4, 5, 6, 7}, DataType.DOUBLE));
    lists.add(NDArrayCreationUtil.getAll6dTestArraysWithShape(12345, new int[]{3, 4, 5, 6, 7, 8}, DataType.DOUBLE));

    val shapes = new long[][] {{3, 4}, {1, 4}, {3, 1}, {3, 4, 5}, {3, 4, 5, 6}, {3, 1, 5, 1}, {3, 4, 5, 6, 7},
                    {3, 4, 5, 6, 7, 8}};

    for (int i = 0; i < shapes.length; i++) {
        List<Pair<INDArray, String>> list = lists.get(i);
        val shape = shapes[i];

        for (Pair<INDArray, String> p : list) {
            INDArray arr = p.getFirst();

            assertArrayEquals(shape, arr.shape());

            val thisStride = arr.stride();

            val ib = arr.shapeInfo();
            DataBuffer db = arr.shapeInfoDataBuffer();

            //Check shape calculation
            assertEquals(shape.length, Shape.rank(ib));
            assertEquals(shape.length, Shape.rank(db));

            assertArrayEquals(shape, Shape.shape(ib));
            assertArrayEquals(shape, Shape.shape(db));

            for (int j = 0; j < shape.length; j++) {
                assertEquals(shape[j], Shape.size(ib, j));
                assertEquals(shape[j], Shape.size(db, j));

                assertEquals(thisStride[j], Shape.stride(ib, j));
                assertEquals(thisStride[j], Shape.stride(db, j));
            }

            //Check base offset
            assertEquals(Shape.offset(ib), Shape.offset(db));

            //Check offset calculation:
            NdIndexIterator iter = new NdIndexIterator(shape);
            while (iter.hasNext()) {
                val next = iter.next();
                long offset1 = Shape.getOffset(ib, next);
                assertEquals(offset1, Shape.getOffset(db, next));

                switch (shape.length) {
                    case 2:
                        assertEquals(offset1, Shape.getOffset(ib, next[0], next[1]));
                        assertEquals(offset1, Shape.getOffset(db, next[0], next[1]));
                        break;
                    case 3:
                        assertEquals(offset1, Shape.getOffset(ib, next[0], next[1], next[2]));
                        assertEquals(offset1, Shape.getOffset(db, next[0], next[1], next[2]));
                        break;
                    case 4:
                        assertEquals(offset1, Shape.getOffset(ib, next[0], next[1], next[2], next[3]));
                        assertEquals(offset1, Shape.getOffset(db, next[0], next[1], next[2], next[3]));
                        break;
                    case 5:
                    case 6:
                        //No 5 and 6d getOffset overloads
                        break;
                    default:
                        throw new RuntimeException();
                }
            }
        }
    }
}
Example 10
Source File: ProtectedCudaShapeInfoProviderTest.java From nd4j with Apache License 2.0
@Test
public void testPurge2() throws Exception {
    INDArray arrayA = Nd4j.create(10, 10);
    DataBuffer shapeInfoA = arrayA.shapeInfoDataBuffer();

    INDArray arrayE = Nd4j.create(10, 10);
    DataBuffer shapeInfoE = arrayE.shapeInfoDataBuffer();

    int[] arrayShapeA = shapeInfoA.asInt();

    assertTrue(shapeInfoA == shapeInfoE);

    ShapeDescriptor descriptor = new ShapeDescriptor(arrayA.shape(), arrayA.stride(), 0, arrayA.elementWiseStride(),
                    arrayA.ordering());

    ConstantProtector protector = ConstantProtector.getInstance();

    AllocationPoint pointA = AtomicAllocator.getInstance().getAllocationPoint(arrayA.shapeInfoDataBuffer());

    assertEquals(true, protector.containsDataBuffer(0, descriptor));

    ////////////////////////////////////
    Nd4j.getMemoryManager().purgeCaches();
    ////////////////////////////////////

    assertEquals(false, protector.containsDataBuffer(0, descriptor));

    INDArray arrayB = Nd4j.create(10, 10);
    DataBuffer shapeInfoB = arrayB.shapeInfoDataBuffer();

    assertFalse(shapeInfoA == shapeInfoB);

    AllocationPoint pointB = AtomicAllocator.getInstance().getAllocationPoint(arrayB.shapeInfoDataBuffer());

    assertArrayEquals(arrayShapeA, shapeInfoB.asInt());

    // pointers should be equal, due to offsets reset
    assertEquals(pointA.getPointers().getDevicePointer().address(), pointB.getPointers().getDevicePointer().address());
}