private void growCapacity(int idx) {
    if (container == null) {
        // lazily create the backing array with an initial capacity of 10
        container = Nd4j.create(10);
    } else if (idx >= container.length()) {
        // grow to double the current capacity, or to idx if that is larger, and copy the old contents
        val max = Math.max(container.length() * 2, idx);
        INDArray newContainer = Nd4j.create(max);
        newContainer.put(new INDArrayIndex[] {NDArrayIndex.interval(0, container.length())}, container);
        container = newContainer;
    }
}
// Fragment of a mask-adjustment routine for under/over-sampling; the per-window loop that uses
// minorityLabel and targetDist (and computes currentTimeSliceStart) is elided in this snippet.
public INDArray adjustMasks(INDArray label, INDArray labelMask, int minorityLabel, double targetDist) {
    labelMask = Nd4j.ones(label.size(0), label.size(2));
    INDArray bernoullis = Nd4j.zeros(labelMask.shape());
    long currentTimeSliceEnd = label.size(2);

    // ... (loop over time windows elided; currentTimeSliceStart is set per window)
    INDArray currentWindowBernoulli = bernoullis.get(NDArrayIndex.all(),
            NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
    INDArray currentMask = labelMask.get(NDArrayIndex.all(),
            NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
    INDArray currentLabel;
    // ... (per-window keep-probability calculation elided)

    return Nd4j.getExecutioner().exec(
            new BernoulliDistribution(Nd4j.createUninitialized(bernoullis.shape()), bernoullis),
            Nd4j.getRandom());
/**
 * Setup the given byte buffer for serialization (note that this is for uncompressed INDArrays).
 * Layout:
 *   4 bytes int for rank
 *   4 bytes for data opType
 *   shape buffer
 *   data buffer
 *
 * @param arr       the array to setup
 * @param allocated the byte buffer to setup
 * @param rewind    whether to rewind the byte buffer or not
 */
public static void doByteBufferPutUnCompressed(INDArray arr, ByteBuffer allocated, boolean rewind) {
    // ensure we send data to host memory
    Nd4j.getExecutioner().commit();
    Nd4j.getAffinityManager().ensureLocation(arr, AffinityManager.Location.HOST);

    ByteBuffer buffer = arr.data().pointer().asByteBuffer().order(ByteOrder.nativeOrder());
    ByteBuffer shapeBuffer = arr.shapeInfoDataBuffer().pointer().asByteBuffer().order(ByteOrder.nativeOrder());
    // 2 four-byte ints at the beginning
    allocated.putInt(arr.rank());
    // put data opType next so it's self-describing
    allocated.putInt(arr.data().dataType().ordinal());
    allocated.put(shapeBuffer);
    allocated.put(buffer);
    if (rewind)
        allocated.rewind();
}
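// Minimal usage sketch (not from the original source). The capacity arithmetic is an assumption:
// 8 header bytes plus the raw sizes of the shape-info and data buffers of the array being written.
INDArray arr = Nd4j.linspace(1, 12, 12).reshape(3, 4);
int capacity = 8
        + (int) (arr.shapeInfoDataBuffer().length() * arr.shapeInfoDataBuffer().getElementSize())
        + (int) (arr.data().length() * arr.data().getElementSize());
ByteBuffer out = ByteBuffer.allocateDirect(capacity).order(ByteOrder.nativeOrder());
doByteBufferPutUnCompressed(arr, out, true); // rewind so the header can be read back immediately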
@Override
public INDArray nextFloat(char order, int[] shape) {
    INDArray array = Nd4j.createUninitialized(shape, order);
    UniformDistribution op = new UniformDistribution(array, 0.0, 1.0);
    Nd4j.getExecutioner().exec(op, this);
    return array;
}
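// Minimal usage sketch (not from the original source), assuming this overrides
// org.nd4j.linalg.api.rng.Random#nextFloat: draw a 2x3 array of uniform [0, 1) samples in 'c' order.
INDArray u = Nd4j.getRandom().nextFloat('c', new int[] {2, 3});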
public static INDArray or(INDArray x, INDArray y) {
    INDArray z = Nd4j.createUninitialized(x.shape(), x.ordering());
    Nd4j.getExecutioner().exec(new Or(x, y, z, 0.0));
    return z;
}
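// Minimal usage sketch (not from the original source): element-wise logical OR of two 0/1 arrays
// (an element counts as "true" when it is non-zero).
INDArray a = Nd4j.create(new double[] {1, 0, 1, 0});
INDArray b = Nd4j.create(new double[] {1, 1, 0, 0});
INDArray c = or(a, b); // expected: [1.0, 1.0, 1.0, 0.0]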
public static Pair<INDArray, String> getTransposedMatrixWithShape(long rows, long cols, long seed) {
    Nd4j.getRandom().setSeed(seed);
    INDArray out = Nd4j.linspace(1, rows * cols, rows * cols).reshape(cols, rows);
    return new Pair<>(out.transpose(), "getTransposedMatrixWithShape(" + rows + "," + cols + "," + seed + ")");
}
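// Minimal usage sketch (not from the original source): the first element of the pair is a 3x4
// transpose view of a 4x3 linspace matrix, the second a human-readable description of the call.
Pair<INDArray, String> p = getTransposedMatrixWithShape(3, 4, 12345);
INDArray m = p.getFirst();   // shape [3, 4], non-contiguous (a transposed view)
String desc = p.getSecond(); // "getTransposedMatrixWithShape(3,4,12345)"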
public static void checkForInf(INDArray z) {
    if (Nd4j.getExecutioner().getProfilingMode() != OpExecutioner.ProfilingMode.INF_PANIC
            && Nd4j.getExecutioner().getProfilingMode() != OpExecutioner.ProfilingMode.ANY_PANIC)
        return;

    int match = 0;
    if (!z.isScalar()) {
        MatchCondition condition = new MatchCondition(z, Conditions.isInfinite());
        match = Nd4j.getExecutioner().exec(condition, Integer.MAX_VALUE).getInt(0);
    } else {
        if (z.data().dataType() == DataBuffer.Type.DOUBLE) {
            if (Double.isInfinite(z.getDouble(0)))
                match = 1;
        } else {
            if (Float.isInfinite(z.getFloat(0)))
                match = 1;
        }
    }

    if (match > 0)
        throw new ND4JIllegalStateException("P.A.N.I.C.! Op.Z() contains " + match + " Inf value(s)");
}
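// Minimal usage sketch (not from the original source): the check is a no-op unless the
// executioner's profiling mode is INF_PANIC or ANY_PANIC, so enable it first.
Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.INF_PANIC);
INDArray z = Nd4j.create(new double[] {1.0, Double.POSITIVE_INFINITY});
checkForInf(z); // throws ND4JIllegalStateException because z contains one Inf value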
// Fragment of a CUDA concat implementation; the native concat call is elided, and allocator,
// dimension, point and context come from surrounding code not shown in this snippet.
if (toConcat.length == 1)
    return toConcat[0];

if (Nd4j.getExecutioner() instanceof GridExecutioner)
    ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

PointerPointer shapeInfoPointers = new PointerPointer(toConcat.length);
PointerPointer dataPointers = new PointerPointer(toConcat.length);
int[] outputShape = ArrayUtil.copy(toConcat[0].shape());

long sumAlongDim = 0;
for (int i = 0; i < toConcat.length; i++) {
    if (toConcat[i].isCompressed())
        Nd4j.getCompressor().decompressi(toConcat[i]);
    shapeInfoPointers.put(i, allocator.getHostPointer(toConcat[i].shapeInfoDataBuffer()));
    dataPointers.put(i, allocator.getHostPointer(toConcat[i].data()));
    sumAlongDim += toConcat[i].size(dimension);
}

PointerPointer dummy = new PointerPointer(new Pointer[] {null});
INDArray ret = Nd4j.createUninitialized(outputShape, Nd4j.order());

// ... (elided: per-datatype native concat call; its trailing arguments in the original were)
//     new PointerPointer(new Pointer[]{null}), new PointerPointer(new Pointer[]{null}));
// } else {
//     throw new ND4JIllegalStateException("Unknown dataType: " + ret.data().dataType());
// }

// copy the concatenated result from host to device memory and synchronize
nativeOps.memcpyAsync(point.getDevicePointer(), point.getHostPointer(),
        ret.lengthLong() * Nd4j.sizeOfDataType(ret.data().dataType()),
        CudaConstants.cudaMemcpyHostToDevice, context.getSpecialStream());
context.getSpecialStream().synchronize();
@Override
public INDArray sample(int[] shape) {
    // collapse all leading dimensions into rows, keep the last dimension as columns
    int numRows = 1;
    for (int i = 0; i < shape.length - 1; i++)
        numRows *= shape[i];
    int numCols = shape[shape.length - 1];

    val flatShape = new int[] {numRows, numCols};
    val flatRng = Nd4j.getExecutioner().exec(
            new GaussianDistribution(Nd4j.createUninitialized(flatShape, Nd4j.order()), 0.0, 1.0), random);

    long m = flatRng.rows();
    long n = flatRng.columns();

    // SVD of the Gaussian matrix; an orthogonal factor is used as the sample
    val s = Nd4j.create(m < n ? m : n);
    val u = m < n ? Nd4j.create(m, n) : Nd4j.create(m, m);
    val v = Nd4j.create(n, n, 'f');

    Nd4j.getBlasWrapper().lapack().gesvd(flatRng, s, u, v);

    // FIXME: int cast
    // "gains" and "gain" are fields of the enclosing class (not shown in this fragment)
    if (gains == null) {
        if (u.rows() == numRows && u.columns() == numCols) {
            return v.get(NDArrayIndex.interval(0, numRows), NDArrayIndex.interval(0, numCols))
                    .mul(gain).reshape(ArrayUtil.toLongArray(shape));
        } else {
            return u.get(NDArrayIndex.interval(0, numRows), NDArrayIndex.interval(0, numCols))
                    .mul(gain).reshape(ArrayUtil.toLongArray(shape));
        }
    } else {
        throw new UnsupportedOperationException();
    }
}
// Fragment of a CUDA shuffle implementation; validation, the per-array loop and the variables
// rnd, array, context, tadBuffers, extras, tempX, tempShapes, shuffleMap, tempTAD, tempOffsets
// and allocator come from elided surrounding code.
//     throw new IllegalStateException("Number of dimensions do not match number of arrays to shuffle");

Nd4j.getExecutioner().push();

long tadLength = 1;
for (int i = 0; i < dimensions.get(0).length; i++)
    tadLength *= arrays.get(0).shape()[dimensions.get(0)[i]];

val numTads = arrays.get(0).length() / tadLength;
val map = ArrayUtil.buildInterleavedVector(rnd, (int) numTads);

Pointer xShapeInfo = AtomicAllocator.getInstance().getPointer(array.shapeInfoDataBuffer(), context);
TADManager tadManager = Nd4j.getExecutioner().getTADManager();
Pointer tadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context);
DataBuffer offsets = tadBuffers.getSecond();

if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
    nativeOps.shuffleDouble(extras,
            new PointerPointer(allocator.getPointer(tempX, context)),
            new PointerPointer(allocator.getPointer(tempShapes, context)),
            new PointerPointer(allocator.getPointer(tempX, context)),
            new PointerPointer(allocator.getPointer(tempShapes, context)),
            arrays.size(), (IntPointer) shuffleMap,
            new PointerPointer(allocator.getPointer(tempTAD, context)),
            new PointerPointer(allocator.getPointer(tempOffsets, context)));
} else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
    nativeOps.shuffleFloat(extras,
            new PointerPointer(allocator.getPointer(tempX, context)),
            new PointerPointer(allocator.getPointer(tempShapes, context)),
            // ... (remaining arguments mirror the double branch; truncated in the original)
// Fragment of a ROC curve calculation; countActualNegative, countActualPositive and the pair p
// (the de-duplicated curve points) come from elided surrounding code.
INDArray sorted = Nd4j.sortRows(pl, 0, false);
INDArray isPositive = sorted.getColumn(1);
INDArray isNegative = sorted.getColumn(1).rsub(1.0);
INDArray cumSumPos = isPositive.cumsum(-1);
INDArray cumSumNeg = isNegative.cumsum(-1);
int length = (int) sorted.size(0);

// thresholds, FPR and TPR columns, padded with one leading and one trailing entry
INDArray t = Nd4j.create(new int[] {length + 2, 1});
t.put(new INDArrayIndex[] {NDArrayIndex.interval(1, length + 1), NDArrayIndex.all()}, sorted.getColumn(0));

INDArray fpr = Nd4j.create(new int[] {length + 2, 1});
fpr.put(new INDArrayIndex[] {NDArrayIndex.interval(1, length + 1), NDArrayIndex.all()},
        cumSumNeg.div(countActualNegative));

INDArray tpr = Nd4j.create(new int[] {length + 2, 1});
tpr.put(new INDArrayIndex[] {NDArrayIndex.interval(1, length + 1), NDArrayIndex.all()},
        cumSumPos.div(countActualPositive));

double[] x_fpr_out = fpr.data().asDouble();
double[] y_tpr_out = tpr.data().asDouble();
double[] tOut = t.data().asDouble();

// ... (de-duplication of curve points elided; p holds the result)
double[][] temp = p.getFirst();
tOut = temp[0];
x_fpr_out = temp[1];
// Fragment of a CUDA pullRows implementation; order, context, allocator, tempIndexes, tadBuffers
// and zTadBuffers come from elided surrounding code, and the if/else skeleton around the shape
// selection and the null-destination guard is reconstructed from the surrounding branches.
@Override
public INDArray pullRows(INDArray source, INDArray destination, int sourceDimension, int[] indexes) {
    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

    long[] shape;
    if (sourceDimension == 1)
        shape = new long[] {indexes.length, source.shape()[sourceDimension]};
    else if (sourceDimension == 0)
        shape = new long[] {source.shape()[sourceDimension], indexes.length};
    else
        throw new UnsupportedOperationException("2D input is expected");

    INDArray ret = destination;
    if (ret == null) {
        ret = Nd4j.createUninitialized(shape, order);
    } else {
        if (!Arrays.equals(shape, destination.shape())) {
            // ... (shape mismatch handling elided)
        }
    }

    PointerPointer extras = new PointerPointer(AddressRetriever.retrieveHostPointer(ret.shapeInfoDataBuffer()),
            context.getOldStream(), allocator.getDeviceIdPointer());

    AtomicAllocator.getInstance().memcpyBlocking(tempIndexes, new LongPointer(ArrayUtil.toLongArray(indexes)),
            indexes.length * 8, 0);

    TADManager tadManager = Nd4j.getExecutioner().getTADManager();
    Pointer tadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context);
    Pointer zTadShapeInfo = AtomicAllocator.getInstance().getPointer(zTadBuffers.getFirst(), context);
    DataBuffer offsets = tadBuffers.getSecond();
    Pointer tadOffsets = AtomicAllocator.getInstance().getPointer(offsets, context);
    // ... (native pullRows call and synchronization elided)
// Fragment of a CUDA toFlattened implementation; allocator, context and nativeOps come from
// elided surrounding code, and the per-matrix loop is reconstructed from the flattened snippet.
@Override
public INDArray toFlattened(char order, Collection<INDArray> matrices) {
    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

    int length = 0;
    for (INDArray m : matrices)
        length += m.length();

    INDArray ret = Nd4j.create(new int[] {1, length}, order);
    int linearIndex = 0;
    for (INDArray m : matrices) {
        if (m.ordering() == order && ret.elementWiseStride() == m.elementWiseStride()
                && ret.elementWiseStride() == 1) {
            // layouts match: copy the raw buffer directly into the right offset of ret
            allocator.memcpyAsync(ret.data(), new CudaPointer(allocator.getHostPointer(m).address()),
                    AllocationUtils.getRequiredMemory(AllocationUtils.buildAllocationShape(m)),
                    linearIndex * (m.data().dataType() == DataBuffer.Type.DOUBLE ? 8
                            : m.data().dataType() == DataBuffer.Type.FLOAT ? 4 : 2));
            linearIndex += m.length();
        } else {
            Pointer hostYShapeInfo = AddressRetriever.retrieveHostPointer(m.shapeInfoDataBuffer());
            PointerPointer extras = new PointerPointer(
                    AddressRetriever.retrieveHostPointer(ret.shapeInfoDataBuffer()),
                    context.getOldStream(), allocator.getDeviceIdPointer(), context.getBufferAllocation(),
                    hostYShapeInfo, AddressRetriever.retrieveHostPointer(ret.shapeInfoDataBuffer()));

            if (m.data().dataType() == DataBuffer.Type.DOUBLE) {
                nativeOps.flattenDouble(extras, linearIndex, order,
                        (DoublePointer) allocator.getPointer(ret, context),
                        // ... (remaining arguments and the float branch truncated in the original)
/**
 * ?tbsv solves a system of linear equations whose coefficients are in a triangular band matrix.
 *
 * @param order  the matrix storage order (row- or column-major)
 * @param Uplo   whether the matrix is upper ('U') or lower ('L') triangular
 * @param TransA whether to solve with A or its transpose
 * @param Diag   whether A is unit ('U') or non-unit ('N') triangular
 * @param A      the triangular band matrix
 * @param X      the right-hand side vector; overwritten with the solution
 */
@Override
public void tbsv(char order, char Uplo, char TransA, char Diag, INDArray A, INDArray X) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, A, X);

    // FIXME: int cast
    if (X.data().dataType() == DataBuffer.Type.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, A, X);
        dtbsv(order, Uplo, TransA, Diag, (int) X.length(), (int) A.columns(), A, (int) A.size(0), X,
                X.majorStride());
    } else {
        DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, A, X);
        stbsv(order, Uplo, TransA, Diag, (int) X.length(), (int) A.columns(), A, (int) A.size(0), X,
                X.majorStride());
    }
}
public static boolean checkDivManually(INDArray first, INDArray second, double maxRelativeDifference,
                double minAbsDifference) {
    // No Apache Commons element-wise division, but we can do this manually
    INDArray result = first.div(second);
    long[] shape = first.shape();

    INDArray expected = Nd4j.zeros(first.shape());
    for (int i = 0; i < shape[0]; i++) {
        for (int j = 0; j < shape[1]; j++) {
            double v = first.getDouble(i, j) / second.getDouble(i, j);
            expected.putScalar(new int[] {i, j}, v);
        }
    }

    if (!checkShape(expected, result))
        return false;
    boolean ok = checkEntries(expected, result, maxRelativeDifference, minAbsDifference);
    if (!ok) {
        // on failure, repeat the same division on offset-zero copies for the diagnostic output
        INDArray onCopies = Shape.toOffsetZeroCopy(first).div(Shape.toOffsetZeroCopy(second));
        printFailureDetails(first, second, expected, result, onCopies, "div");
    }
    return ok;
}
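// Minimal usage sketch (not from the original source): compare ND4J's element-wise division
// against the manually computed reference, with a 1e-5 relative / 1e-8 absolute tolerance.
INDArray a = Nd4j.rand(3, 4);
INDArray b = Nd4j.rand(3, 4).addi(1.0); // keep the denominator away from zero
boolean ok = checkDivManually(a, b, 1e-5, 1e-8);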
@Override
public Pair<Double, INDArray> computeGradientAndScore(INDArray labels, INDArray preOutput,
                IActivation activationFn, INDArray mask, boolean average) {
    final INDArray scoreArr = Nd4j.create(labels.size(0), 1);
    final INDArray grad = Nd4j.ones(labels.shape());
    calculate(labels, preOutput, activationFn, mask, scoreArr, grad);

    double score = scoreArr.sumNumber().doubleValue();
    if (average)
        score /= scoreArr.size(0);

    return new Pair<>(score, grad);
}
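// Minimal usage sketch (not from the original source); ActivationSigmoid is just one possible
// IActivation implementation, and the 2x2 labels/preOutput values are illustrative only.
INDArray labels = Nd4j.create(new double[][] {{1, 0}, {0, 1}});
INDArray preOutput = Nd4j.randn(2, 2);
Pair<Double, INDArray> scoreAndGrad =
        computeGradientAndScore(labels, preOutput, new ActivationSigmoid(), null, true);
double score = scoreAndGrad.getFirst();       // score averaged over the two examples
INDArray gradient = scoreAndGrad.getSecond(); // same shape as labels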
// Fragment of a tensor-along-dimension (TAD) view construction inside BaseNDArray; the enclosing
// method signature and the call that consumes the new shape information are elided.
Pair<DataBuffer, DataBuffer> tadInfo =
        Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(this, dimension);
DataBuffer shapeInfo = tadInfo.getFirst();
val shape = Shape.shape(shapeInfo);
val stride = Shape.stride(shapeInfo).asLong();
long offset = offset() + tadInfo.getSecond().getLong(index);

INDArray toTad = Nd4j.create(data(), shape, stride, offset);
BaseNDArray baseNDArray = (BaseNDArray) toTad;

// the element-wise stride is stored two entries from the end of the shape-info buffer
int ews = baseNDArray.shapeInfoDataBuffer().getInt(baseNDArray.shapeInfoDataBuffer().length() - 2);

// ... (elided: the new shape information is applied to the TAD view)
//     Nd4j.getShapeInfoProvider().createShapeInformation(shape, stride, 0, ews, newOrder));
@Override
public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
    // dL/dz = hardtanh'(in) * epsilon; no trainable parameters, hence the null second element
    INDArray dLdz = Nd4j.getExecutioner().execAndReturn(new HardTanhDerivative(in));
    dLdz.muli(epsilon);
    return new Pair<>(dLdz, null);
}
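// Minimal usage sketch (not from the original source): backpropagate an upstream epsilon of ones
// through the hard-tanh activation; the derivative is 1 where |x| < 1 and 0 elsewhere.
INDArray in = Nd4j.create(new double[] {-2.0, -0.5, 0.5, 2.0});
INDArray eps = Nd4j.ones(1, 4);
Pair<INDArray, INDArray> out = backprop(in, eps);
// out.getFirst() is expected to be [0, 1, 1, 0]; out.getSecond() is null (no parameter gradient)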
/**
 * Merge the vectors and append a bias.
 * Each vector must be a row or column vector.
 * An exception is thrown for inconsistency (mixed row and column vectors).
 *
 * @param vectors the vectors to merge
 * @return the merged ndarray with the bias appended
 */
@Override
public INDArray appendBias(INDArray... vectors) {
    int size = 0;
    for (INDArray vector : vectors) {
        size += vector.rows();
    }

    // one extra row for the bias
    INDArray result = Nd4j.create(size + 1, vectors[0].columns());
    int index = 0;
    for (INDArray vector : vectors) {
        INDArray put = toFlattened(vector, Nd4j.ones(1));
        result.put(new INDArrayIndex[] {NDArrayIndex.interval(index, index + vector.rows() + 1),
                NDArrayIndex.interval(0, vectors[0].columns())}, put);
        index += vector.rows();
    }
    return result;
}
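// Minimal usage sketch (not from the original source): merging two 3x1 column vectors yields a
// 7x1 array -- the stacked values followed by a single bias entry of 1.0.
INDArray v1 = Nd4j.create(new double[] {1, 2, 3}, new int[] {3, 1});
INDArray v2 = Nd4j.create(new double[] {4, 5, 6}, new int[] {3, 1});
INDArray withBias = appendBias(v1, v2); // expected shape: [7, 1]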
// Fragment of a CUDA TAD sort implementation; context comes from elided surrounding code and the
// double branch is truncated in the original.
@Override
public INDArray sort(INDArray x, boolean descending, int... dimension) {
    if (x.isScalar())
        return x;

    Nd4j.getExecutioner().push();

    Pair<DataBuffer, DataBuffer> tadBuffers =
            Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(x, dimension);

    PointerPointer extraz = new PointerPointer(
            AtomicAllocator.getInstance().getHostPointer(x.shapeInfoDataBuffer()), // not used
            context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer());

    Pointer dimensionPointer = AtomicAllocator.getInstance()
            .getPointer(AtomicAllocator.getInstance().getConstantBuffer(dimension), context);

    if (x.data().dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.sortTadFloat(extraz,
                (FloatPointer) AtomicAllocator.getInstance().getPointer(x, context),
                (IntPointer) dimensionPointer, dimension.length,
                (LongPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
                new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context)),
                descending);
    } else if (x.data().dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.sortTadDouble(extraz,
                (DoublePointer) AtomicAllocator.getInstance().getPointer(x, context),
                // ... (remaining arguments mirror the float branch; truncated in the original)