Refine search
// NOTE(review): search-result excerpt of put(INDArray, INDArray) with interior lines
// elided -- braces do not balance and `row` / `arrList` are declared in code not visible
// here. Code kept byte-identical; comments only. TODO confirm against the full source.
@Override public INDArray put(INDArray indices, INDArray element) {
    // Only vector or matrix index arrays are supported.
    if(indices.rank() > 2) {
        throw new ND4JIllegalArgumentException("Indices must be a vector or matrix.");
    // NOTE(review): the closing brace of the rank-check branch is missing in this view.
    // One index column per target coordinate: walk element values in ND order.
    if(indices.rows() == rank()) {
        NdIndexIterator ndIndexIterator = new NdIndexIterator(element.shape());
        for(int i = 0; i < indices.columns(); i++) {
            // Column i holds a full coordinate; dup() detaches it before reading as int[].
            int[] specifiedIndex = indices.getColumn(i).dup().data().asInt();
            putScalar(specifiedIndex,element.getDouble(ndIndexIterator.next()));
    // NOTE(review): `row` and `arrList` come from elided code above -- presumably a
    // row-vector of slice indices and a result accumulator; verify against full source.
    for(int j = 0; j < row.length(); j++) {
        INDArray slice = slice(row.getInt(j));
        // Copy `element` into the selected slice in place via an Assign op.
        Nd4j.getExecutioner().exec(new Assign(new INDArray[]{slice,element},new INDArray[]{slice}));
        arrList.add(slice(row.getInt(j)));
/** * Setup the given byte buffer * for serialization (note that this is for uncompressed INDArrays) * 4 bytes int for rank * 4 bytes for data opType * shape buffer * data buffer * * @param arr the array to setup * @param allocated the byte buffer to setup * @param rewind whether to rewind the byte buffer or nt */ public static void doByteBufferPutUnCompressed(INDArray arr, ByteBuffer allocated, boolean rewind) { // ensure we send data to host memory Nd4j.getExecutioner().commit(); Nd4j.getAffinityManager().ensureLocation(arr, AffinityManager.Location.HOST); ByteBuffer buffer = arr.data().pointer().asByteBuffer().order(ByteOrder.nativeOrder()); ByteBuffer shapeBuffer = arr.shapeInfoDataBuffer().pointer().asByteBuffer().order(ByteOrder.nativeOrder()); //2 four byte ints at the beginning allocated.putInt(arr.rank()); //put data opType next so its self describing allocated.putInt(arr.data().dataType().ordinal()); allocated.put(shapeBuffer); allocated.put(buffer); if (rewind) allocated.rewind(); }
/**
 * Backpropagates through the hard-tanh activation: dL/dz = hardtanh'(in) * epsilon.
 *
 * @param in      pre-activation input
 * @param epsilon loss gradient with respect to the activation output
 * @return pair of (gradient with respect to {@code in}, {@code null})
 */
@Override
public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
    // Derivative of hard-tanh evaluated at the pre-activations...
    final INDArray gradient = Nd4j.getExecutioner().execAndReturn(new HardTanhDerivative(in));
    // ...combined with the incoming error signal via the chain rule (in place).
    gradient.muli(epsilon);
    return new Pair<>(gradient, null);
}
/**
 * Throws if {@code z} contains any infinite value, but only when the executioner's
 * profiling mode is INF_PANIC or ANY_PANIC; otherwise this is a no-op.
 *
 * @param z the array (or scalar) to inspect
 * @throws ND4JIllegalStateException if at least one Inf is present
 */
public static void checkForInf(INDArray z) {
    final OpExecutioner.ProfilingMode mode = Nd4j.getExecutioner().getProfilingMode();
    if (mode != OpExecutioner.ProfilingMode.INF_PANIC
            && mode != OpExecutioner.ProfilingMode.ANY_PANIC)
        return;

    int match = 0;
    if (z.isScalar()) {
        // Scalars are checked directly, respecting the underlying buffer precision.
        final boolean infinite = z.data().dataType() == DataBuffer.Type.DOUBLE
                ? Double.isInfinite(z.getDouble(0))
                : Float.isInfinite(z.getFloat(0));
        if (infinite)
            match = 1;
    } else {
        // Count infinite entries with a single reduction on the executioner.
        final MatchCondition condition = new MatchCondition(z, Conditions.isInfinite());
        match = Nd4j.getExecutioner().exec(condition, Integer.MAX_VALUE).getInt(0);
    }

    if (match > 0)
        throw new ND4JIllegalStateException("P.A.N.I.C.! Op.Z() contains " + match + " Inf value(s)");
}
/**
 * Scales a vector by a scalar in place: X &lt;- alpha * X.
 *
 * @param N     number of elements to scale
 * @param alpha the scalar multiplier
 * @param X     the vector to scale (modified in place)
 */
@Override
public void scal(long N, double alpha, INDArray X) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, X);

    if (X.isSparse()) {
        // Sparse arrays go through the sparse BLAS level-1 wrapper.
        Nd4j.getSparseBlasWrapper().level1().scal(N, alpha, X);
        return;
    }

    final DataBuffer.Type type = X.data().dataType();
    if (type == DataBuffer.Type.DOUBLE) {
        dscal(N, alpha, X, BlasBufferUtil.getBlasStride(X));
    } else if (type == DataBuffer.Type.FLOAT) {
        sscal(N, (float) alpha, X, BlasBufferUtil.getBlasStride(X));
    } else if (type == DataBuffer.Type.HALF) {
        // No native BLAS routine for half precision; fall back to an executioner op.
        Nd4j.getExecutioner().exec(new ScalarMultiplication(X, alpha));
    }
}
// NOTE(review): search-result excerpt of a loss-score computation with interior lines
// elided -- `scoreArr`, `op`, `output` and `clipEps` are declared in code not visible
// here, and the builder chain is missing its receiver. Kept byte-identical; comments only.
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    // Labels and network outputs must agree on the number of columns (nOut).
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");
    // NOTE(review): closing brace of the size-check branch is missing in this view.
    // Softmax fast path: score = labels * log-softmax(preOutput).
    if (activationFn instanceof ActivationSoftmax) {
        INDArray logsoftmax = Nd4j.getExecutioner().execAndReturn(new LogSoftMax(preOutput.dup()));
        scoreArr = logsoftmax.muli(labels);
    // NOTE(review): `op` builder receiver and `output` come from elided code --
    // presumably output clipping to [clipEps, 1-clipEps]; verify against full source.
    .addFloatingPointArguments(clipEps, 1.0-clipEps)
    .build();
    Nd4j.getExecutioner().exec(op);
    // Binary cross-entropy form: labels*log(output) + (1-labels)*log(1-output).
    scoreArr = Transforms.log(output, true).muli(labels);
    INDArray secondTerm = output.rsubi(1);
    Transforms.log(secondTerm, false);
    secondTerm.muli(labels.rsub(1));
    scoreArr.addi(secondTerm);
// NOTE(review): these are the bodies of switch cases whose enclosing switch statement
// is outside this view; each case applies a column-vector op along dimension 1.
// Two cases below lost their `ScalarMultiplication op = ` / `ScalarReverseSubtraction op = `
// declarations to excerpt elision -- TODO confirm against the full source.
// Addition: this + columnVector, broadcast down each column.
ScalarAdd op = new ScalarAdd(this, columnVector, this, this.length(), 0.0);
op.setDimension(1);
Nd4j.getExecutioner().exec(op);
break;
// Assignment: copy columnVector into every column of this.
ScalarSet op = new ScalarSet(this, columnVector, this, this.length(), 0.0);
op.setDimension(1);
Nd4j.getExecutioner().exec(op);
break;
// Subtraction: this - columnVector.
ScalarSubtraction op = new ScalarSubtraction(this, columnVector, this, this.length(), 0.0);
op.setDimension(1);
Nd4j.getExecutioner().exec(op);
break;
// Multiplication case -- the `op =` assignment was elided in this excerpt.
new ScalarMultiplication(this, columnVector, this, this.length(), 0.0);
op.setDimension(1);
Nd4j.getExecutioner().exec(op);
break;
// Division: this / columnVector.
ScalarDivision op = new ScalarDivision(this, columnVector, this, this.length(), 0.0);
op.setDimension(1);
Nd4j.getExecutioner().exec(op);
break;
// Reverse subtraction case -- the `op =` assignment was elided in this excerpt.
new ScalarReverseSubtraction(this, columnVector, this, this.length(), 0.0);
op.setDimension(1);
Nd4j.getExecutioner().exec(op);
break;
// NOTE(review): search-result excerpt of the CUDA concat implementation with interior
// lines elided -- `i` (first use), `sumAlongDim`, `shapeInfoPointers`, `context` and
// `tadBuffers` are declared in elided code. Kept byte-identical; comments only.
@Override public INDArray concat(int dimension, INDArray... toConcat) {
    // Drain any queued grid ops before touching device memory directly.
    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();
    // NOTE(review): the loop header declaring `i` here was elided.
    if (toConcat[i].isCompressed())
        Nd4j.getCompressor().decompressi(toConcat[i]);
    // Accumulate the concatenation dimension's total extent.
    sumAlongDim += toConcat[i].size(dimension);
    int[] outputShape = ArrayUtil.copy(toConcat[0].shape());
    long[] hostShapeInfoPointers = new long[toConcat.length];
    TADManager tadManager = Nd4j.getExecutioner().getTADManager();
    // Gather device addresses of shape info and TAD metadata for each input.
    for (int i = 0; i < toConcat.length; i++) {
        shapeInfoPointers[i] = AddressRetriever.retrieveDeviceAddress(toConcat[i].shapeInfoDataBuffer(), context);
        tadManager.getTADOnlyShapeInfo(toConcat[i], new int[] {dimension});
        long devTadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context).address();
        DataBuffer offsets = tadBuffers.getSecond();
        long devTadOffsets = AtomicAllocator.getInstance().getPointer(offsets, context).address();
// NOTE(review): search-result excerpt of pullRows with interior lines elided -- the
// branch declaring `shape` for sourceDimension == 1, plus `ret`, `order`, `context`,
// `tadBuffers` and `zTadBuffers`, live in elided code. Kept byte-identical; comments only.
@Override public INDArray pullRows(INDArray source, INDArray destination, int sourceDimension, int[] indexes) {
    // Drain queued grid ops before raw pointer work.
    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();
    // NOTE(review): the `if (sourceDimension == 1)` header for this assignment was elided.
    shape = new long[] {indexes.length, source.shape()[sourceDimension]};
    else if (sourceDimension == 0)
        shape = new long[] {source.shape()[sourceDimension], indexes.length};
    else
        throw new UnsupportedOperationException("2D input is expected");
    // Allocate a fresh output when no destination was supplied...
    ret = Nd4j.createUninitialized(shape, order);
    } else {
    // ...otherwise the caller-supplied destination must already match the pulled shape.
    if(!Arrays.equals(shape, destination.shape())){
        throw new IllegalStateException("Cannot pull rows into destination array: expected destination array of"
                + " shape " + Arrays.toString(shape) + " but got destination array of shape " + Arrays.toString(destination.shape()));
    // Resolve TAD (tensor-along-dimension) metadata for source and target.
    TADManager tadManager = Nd4j.getExecutioner().getTADManager();
    Pointer tadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context);
    Pointer zTadShapeInfo = AtomicAllocator.getInstance().getPointer(zTadBuffers.getFirst(), context);
    DataBuffer offsets = tadBuffers.getSecond();
    Pointer tadOffsets = AtomicAllocator.getInstance().getPointer(offsets, context);
    Pointer zTadOffsets = AtomicAllocator.getInstance().getPointer(zTadBuffers.getSecond(), context);
// NOTE(review): search-result excerpt of a minority-class mask adjustment with interior
// lines elided -- `currentTimeSliceStart` and `currentLabel` are declared in elided code,
// and the dangling `NDArrayIndex.interval(...)` call lost its receiver. Comments only.
public INDArray adjustMasks(INDArray label, INDArray labelMask, int minorityLabel, double targetDist) {
    // Default mask: every time step of every example is initially usable.
    labelMask = Nd4j.ones(label.size(0), label.size(2));
    // Per-position keep probabilities, filled in by elided code below.
    INDArray bernoullis = Nd4j.zeros(labelMask.shape());
    long currentTimeSliceEnd = label.size(2);
    // NOTE(review): receiver of this interval-index call was elided -- presumably a
    // get(...) slicing labels over [currentTimeSliceStart, currentTimeSliceEnd).
    NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
    // When the minority class is 0, invert so that 1 marks the minority positions.
    if (minorityLabel == 0) {
        currentLabel = Transforms.not(currentLabel);
    // Sample the final mask from the per-position Bernoulli probabilities.
    return Nd4j.getExecutioner().exec(
            new BernoulliDistribution(Nd4j.createUninitialized(bernoullis.shape()), bernoullis),
            Nd4j.getRandom());
// NOTE(review): excerpt from a tensorAlongDimension-style method whose header is outside
// this view -- `tadInfo`, `dimension` and `index` are declared in elided code, and the
// trailing call lost its receiver. Kept byte-identical; comments only.
// Resolve TAD-only shape info for this array along the requested dimensions.
Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(this, dimension);
DataBuffer shapeInfo = tadInfo.getFirst();
val shape = Shape.shape(shapeInfo);
val stride = Shape.stride(shapeInfo).asLong();
// The index-th tensor starts at this array's offset plus the TAD offset table entry.
long offset = offset() + tadInfo.getSecond().getLong(index);
// Build a view over the same backing data with the TAD's shape/stride/offset.
INDArray toTad = Nd4j.create(data(), shape, stride, offset);
BaseNDArray baseNDArray = (BaseNDArray) toTad;
char newOrder = Shape.getOrder(shape, stride, 1);
// Element-wise stride is stored 2 slots before the end of the shape info buffer.
int ews = baseNDArray.shapeInfoDataBuffer().getInt(baseNDArray.shapeInfoDataBuffer().length() - 2);
// NOTE(review): receiver/assignment for this call was elided in the excerpt.
Nd4j.getShapeInfoProvider().createShapeInformation(shape, stride, 0, ews, newOrder));
// NOTE(review): search-result excerpt of the CUDA tear implementation with interior
// lines elided -- loop headers declaring `i` and `x`, plus `tadLength`, `shape`,
// `result`, `tempX` and `context`, live in elided code. Kept byte-identical; comments only.
public INDArray[] tear(INDArray tensor, int... dimensions) {
    // Tear operates on raw buffers, so decompress first if needed.
    if (tensor.isCompressed())
        Nd4j.getCompressor().decompressi(tensor);
    Pair<DataBuffer, DataBuffer> tadBuffers =
            Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(tensor, dimensions);
    // NOTE(review): the loop header over `i` was elided.
    tadLength *= tensor.shape()[dimensions[i]];
    shape[i] = tensor.shape()[dimensions[i]];
    // NOTE(review): the loop header over `x` was elided.
    result[x] = Nd4j.createUninitialized(shape);
    // Native tear call for the DOUBLE branch (call name elided in this excerpt).
    new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)),
    (IntPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context),
    (IntPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
    new LongPointerWrapper((IntPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context))
    );
    } else if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
    // FLOAT branch mirrors the DOUBLE branch with float pointers.
    new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)),
    (IntPointer) AtomicAllocator.getInstance().getPointer(result[0].shapeInfoDataBuffer(), context),
    (IntPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
    new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context))
    );
/**
 * in place subtraction of two matrices
 *
 * @param other  the second ndarray to subtract
 * @param result the result ndarray
 * @return the result of the subtraction
 */
@Override
public INDArray subi(INDArray other, INDArray result) {
    if (other.isScalar()) {
        return subi(other.getDouble(0), result);
    }

    if (isScalar()) {
        // this - other == other.rsub(this) when this is a scalar.
        return other.rsubi(getDouble(0), result);
    }

    if (!Shape.shapeEquals(this.shape(), other.shape())) {
        int[] broadcastDimensions = Shape.getBroadcastDimensions(this.shape(), other.shape());
        // FIX: allocate a result of the broadcast output shape, consistent with
        // addi(INDArray, INDArray) -- the caller-supplied result is not guaranteed
        // to have the broadcast shape, so writing into it directly was unsafe.
        result = Nd4j.createUninitialized(Shape.broadcastOutputShape(this.shape(), other.shape()));
        Nd4j.getExecutioner().exec(new BroadcastSubOp(this, other, result, broadcastDimensions), broadcastDimensions);
        return result;
    }

    LinAlgExceptions.assertSameShape(other, result);
    Nd4j.getExecutioner().exec(new OldSubOp(this, other, result));

    if (Nd4j.ENFORCE_NUMERICAL_STABILITY)
        Nd4j.clearNans(result);

    return result;
}
/**
 * And over the whole ndarray given some condition, with respect to dimensions.
 *
 * @param n         the ndarray to test
 * @param condition the condition to test against
 * @param dimension the dimensions to reduce along
 * @return one flag per tensor-along-dimension: true iff every element of that
 *         tensor satisfies the condition
 */
public static boolean[] and(final INDArray n, final Condition condition, int... dimension) {
    if (!(condition instanceof BaseCondition))
        throw new UnsupportedOperationException("Only static Conditions are supported");

    // Count matching elements along each TAD...
    final MatchCondition matchOp = new MatchCondition(n, condition);
    final INDArray counts = Nd4j.getExecutioner().exec(matchOp, dimension);

    // ...then "all elements match" is equivalent to "count equals the TAD length".
    final long tadLength = Shape.getTADLength(n.shape(), dimension);
    final boolean[] result = new boolean[(int) counts.length()];
    for (int i = 0; i < result.length; i++) {
        result[i] = counts.getDouble(i) == tadLength;
    }
    return result;
}
// Demo statements contrasting copy ops (add) with in-place ops (addi), then a tour of
// element-wise transforms. Appears to be tutorial/example code; kept byte-identical.
INDArray originalArray = Nd4j.linspace(1,15,15).reshape('c',3,5); //As per example 3
// add() allocates and returns a NEW array; the original is left untouched.
INDArray copyAdd = originalArray.add(1.0);
System.out.println("Same object returned by add: " + (originalArray == copyAdd));
System.out.println("Original array after originalArray.add(1.0):\n" + originalArray);
// addi() mutates in place and returns the receiver itself.
INDArray inPlaceAdd = originalArray.addi(1.0);
System.out.println();
System.out.println("Same object returned by addi: " + (originalArray == inPlaceAdd)); //addi returns the exact same Java object
// Reset the array, then demonstrate element-wise transforms (each returns a copy here).
originalArray = Nd4j.linspace(1,15,15).reshape('c',3,5);
INDArray random = Nd4j.rand(3,5); //See example 2; we have a 3x5 with uniform random (0 to 1) values
System.out.println("Element-wise tanh on random array:\n" + Transforms.tanh(random));
System.out.println("Element-wise power (x^3.0) on random array:\n" + Transforms.pow(random,3.0));
System.out.println("Element-wise scalar max (with scalar 0.5):\n" + Transforms.max(random,0.5));
// Ops without a Transforms helper can be run directly on the executioner;
// dup() keeps the source array unmodified.
INDArray sinx = Nd4j.getExecutioner().execAndReturn(new Sin(random.dup()));
System.out.println("Element-wise sin(x) operation:\n" + sinx);
// NOTE(review): search-result excerpt of the CUDA TAD sort with interior lines elided --
// `context`, `nativeOps` and the declaration of `dimensionPointer` live in elided code,
// and the DOUBLE branch is cut off mid-call. Kept byte-identical; comments only.
@Override public INDArray sort(INDArray x, boolean descending, int... dimension) {
    // A scalar is trivially sorted.
    if (x.isScalar())
        return x;
    // Flush queued ops so device memory is consistent before the native sort.
    Nd4j.getExecutioner().push();
    Pair<DataBuffer, DataBuffer> tadBuffers =
            Nd4j.getExecutioner().getTADManager().getTADOnlyShapeInfo(x, dimension);
    // Extra-arguments pointer block expected by the native sort kernels.
    PointerPointer extraz = new PointerPointer(AtomicAllocator.getInstance().getHostPointer(x.shapeInfoDataBuffer()), // not used
            context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer());
    // NOTE(review): the `Pointer dimensionPointer = AtomicAllocator...` prefix was elided.
    .getPointer(AtomicAllocator.getInstance().getConstantBuffer(dimension), context);
    if (x.data().dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.sortTadFloat(extraz,
                (FloatPointer) AtomicAllocator.getInstance().getPointer(x, context),
                (IntPointer) dimensionPointer, dimension.length,
                (LongPointer) AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context),
                new LongPointerWrapper(AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context)),
                descending
        );
    } else if (x.data().dataType() == DataBuffer.Type.DOUBLE) {
        // DOUBLE branch mirrors the FLOAT branch; the remainder is outside this view.
        nativeOps.sortTadDouble(extraz,
                (DoublePointer) AtomicAllocator.getInstance().getPointer(x, context),
// NOTE(review): excerpt of an epsilon-equality check (method header outside this view,
// presumably equalsWithEps) with interior lines elided -- the scalar branch's `op`
// declaration and several closing braces are missing. Kept byte-identical; comments only.
// Comparison reads raw values, so decompress this array first if needed.
Nd4j.getCompressor().autoDecompress(this);
// Sparse comparison is delegated to the sparse implementation's equals.
if (n.isSparse()) {
    return n.equals(this);
// Different element counts can never be equal.
if (this.lengthLong() != n.lengthLong())
    return false;
// Scalar fast path: compare the two single values directly.
if (isScalar() && n.isScalar()) {
    if (data.dataType() == DataBuffer.Type.FLOAT) {
        double val = getDouble(0);
        double val2 = n.getDouble(0);
        // NOTE(review): the `op` declaration for this scalar branch was elided.
        Nd4j.getExecutioner().exec(op);
        double diff = op.getFinalResult().doubleValue();
// Shapes must match exactly for element-wise comparison.
if (!Shape.shapeEquals(shape(), n.shape())) {
    return false;
// Same ordering allows a direct buffer-order comparison; otherwise compare logically.
if (n.ordering() == ordering()) {
    EqualsWithEps op = new EqualsWithEps(this, n, eps);
    Nd4j.getExecutioner().exec(op);
    double diff = op.getFinalResult().doubleValue();
} else {
    EqualsWithEps op = new EqualsWithEps(this, n, eps);
    Nd4j.getExecutioner().exec(op);
    double diff = op.getFinalResult().doubleValue();
/**
 * in place addition of two matrices
 *
 * @param other  the second ndarray to add
 * @param result the result ndarray
 * @return the result of the addition
 */
@Override
public INDArray addi(INDArray other, INDArray result) {
    if (other.isScalar()) {
        // FIX: add the scalar to *this* and store into result. The previous code
        // called result.addi(other.getDouble(0), result), which added the scalar to
        // result's existing (possibly uninitialized) contents instead of to this,
        // and was inconsistent with subi(INDArray, INDArray).
        return addi(other.getDouble(0), result);
    }

    if (isScalar()) {
        // Addition is commutative, so scalar-this + other is other + scalar.
        return other.addi(getDouble(0), result);
    }

    if (!Shape.shapeEquals(this.shape(), other.shape())) {
        int[] broadcastDimensions = Shape.getBroadcastDimensions(this.shape(), other.shape());
        // Broadcast output may not match the caller-supplied result's shape;
        // allocate a fresh output of the broadcast shape.
        result = Nd4j.createUninitialized(Shape.broadcastOutputShape(this.shape(), other.shape()));
        Nd4j.getExecutioner().exec(new BroadcastAddOp(this, other, result, broadcastDimensions), broadcastDimensions);
        return result;
    }

    LinAlgExceptions.assertSameShape(other, result);
    Nd4j.getExecutioner().exec(new OldAddOp(this, other, result, length()));

    if (Nd4j.ENFORCE_NUMERICAL_STABILITY)
        Nd4j.clearNans(result);

    return result;
}
/**
 * Runs an element-wise transform op in place on {@code mask}, taking care to
 * preserve the broadcast output shape when either operand is a single-element
 * (scalar) tensor: the scalar side is expanded to the other side's shape before
 * execution and the result is reshaped to the broadcast shape.
 *
 * @param mask                       left operand; also the in-place output buffer
 * @param right                      right operand
 * @param bufferType                 data type used when expanding a scalar right operand
 * @param baseTransformOpConstructor factory producing the transform op from (x, y, z, length)
 * @return the op result, reshaped to the broadcast shape when a scalar operand was expanded
 */
private static INDArray executeNd4jTransformOpWithPreservedScalarTensorShape(
        INDArray mask, INDArray right, DataBuffer.Type bufferType,
        QuadFunction<INDArray, INDArray, INDArray, Long, BaseTransformOp> baseTransformOpConstructor) {
    final boolean hasScalarOperand = mask.length() == 1 || right.length() == 1;
    if (!hasScalarOperand) {
        // Common case: shapes already line up; run the op in place on mask.
        Nd4j.getExecutioner().exec(
                baseTransformOpConstructor.apply(mask, right, mask, mask.length()));
        return mask;
    }

    // Capture the broadcast shape BEFORE expanding the scalar operand.
    final long[] resultShape = Shape.broadcastOutputShape(mask.shape(), right.shape());
    if (mask.length() == 1) {
        // Expand the scalar mask to right's shape, then operate in place on the copy.
        mask = Nd4j.valueArrayOf(right.shape(), mask.getDouble(0));
        Nd4j.getExecutioner().exec(
                baseTransformOpConstructor.apply(mask, right, mask, mask.length()));
    } else {
        // Expand the scalar right operand to mask's shape instead.
        Nd4j.getExecutioner().exec(
                baseTransformOpConstructor.apply(
                        mask, valueArrayOf(mask.shape(), right.getDouble(0), bufferType),
                        mask, mask.length()));
    }
    return mask.reshape(resultShape);
}