/**
 * Panics (throws) if the given array contains any Inf values.
 * Active only when the executioner profiling mode is INF_PANIC or ANY_PANIC;
 * otherwise this is a no-op.
 */
public static void checkForInf(INDArray z) {
    OpExecutioner.ProfilingMode mode = Nd4j.getExecutioner().getProfilingMode();
    if (mode != OpExecutioner.ProfilingMode.INF_PANIC && mode != OpExecutioner.ProfilingMode.ANY_PANIC)
        return;

    int infCount = 0;
    if (z.isScalar()) {
        // Scalar: inspect the single element directly, honoring the buffer's data type.
        boolean isInf = z.data().dataType() == DataBuffer.Type.DOUBLE
                ? Double.isInfinite(z.getDouble(0))
                : Float.isInfinite(z.getFloat(0));
        if (isInf)
            infCount = 1;
    } else {
        // Non-scalar: count Inf entries via a single MatchCondition reduction.
        MatchCondition condition = new MatchCondition(z, Conditions.isInfinite());
        infCount = Nd4j.getExecutioner().exec(condition, Integer.MAX_VALUE).getInt(0);
    }

    if (infCount > 0)
        throw new ND4JIllegalStateException("P.A.N.I.C.! Op.Z() contains " + infCount + " Inf value(s)");
}
validateDataType(Nd4j.dataType(), op); extraz.set(new PointerPointer(32)); if (dimension[i] >= op.x().rank() && dimension[i] != Integer.MAX_VALUE) throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension) + " contains element that higher then rank of op.X: [" + op.x().rank() + "]"); dimension[i] += op.x().rank(); : ArrayUtil.removeIndex(op.x().shape(), dimension); if (op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape)) { return op.x(); Pointer devTadOffsets = offsets == null ? null : AtomicAllocator.getInstance().getPointer(offsets, context); PointerPointer xShapeInfoHostPointer = extraz.get().put( AddressRetriever.retrieveHostPointer(op.x().shapeInfoDataBuffer()), context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer(), context.getBufferAllocation(),
dimension[i] += op.x().rank(); if (dimension.length == op.x().rank()) dimension = new int[] {Integer.MAX_VALUE}; : ArrayUtil.removeIndex(op.x().shape(), dimension); val yT = op.y().tensorssAlongDimension(dimension); ret = Nd4j.create(xT, yT); } else { if (Math.abs(op.zeroDouble()) < Nd4j.EPS_THRESHOLD) { ret = Nd4j.zeros(retShape); } else { ret = Nd4j.valueArrayOf(retShape, op.zeroDouble()); } else { if (op.z().lengthLong() != ArrayUtil.prodLong(retShape)) throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]"); if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) { op.z().assign(op.zeroDouble()); } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) { op.z().assign(op.zeroFloat()); } else if (op.x().data().dataType() == DataBuffer.Type.HALF) { op.z().assign(op.zeroHalf());
return toConcat[0]; if (Nd4j.getExecutioner() instanceof GridExecutioner) ((GridExecutioner) Nd4j.getExecutioner()).flushQueue(); PointerPointer shapeInfoPointers = new PointerPointer(toConcat.length); PointerPointer dataPointers = new PointerPointer(toConcat.length); int[] outputShape = ArrayUtil.copy(toConcat[0].shape()); if (toConcat[i].isCompressed()) Nd4j.getCompressor().decompressi(toConcat[i]); shapeInfoPointers.put(i, allocator.getHostPointer(toConcat[i].shapeInfoDataBuffer())); dataPointers.put(i, allocator.getHostPointer(toConcat[i].data())); sumAlongDim += toConcat[i].size(dimension); INDArray ret = Nd4j.createUninitialized(outputShape, Nd4j.order()); new PointerPointer(new Pointer[]{null}), new PointerPointer(new Pointer[]{null})); } else { throw new ND4JIllegalStateException("Unknown dataType: " + ret.data().dataType());
validateDataType(Nd4j.dataType(), op); extraz.set(new PointerPointer(32)); : ArrayUtil.removeIndex(op.x().shape(), dimension); extraz.get().put(AddressRetriever.retrieveHostPointer(op.x().shapeInfoDataBuffer()), // 0 if (op.x().length() != op.y().length() || op.x().length() != op.z().length()) throw new ND4JIllegalStateException("X, Y and Z arguments should have the same length for PairwiseTransform"); if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) { if ((xEWS >= 1 && yEWS >= 1 && zEWS >= 1 && !op.isExecSpecial() && op.x().ordering() == op.y().ordering() && op.x().ordering() == op.z().ordering()) || (xEWS >= 1 && yEWS == xEWS && zEWS == xEWS && xRow && yRow && zRow)) { (DoublePointer) z, (LongPointer) zShapeInfo, (DoublePointer) extraArgs); } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) { if ((xEWS >= 1 && yEWS >= 1 && xEWS == yEWS && !op.isExecSpecial() if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) { if (op.x().elementWiseStride() >= 1 && !op.isExecSpecial() && op.z().ordering() == op.x().ordering()) { nativeOps.execTransformDouble(xShapeInfoHostPointer, op.opNum(), (DoublePointer) x,
checkForCompression(op); validateDataType(Nd4j.dataType(), op); if (dimension[i] >= op.x().rank() && dimension[i] != Integer.MAX_VALUE) throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension) + " contains element that higher then rank of op.X: [" + op.x().rank() + "]"); dimension[i] += op.x().rank(); : ArrayUtil.removeIndex(op.x().shape(), dimension); if (op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape) && ArrayUtil.prodLong(retShape) > 1 && op.y() == null) return op.noOp(); int yT = op.y().tensorssAlongDimension(dimension); if (op.z().lengthLong() != ArrayUtil.prodLong(retShape)) throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]"); if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
return target.assign(arrays[0]); Nd4j.getExecutioner().push(); long len = target.lengthLong(); if (arrays[i].elementWiseStride() != 1) throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays"); if (arrays[i].lengthLong() != len) throw new ND4JIllegalStateException("All arrays should have equal length for averaging"); PointerPointer x = new PointerPointer(AtomicAllocator.getInstance().getPointer(tempX, context)); long len = target.lengthLong(); Nd4j.getExecutioner().commit(); Nd4j.getCompressor().autoDecompress(arrays[i]); if (arrays[i].elementWiseStride() != 1) throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays"); if (arrays[i].lengthLong() != len) throw new ND4JIllegalStateException("All arrays should have equal length for averaging"); dataPointers.put(i, AtomicAllocator.getInstance().getHostPointer(arrays[i]));
op.setZ(Nd4j.scalar(0.0)); validateDataType(Nd4j.dataType(), op); extraz.set(new PointerPointer(32)); CudaEnvironment.getInstance().getConfiguration().enableDebug(true); for (int i = 0; i < dimension.length; i++) if (dimension[i] >= op.x().rank() && dimension[i] != Integer.MAX_VALUE) throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension) + " contains element that higher then rank of op.X: [" + op.x().rank() + "]"); CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(op.z().isScalar() ? null : op.z(), op.x(), op.y()); Pointer devTadOffsets = offsets == null ? null : AtomicAllocator.getInstance().getPointer(offsets, context); PointerPointer xShapeInfoHostPointer = extraz.get().put( AddressRetriever.retrieveHostPointer(op.x().shapeInfoDataBuffer()), context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer(), context.getBufferAllocation(), op.z().assign(result); } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) { float result = nativeOps.execIndexReduceScalarFloat(xShapeInfoHostPointer, op.opNum(), (FloatPointer) x, (LongPointer) xShapeInfo, (FloatPointer) extraArgs); .getPointer(AtomicAllocator.getInstance().getConstantBuffer(dimension), context);
checkForCompression(op); validateDataType(Nd4j.dataType(), op); validateDataType(Nd4j.dataType(), op); extraz.set(new PointerPointer(32)); throw new ND4JIllegalStateException("Op target dimension " + Arrays.toString(dimension) + " contains element that higher then rank of op.X: [" + op.x().rank() + "]"); dimension[i] += op.x().rank(); if (dimension.length == op.x().rank()) dimension = new int[] {Integer.MAX_VALUE}; retShape = new long[] {1, 1}; else retShape = ArrayUtil.removeIndex(maxShape, dimension); if (op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape) && ArrayUtil.prodLong(retShape) > 1 && op.y() == null) return op.noOp(); throw new ND4JIllegalStateException("Number of TADs along dimension doesn't match"); if (op.z().lengthLong() != ArrayUtil.prodLong(retShape)) throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");
@Override public INDArray percentile(Number quantile, int... dimension) { if (quantile.doubleValue() < 0 || quantile.doubleValue() > 100) throw new ND4JIllegalStateException("Percentile value should be in 0...100 range"); if (isScalar()) return Nd4j.scalar(this.getDouble(0)); INDArray sorted = Nd4j.getNDArrayFactory().sort(this.dup(this.ordering()), false, dimension); // there's no practical sense doing this on GPU, stride will be just size of TAD. INDArray ret = Nd4j.createUninitialized(sorted.tensorssAlongDimension(dimension)); for (int i = 0; i < ret.length(); i++) { ret.putScalar(i, getPercentile(quantile, sorted.tensorAlongDimension(i, dimension))); } return ret; }
throw new IllegalStateException("Number of dimensions do not match number of arrays to shuffle"); Nd4j.getExecutioner().push(); tadLength *= arrays.get(0).shape()[dimensions.get(0)[i]]; val numTads = arrays.get(0).length() / tadLength; val map = ArrayUtil.buildInterleavedVector(rnd, (int) numTads); PointerPointer extras = new PointerPointer(null, // not used context.getOldStream(), allocator.getDeviceIdPointer()); Pointer xShapeInfo = AtomicAllocator.getInstance().getPointer(array.shapeInfoDataBuffer(), context); TADManager tadManager = Nd4j.getExecutioner().getTADManager(); if (offsets.length() != numTads) throw new ND4JIllegalStateException("Can't symmetrically shuffle arrays with non-equal number of TADs"); if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) { nativeOps.shuffleDouble(extras, new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), new PointerPointer(allocator.getPointer(tempX, context)), new PointerPointer(allocator.getPointer(tempShapes, context)), arrays.size(),
validateDataType(Nd4j.dataType(), op); if (op.x().length() != op.z().length()) throw new ND4JIllegalStateException("op.X length should be equal to op.Y length: [" + Arrays.toString(op.x().shapeInfoDataBuffer().asInt()) + "] != [" + Arrays.toString(op.z().shapeInfoDataBuffer().asInt()) + "]"); extraz.set(new PointerPointer(32)); Pointer zShapeInfo = AtomicAllocator.getInstance().getPointer(op.z().shapeInfoDataBuffer(), context); PointerPointer xShapeInfoHostPointer = extraz.get().put( AddressRetriever.retrieveHostPointer(op.x().shapeInfoDataBuffer()), context.getOldStream(), AtomicAllocator.getInstance().getDeviceIdPointer(), context.getBufferAllocation(), hostYShapeInfo, hostZShapeInfo, null, null); if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) { if (op.x().elementWiseStride() >= 1 && op.z().ordering() == op.x().ordering()) { nativeOps.execScalarDouble(xShapeInfoHostPointer, op.opNum(), (DoublePointer) x,
variableNameToArr.put(differentialFunction.getOwnName(), array.dup(array.ordering())); if ((int) array.getDouble(0) == 1) { val frameName = frames.getLast(); throw new ND4JIllegalStateException("No body was run."); Nd4j.getExecutioner().exec(customOp); Nd4j.getExecutioner().exec(op); else if (op.isExecSpecial()) { op.exec(); Accumulation accumulation = (Accumulation) differentialFunction; Nd4j.getExecutioner().exec(accumulation, axes);
/**
 * Sums the given arrays element-wise and stores the result in a newly allocated array.
 *
 * @param arrays arrays to accumulate; must be non-null and non-empty
 * @return a new INDArray with the shape/ordering of {@code arrays[0]} holding the element-wise sum
 * @throws ND4JIllegalStateException if {@code arrays} is null or empty
 */
public static INDArray accumulate(INDArray... arrays) {
    if (arrays == null || arrays.length == 0)
        throw new ND4JIllegalStateException("Input for accumulation is null or empty");

    // Delegate to the target-based overload with a fresh zero-initialized destination.
    INDArray target = Nd4j.create(arrays[0].shape(), arrays[0].ordering());
    return accumulate(target, arrays);
}
@Override public void mapProperty(String name, DifferentialFunction on, NodeDef node, GraphDef graph, SameDiff sameDiff, Map<String, Map<String, PropertyMapping>> propertyMappingsForFunction) { if(node == null) { throw new ND4JIllegalStateException("No node found for name " + name); val type = field.getType(); if(type.equals(int[].class)) { on.setValueFor(field,arr.data().asInt()); on.setValueFor(field,arr.size(mapping.getShapePosition())); on.setValueFor(field,arr.getInt(0)); val type = attr.getType(); if(fields == null) { throw new ND4JIllegalStateException("No fields found for op " + mapping); throw new ND4JIllegalStateException("no property found for " + name + " and op " + on.opName());
@Override public Map<String, INDArray> executeGraph(long id, @NonNull Map<String, INDArray> map, @NonNull Map<String, Integer> reverseMap) { Nd4j.getExecutioner().commit(); val ptrBuffers = new PointerPointer(map.size() * 2); val ptrShapes = new PointerPointer(map.size() * 2); val ptrIndices = new IntPointer(map.size()); val array = map.get(key); if (Nd4j.dataType() == DataBuffer.Type.FLOAT) { val result = (Nd4jCuda.FloatVariablesSet) nativeOps.executeStoredGraphFloat(null, id, ptrBuffers, ptrShapes, ptrIndices, map.size()); throw new ND4JIllegalStateException("Op execution failed: " + status); Pointer.memcpy(AtomicAllocator.getInstance().getHostPointer(array), buffer, ArrayUtil.prod(shapeOf) * Nd4j.sizeOfDataType()); AtomicAllocator.getInstance().getAllocationPoint(array).tickHostWrite(); throw new ND4JIllegalStateException("Op execution failed: " + status); Pointer.memcpy(AtomicAllocator.getInstance().getHostPointer(array), buffer, ArrayUtil.prod(shapeOf) * Nd4j.sizeOfDataType()); AtomicAllocator.getInstance().getAllocationPoint(array).tickHostWrite(); throw new ND4JIllegalStateException("Op execution failed: " + status); Pointer.memcpy(AtomicAllocator.getInstance().getHostPointer(array), buffer, ArrayUtil.prod(shapeOf) * Nd4j.sizeOfDataType()); AtomicAllocator.getInstance().getAllocationPoint(array).tickHostWrite();
@Override public long bitmapEncode(INDArray indArray, INDArray target, double threshold) { long length = indArray.lengthLong(); long tLen = target.data().length(); throw new ND4JIllegalStateException("Length of target array should be " + (length / 16 + 5)); if (target.data().dataType() != DataBuffer.Type.INT) throw new ND4JIllegalStateException("Target array should have INT dataType"); DataBuffer buffer = target.data(); buffer.put(0, (int) length); buffer.put(1, (int) length); buffer.put(2, Float.floatToIntBits((float) threshold)); extraz.set(new PointerPointer(32)); PointerPointer extras = extraz.get().put( AtomicAllocator.getInstance().getHostPointer(indArray), context.getOldStream(), ); } else throw new ND4JIllegalStateException("Unknown dataType " + indArray.data().dataType());
@Override public INDArray reshape(char order, long... newShape) { Nd4j.getCompressor().autoDecompress(this); throw new ND4JIllegalStateException( "Can't reshape(int...) without shape arguments. Got empty shape instead."); long[] shape = ArrayUtil.copy(newShape); long prod = ArrayUtil.prodLong(shape); throw new ND4JIllegalStateException("New shape length doesn't match original length: [" + prod + "] vs [" + this.lengthLong() + "]. Original shape: "+Arrays.toString(this.shape())+" New Shape: "+Arrays.toString(newShape)); INDArray ret = Nd4j.createUninitialized(shape, order); if (order != ordering()) { ret.setData(dup(order).data()); } else ret.assign(this); return ret;
/**
 * Decodes a threshold-compressed buffer back into {@code target} via native CUDA kernels.
 * The encoded buffer's header stores the compressed length at int index 0 and the
 * original (uncompressed) length at int index 1.
 */
@Override
public INDArray thresholdDecode(INDArray encoded, INDArray target) {
    DataBuffer buffer = encoded.data();

    // Threshold-encoded data is always backed by an INT buffer.
    if (buffer.dataType() != DataBuffer.Type.INT)
        throw new UnsupportedOperationException();

    // Header layout: [0] = compressed length, [1] = original length.
    long compressedLength = buffer.getInt(0);
    long originalLength = buffer.getInt(1);

    if (target.lengthLong() != originalLength)
        throw new ND4JIllegalStateException("originalLength ["+ originalLength+"] stored in encoded array doesn't match target length ["+ target.lengthLong()+"]");

    DataBuffer result = target.data();

    CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext();

    //nativeOps.memsetAsync(AtomicAllocator.getInstance().getPointer(result), 0,result.length(), 0, context.getOldStream());

    // Lazily initialize the per-thread extras pointer array.
    if (extraz.get() == null)
        extraz.set(new PointerPointer(32));

    // Slot 1 carries the CUDA stream the decode kernel should run on.
    PointerPointer extras = extraz.get().put(1, context.getOldStream());

    //log.info("DEC Source length: {}", buffer.length());
    //log.info("DEC Source: {}", Arrays.toString(buffer.asInt()));

    // Dispatch to the native decoder matching the global data type.
    if (Nd4j.dataType() == DataBuffer.Type.FLOAT) {
        nativeOps.decodeThresholdFloat(extras, AtomicAllocator.getInstance().getPointer(buffer), compressedLength, (FloatPointer) AtomicAllocator.getInstance().getPointer(result));
    } else if (Nd4j.dataType() == DataBuffer.Type.DOUBLE) {
        nativeOps.decodeThresholdDouble(extras, AtomicAllocator.getInstance().getPointer(buffer), compressedLength, (DoublePointer) AtomicAllocator.getInstance().getPointer(result));
    } else if (Nd4j.dataType() == DataBuffer.Type.HALF) {
        nativeOps.decodeThresholdHalf(extras, AtomicAllocator.getInstance().getPointer(buffer), compressedLength, (ShortPointer) AtomicAllocator.getInstance().getPointer(result));
    }
    // NOTE(review): any other dataType silently skips decoding and returns target
    // untouched — presumably unreachable since Nd4j.dataType() is one of the three
    // above, but worth confirming.

    // Mark the device copy as the authoritative one after the kernel writes.
    AtomicAllocator.getInstance().getAllocationPoint(result).tickDeviceWrite();

    //DataBuffer result = Nd4j.getNDArrayFactory().convertDataEx(DataBuffer.TypeEx.THRESHOLD, buffer, getGlobalTypeEx());

    return target;
}