/**
 * Compute the shape of the array produced by a reduce operation.
 *
 * @param wholeShape the shape of the array the reduce op is performed on
 * @param dimensions the dimensions the reduce op is performed along
 * @return the shape of the reduced result
 */
public static long[] getReducedShape(long[] wholeShape, int[] dimensions) {
    // Reducing over every dimension collapses the result to a scalar shape.
    if (isWholeArray(wholeShape, dimensions))
        return new long[] {};

    // Matrix special case: reducing a 2d array along a single axis keeps
    // rank 2, with a 1 in the reduced position (row/column vector result).
    if (wholeShape.length == 2 && dimensions.length == 1) {
        val reduced = new long[2];
        if (dimensions[0] == 0) {
            reduced[0] = 1;
            reduced[1] = wholeShape[1];
        } else if (dimensions[0] == 1) {
            reduced[0] = wholeShape[0];
            reduced[1] = 1;
        }
        // NOTE(review): any other dimension value returns {0, 0} here
        // (array default) — confirm that callers never pass such values.
        return reduced;
    }

    return ArrayUtil.removeIndex(wholeShape, dimensions);
}
/**
 * The number of elements in one slice taken along the given dimensions.
 *
 * @param arr       the array to compute the per-slice length for
 * @param dimension the dimensions the slice is taken along
 * @return the element count of a single slice along those dimensions
 */
public static long lengthPerSlice(INDArray arr, int... dimension) {
    // Drop the sliced-along dimensions; the product of what remains is
    // the number of elements in each slice.
    long[] keptShape = ArrayUtil.removeIndex(arr.shape(), dimension);
    return ArrayUtil.prodLong(keptShape);
}
/**
 * Map a vector index onto the starting index of the corresponding tensor
 * along the given dimensions.
 *
 * @param index the tensor index to map
 * @param arr   the array being indexed into
 * @param rank  the dimensions the tensor is taken along
 * @return the mapped linear index
 */
public static int mapIndexOntoTensor(int index, INDArray arr, int... rank) {
    // Each tensor along the given dimensions spans prod(remaining dims)
    // elements, so scale the tensor index by that span.
    int elementsPerTensor = ArrayUtil.prod(ArrayUtil.removeIndex(arr.shape(), rank));
    return index * elementsPerTensor;
}
/**
 * Validate that the given arrays are concatenable along the given dimension:
 * all scalars, or all of the same rank with identical shapes outside the
 * concatenation dimension.
 *
 * @param dimension the dimension to concatenate along
 * @param arrs      the arrays to validate
 * @throws IllegalArgumentException if no arrays are given, or the arrays'
 *                                  shapes are incompatible for concatenation
 */
protected static void validateConcat(int dimension, INDArray... arrs) {
    // Guard against an empty varargs call, which previously threw an
    // unhelpful ArrayIndexOutOfBoundsException on arrs[0].
    if (arrs == null || arrs.length == 0)
        throw new IllegalArgumentException("No arrays provided for concatenation");

    if (arrs[0].isScalar()) {
        for (int i = 1; i < arrs.length; i++) {
            if (!arrs[i].isScalar())
                throw new IllegalArgumentException("All arrays must have same dimensions");
        }
    } else {
        int dims = arrs[0].shape().length;
        // Shapes must match on every dimension except the concat dimension.
        long[] shape = ArrayUtil.removeIndex(arrs[0].shape(), dimension);
        for (int i = 1; i < arrs.length; i++) {
            // Explicit exceptions instead of `assert`: validation must run
            // even when JVM assertions are disabled (the default).
            if (arrs[i].shape().length != dims)
                throw new IllegalArgumentException("All arrays must have the same rank: expected " + dims
                        + " but array " + i + " has rank " + arrs[i].shape().length);
            long[] otherShape = ArrayUtil.removeIndex(arrs[i].shape(), dimension);
            if (!Arrays.equals(shape, otherShape))
                throw new IllegalArgumentException("All arrays must have the same shape outside dimension "
                        + dimension + ": expected " + Arrays.toString(shape) + " but array " + i
                        + " has " + Arrays.toString(otherShape));
        }
    }
}
/**
 * Get the shape of the reduced array.
 *
 * @param wholeShape the shape of the array the reduce op is performed on
 * @param dimensions the dimensions the reduce op is performed along
 * @return the shape of the result array as the result of the reduce
 */
public static long[] getReducedShape(int[] wholeShape, int[] dimensions) {
    // A reduction over the whole array yields a scalar shape.
    if (isWholeArray(wholeShape, dimensions))
        return new long[] {};

    // 2d special case: reducing a matrix along one axis keeps rank 2,
    // placing a 1 in the reduced position.
    if (wholeShape.length == 2 && dimensions.length == 1) {
        val reduced = new long[2];
        if (dimensions[0] == 0) {
            reduced[0] = 1;
            reduced[1] = wholeShape[1];
        } else if (dimensions[0] == 1) {
            reduced[0] = wholeShape[0];
            reduced[1] = 1;
        }
        // NOTE(review): other dimension values return {0, 0} (array default)
        // — confirm callers never hit this path.
        return reduced;
    }

    return ArrayUtil.toLongArray(ArrayUtil.removeIndex(wholeShape, dimensions));
}
return ArrayUtil.removeIndex(wholeShape, dimensions);
return ArrayUtil.toLongArray(ArrayUtil.removeIndex(wholeShape, dimensions));
protected void assertSlice(INDArray put, long slice) { assert slice <= slices() : "Invalid slice specified " + slice; long[] sliceShape = put.shape(); if (Shape.isRowVectorShape(sliceShape)) { return; } else { long[] requiredShape = ArrayUtil.removeIndex(shape(), 0); //no need to compare for scalar; primarily due to shapes either being [1] or length 0 if (put.isScalar()) return; if (isVector() && put.isVector() && put.length() < length()) return; //edge case for column vectors if (Shape.isColumnVectorShape(sliceShape)) return; if (!Shape.shapeEquals(sliceShape, requiredShape) && !Shape.isRowVectorShape(requiredShape) && !Shape.isRowVectorShape(sliceShape)) throw new IllegalStateException(String.format("Invalid shape size of %s . Should have been %s ", Arrays.toString(sliceShape), Arrays.toString(requiredShape))); } }
/**Choose tensor dimension for operations with 3 arguments: z=Op(x,y) or similar<br> * @see #chooseElementWiseTensorDimension(INDArray) */ public static int chooseElementWiseTensorDimension(INDArray x, INDArray y, INDArray z) { if (x.isVector()) return ArrayUtil.argMax(x.shape()); // FIXME: int cast int opAlongDimensionMinStride = (int) ArrayUtil.argMinOfMax(x.stride(), y.stride(), z.stride()); int opAlongDimensionMaxLength = ArrayUtil.argMax(x.shape()); //Edge case: shapes with 1s in them can have stride of 1 on the dimensions of length 1 if (opAlongDimensionMinStride == opAlongDimensionMaxLength || x.size((int) opAlongDimensionMinStride) == 1) return opAlongDimensionMaxLength; int nOpsAlongMinStride = ArrayUtil.prod(ArrayUtil.removeIndex(x.shape(), (int) opAlongDimensionMinStride)); int nOpsAlongMaxLength = ArrayUtil.prod(ArrayUtil.removeIndex(x.shape(), opAlongDimensionMaxLength)); if (nOpsAlongMinStride <= 10 * nOpsAlongMaxLength) return opAlongDimensionMinStride; else return opAlongDimensionMaxLength; }
}; int[][] deletedAxes = new int[][]{ removeIndex(aAxes, sumAxes[0]), removeIndex(bAxes, sumAxes[1])}; int[] gAxes = range(0, i_v1.get(0).getShape().length); int[][] firstAxes = new int[][]{
/** * Choose tensor dimension for operations with 2 arguments: x=Op(x,y) or similar<br> * @see #chooseElementWiseTensorDimension(INDArray) */ public static long chooseElementWiseTensorDimension(INDArray x, INDArray y) { if (x.isVector()) return ArrayUtil.argMax(x.shape()); //Execute along the vector //doing argMin(max(x.stride(i),y.stride(i))) minimizes the maximum //separation between elements (helps CPU cache) BUT might result in a huge number //of tiny ops - i.e., addi on NDArrays with shape [5,10^6] long opAlongDimensionMinStride = ArrayUtil.argMinOfMax(x.stride(), y.stride()); //doing argMax on shape gives us smallest number of largest tensors //but may not be optimal in terms of element separation (for CPU cache etc) int opAlongDimensionMaxLength = ArrayUtil.argMax(x.shape()); // FIXME: int cast //Edge case: shapes with 1s in them can have stride of 1 on the dimensions of length 1 if (opAlongDimensionMinStride == opAlongDimensionMaxLength || x.size((int)opAlongDimensionMinStride) == 1) return opAlongDimensionMaxLength; //Using a heuristic approach here: basically if we get >= 10x as many tensors using the minimum stride //dimension vs. the maximum size dimension, use the maximum size dimension instead //The idea is to avoid choosing wrong dimension in cases like shape=[10,10^6] //Might be able to do better than this with some additional thought int nOpsAlongMinStride = ArrayUtil.prod(ArrayUtil.removeIndex(x.shape(), (int) opAlongDimensionMinStride)); int nOpsAlongMaxLength = ArrayUtil.prod(ArrayUtil.removeIndex(x.shape(), (int) opAlongDimensionMaxLength)); if (nOpsAlongMinStride <= 10 * nOpsAlongMaxLength) return opAlongDimensionMinStride; else return opAlongDimensionMaxLength; }
int nOpsAlongMinStride = ArrayUtil.prod(ArrayUtil.removeIndex(x.shape(), opAlongDimensionMinStride)); int nOpsAlongMaxLength = ArrayUtil.prod(ArrayUtil.removeIndex(x.shape(), opAlongDimensionMaxLength)); if (nOpsAlongMinStride <= 10 * nOpsAlongMaxLength) return opAlongDimensionMinStride;
int[] remove = ArrayUtil.removeIndex(ArrayUtil.range(0, rank()), dimension); int[] newPermuteDims = Ints.concat(remove, reverseDimensions); int[] finalPermuteDims = tadFinalPermuteDimensions[dimension.length];
/**
 * The number of elements in one slice taken along the given dimensions.
 *
 * @param arr       the array to compute the per-slice length for
 * @param dimension the dimensions the slice is taken along
 * @return the element count of a single slice along those dimensions
 */
public static int lengthPerSlice(INDArray arr, int... dimension) {
    // Remove the sliced-along dimensions; the product of the remaining
    // extents is the per-slice element count.
    int[] keptShape = ArrayUtil.removeIndex(arr.shape(), dimension);
    return ArrayUtil.prod(keptShape);
}
/**
 * Map a vector index onto the starting index of the corresponding tensor
 * along the given dimensions.
 *
 * @param index the tensor index to map
 * @param arr   the array being indexed into
 * @param rank  the dimensions the tensor is taken along
 * @return the mapped linear index
 */
public static int mapIndexOntoTensor(int index, INDArray arr, int... rank) {
    // A tensor along `rank` spans prod(remaining dims) elements; scale the
    // tensor index by that span to get the linear offset.
    int span = ArrayUtil.prod(ArrayUtil.removeIndex(arr.shape(), rank));
    return index * span;
}
@Override
public void exec(int... dimension) {
    // Result shape: the input shape with the reduced dimensions removed.
    int[] resultShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int tensorCount = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(resultShape);
    // Execute the distance op on each tensor along the given dimensions and
    // scatter the scalar results into z.
    for (int tad = 0; tad < tensorCount; tad++) {
        JaccardDistance subOp = (JaccardDistance) opForDimension(tad, dimension);
        double result = Nd4j.getExecutioner().execAndReturn(subOp).getFinalResult().doubleValue();
        z.putScalar(tad, result);
    }
}
@Override
public void exec(int... dimension) {
    // Result shape: the input shape with the reduced dimensions removed.
    int[] resultShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int tensorCount = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(resultShape);
    // Run the Bias op on each tensor along the given dimensions, writing
    // each scalar result into z.
    for (int tad = 0; tad < tensorCount; tad++) {
        Bias subOp = (Bias) opForDimension(tad, dimension);
        double result = Nd4j.getExecutioner().execAndReturn(subOp).getFinalResult().doubleValue();
        z.putScalar(tad, result);
    }
}
}
@Override
public void exec(int... dimension) {
    // Result shape: the input shape with the reduced dimensions removed.
    int[] resultShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int tensorCount = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(resultShape);
    // Compute cosine similarity per tensor along the given dimensions and
    // collect the scalar results into z.
    for (int tad = 0; tad < tensorCount; tad++) {
        CosineSimilarity subOp = (CosineSimilarity) opForDimension(tad, dimension);
        double result = Nd4j.getExecutioner().execAndReturn(subOp).getFinalResult().doubleValue();
        z.putScalar(tad, result);
    }
}
@Override
public void exec(int... dimension) {
    // Result shape: the input shape with the reduced dimensions removed.
    int[] resultShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int tensorCount = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(resultShape);
    // Compute cosine distance per tensor along the given dimensions and
    // collect the scalar results into z.
    for (int tad = 0; tad < tensorCount; tad++) {
        CosineDistance subOp = (CosineDistance) opForDimension(tad, dimension);
        double result = Nd4j.getExecutioner().execAndReturn(subOp).getFinalResult().doubleValue();
        z.putScalar(tad, result);
    }
}
@Override
public void exec(int... dimension) {
    // Result shape: the input shape with the reduced dimensions removed.
    int[] resultShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int tensorCount = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(resultShape);
    // Compute Hamming distance per tensor along the given dimensions and
    // collect the scalar results into z.
    for (int tad = 0; tad < tensorCount; tad++) {
        HammingDistance subOp = (HammingDistance) opForDimension(tad, dimension);
        double result = Nd4j.getExecutioner().execAndReturn(subOp).getFinalResult().doubleValue();
        z.putScalar(tad, result);
    }
}