/**
 * Pow function.
 *
 * @param ndArray the ndarray to raise the power of
 * @param power the power to raise by
 * @return the ndarray raised to this power
 */
public static INDArray pow(INDArray ndArray, Number power) {
    // Delegates with the copy flag fixed to true (result is a new array; input untouched).
    return pow(ndArray, power, true);
}
/**
 * Element-wise power function - x^y, performed element-wise.
 * Not performed in-place: the input arrays are not modified.
 *
 * @param ndArray the ndarray to raise to the power of
 * @param power the power to raise by
 * @return a copy of the ndarray raised to the specified power (element-wise)
 */
public static INDArray pow(INDArray ndArray, INDArray power) {
    // Delegates with the copy flag fixed to true, matching the "not in-place" contract above.
    return pow(ndArray, power, true);
}
/**
 * Compute the per-element absolute finite-difference step for {@code x},
 * using the default relative step sqrt(Nd4j.EPS_THRESHOLD).
 *
 * @param x the point at which the step is computed
 * @return the per-element absolute step
 */
public static INDArray computeAbsoluteStep(INDArray x) {
    // Default relative step: square root of the machine-epsilon threshold.
    INDArray relStep = pow(Nd4j.scalar(Nd4j.EPS_THRESHOLD),0.5);
    return computeAbsoluteStep(relStep,x);
}
// Print the array contents directly.
System.out.println("Random array:\n" + random);
//Again, note the limited printing precision, as per example 2
// The Transforms calls below presumably return new arrays rather than mutating
// 'random' — consistent with the copy-by-default Transforms overloads; verify.
System.out.println("Element-wise tanh on random array:\n" + Transforms.tanh(random));
System.out.println("Element-wise power (x^3.0) on random array:\n" + Transforms.pow(random,3.0));
System.out.println("Element-wise scalar max (with scalar 0.5):\n" + Transforms.max(random,0.5));
/**
 * Estimate the variance of a single record with reduced # of dimensions.
 *
 * @param data A single record with the same <i>N</i> features as the constructing data set
 * @param ndims The number of dimensions to include in calculation
 * @return The fraction (0 to 1) of the total variance covered by the <i>ndims</i> basis set.
 */
public double estimateVariance(INDArray data, int ndims) {
    // Mean-center the record, then project it onto the eigenbasis as a column vector.
    INDArray centered = data.sub(mean);
    INDArray projection = eigenvectors.transpose().mmul(centered.reshape(centered.columns(), 1));
    // Squared projection lengths give the per-component variance contributions.
    INDArray squared = Transforms.pow(projection, 2);
    double covered = squared.get(NDArrayIndex.interval(0, ndims)).sumNumber().doubleValue();
    double overall = squared.sumNumber().doubleValue();
    return covered / overall;
}
/**
 * Generates a set of <i>count</i> random samples with the same variance and mean and eigenvector/values
 * as the data set used to initialize the PCA object, with same number of features <i>N</i>.
 * @param count The number of samples to generate
 * @return A matrix of size <i>count</i> rows by <i>N</i> columns
 */
public INDArray generateGaussianSamples(long count) {
    // count x N matrix of standard-normal deviates.
    INDArray samples = Nd4j.randn(new long[] {count, eigenvalues.columns()});
    // Scale each column by eigenvalue^-0.5. NOTE(review): this assumes the stored
    // eigenvalues act as inverse variances ("force constants", see pca2's comment
    // elsewhere in this codebase); if they were plain covariance eigenvalues the
    // exponent would need to be +0.5 — confirm before relying on sample variance.
    INDArray factors = Transforms.pow(eigenvalues, -0.5, true);
    samples.muliRowVector(factors);
    // Rotate the scaled deviates back into feature space and re-center on the mean.
    return Nd4j.tensorMmul(eigenvectors, samples, new int[][] {{1}, {1}}).transposei().addiRowVector(mean);
}
/**
 * Return a reduced basis set that covers a certain fraction of the variance of the data.
 *
 * @param variance The desired fractional variance (0 to 1); coverage will always exceed this value.
 * @return The basis vectors as columns, size <i>N</i> rows by <i>ndims</i> columns,
 *         where <i>ndims</i> is less than or equal to <i>N</i>
 */
public INDArray reducedBasis(double variance) {
    // Per-component scale derived from the eigenvalues (eigenvalue^-0.5); presumably
    // the eigenvalues act as inverse variances here — see the related PCA routines.
    INDArray scales = Transforms.pow(eigenvalues, -0.5, true);
    double grandTotal = scales.sumNumber().doubleValue();
    // Keep leading components until the accumulated fraction exceeds the target.
    int kept = 0;
    double running = 0.0;
    while (kept < scales.columns()) {
        running += scales.getDouble(kept);
        kept++;
        if (running / grandTotal > variance) {
            break;
        }
    }
    // Copy the first 'kept' eigenvectors into the reduced basis, column by column.
    INDArray basis = Nd4j.create(eigenvectors.rows(), kept);
    for (int col = 0; col < kept; col++) {
        basis.putColumn(col, eigenvectors.getColumn(col));
    }
    return basis;
}
/**
 * This method calculates 'phi' which is the probability
 * density function (see Bishop 23).
 *
 * @param diffSquared This is the '(x-mu)^2' term of the Gaussian distribution (squared distance
 *                    between 'x' and the mean value of the distribution). NOTE: mutated in place
 *                    by this method (divi below) — callers must not reuse it afterwards.
 * @param sigma This is the standard deviation of the Gaussian distribution.
 * @return This returns an array of shape [nsamples, nlabels, ndistributions] which contains the
 *         probability density (phi) for each of the samples * labels * distributions for the
 *         given x, sigma, mu.
 */
private INDArray phi(INDArray diffSquared, INDArray sigma) {
    // Gaussian density: 1/(sqrt(2*PI)*s)^d * e^(-(x-mu)^2 / (2*s^2)), d = mLabelWidth.
    // Built as -2*s^2 so the division below carries the minus sign of the exponent.
    INDArray minustwovariance = sigma.mul(sigma).muli(2).negi();
    // This is phi_i(x,mu,sigma): exponential term divided by the normalization constant.
    INDArray likelihoods = Transforms.exp(diffSquared.divi(minustwovariance))
            .divi(Transforms.pow(sigma.mul(SQRT_TWO_PI), (double) mLabelWidth));
    return likelihoods;
}
/** * This method performs a dimensionality reduction, including principal components * that cover a fraction of the total variance of the system. It does all calculations * about the mean. * @param in A matrix of datapoints as rows, where column are features with fixed number N * @param variance The desired fraction of the total variance required * @return The reduced basis set */ public static INDArray pca2(INDArray in, double variance) { // let's calculate the covariance and the mean INDArray[] covmean = covarianceMatrix(in); // use the covariance matrix (inverse) to find "force constants" and then break into orthonormal // unit vector components INDArray[] pce = principalComponents(covmean[0]); // calculate the variance of each component INDArray vars = Transforms.pow(pce[1], -0.5, true); double res = vars.sumNumber().doubleValue(); double total = 0.0; int ndims = 0; for (int i = 0; i < vars.columns(); i++) { ndims++; total += vars.getDouble(i); if (total / res > variance) break; } INDArray result = Nd4j.create(in.columns(), ndims); for (int i = 0; i < ndims; i++) result.putColumn(i, pce[0].getColumn(i)); return result; }
/**
 * Compute the per-element absolute finite-difference step for {@code x}, signed to
 * match the sign of the corresponding element of {@code x}.
 *
 * @param relStep the relative step size; when {@code null}, a default of
 *                sqrt(eps relative to {@code x}) is used
 * @param x the point at which the step is computed
 * @return the per-element signed absolute step
 */
public static INDArray computeAbsoluteStep(INDArray relStep,INDArray x) {
    INDArray rel = relStep;
    if (rel == null) {
        // Default relative step: square root of the epsilon appropriate for x.
        rel = pow(Nd4j.scalar(getEpsRelativeTo(x)), 0.5);
    }
    // gte(0) yields {0,1}; *2 - 1 maps that to {-1,+1} (zero maps to +1).
    INDArray sign = x.gte(0).muli(2).subi(1);
    // step = sign(x) * relStep * max(|x|, 1)
    return sign.mul(rel).muli(max(abs(x), 1.0));
}
// Fragment of a running-statistics accumulator (enclosing method not fully visible).
// First batch: a single sample has zero variance, otherwise variance = std^2.
std = (batchCount == 1) ? Nd4j.zeros(mean.shape()) : Transforms.pow(next.getFeatureMatrix().std(0), 2);
// Convert variance to a sum of squared deviations (M2) by scaling with the batch size.
std.muli(batchCount);
} else {
// Chan et al. parallel-variance merge: squared difference of the batch mean vs the
// running mean, scaled by the usual n_a*n_b/(n_a+n_b) factor — TODO confirm against
// the (unseen) surrounding variable definitions.
INDArray deltaSq = Transforms.pow(meanB.subRowVector(mean), 2);
INDArray deltaSqScaled = deltaSq.mul(((float) runningTotal - batchCount) * batchCount / (float) runningTotal);
// M2 contribution of the incoming batch.
INDArray mtwoB = Transforms.pow(next.getFeatureMatrix().std(0), 2);
mtwoB.muli(batchCount);
std = std.add(mtwoB);
// Fragment of a running-variance update (statement is cut off at the end of this view).
// Squared deviation of the batch mean from the running mean.
INDArray deltaSquared = Transforms.pow(mean.subRowVector(runningMean), 2);
// NOTE(review): muli mutates 'variance' in place — verify callers do not reuse it.
INDArray mB = variance.muli(count);
runningVariance.muli(runningCount).addiRowVector(mB)
// Fragment of a mixture-density cost computation (enclosing method not visible).
// Mixing coefficients divided by the Gaussian normalization constant (sqrt(2*pi)*sigma)^d.
INDArray normalPart = mdc.alpha.div(Transforms.pow(mdc.sigma.mul(SQRT_TWO_PI), mLabelWidth));
// Exponent of the Gaussian; presumably minustwovariance is -2*sigma^2 as in phi() — confirm.
INDArray exponent = labelsMinusMuSquared.div(minustwovariance);
// Per-row maximum of the exponent, typically used for log-sum-exp stabilization — verify usage.
INDArray exponentMax = exponent.max(1);
@Override
public IntegerTensor powInPlace(int exponent) {
    // copy flag false: the operation is applied to 'tensor' in place; the returned
    // array is discarded (presumably the same instance — confirm Transforms.pow contract).
    Transforms.pow(tensor, exponent, false);
    return this;
}
/**
 * Pow function.
 *
 * @param ndArray the ndarray to raise the power of
 * @param power the power to raise by
 * @return the ndarray raised to this power
 */
public static INDArray pow(INDArray ndArray, Number power) {
    // Whether the input is copied or mutated is governed by the global Nd4j.copyOnOps flag.
    return pow(ndArray, power, Nd4j.copyOnOps);
}
@Override
public DoubleTensor powInPlace(double exponent) {
    // copy flag false: the operation is applied to 'tensor' in place; the returned
    // array is discarded (presumably the same instance — confirm Transforms.pow contract).
    Transforms.pow(tensor, exponent, false);
    return this;
}
@Override
public IntegerTensor powInPlace(IntegerTensor exponent) {
    if (!exponent.isScalar()) {
        // Element-wise exponent: delegate to the shim, which produces the new backing array.
        tensor = INDArrayShim.pow(tensor, unsafeGetNd4J(exponent));
    } else {
        // Scalar exponent: raise every element in place (copy flag false).
        Transforms.pow(tensor, exponent.scalar(), false);
    }
    return this;
}
@Override
public DoubleTensor powInPlace(DoubleTensor exponent) {
    if (!exponent.isScalar()) {
        // Element-wise exponent: delegate to the shim, which produces the new backing array.
        tensor = INDArrayShim.pow(tensor, unsafeGetNd4J(exponent));
    } else {
        // Scalar exponent: raise every element in place (copy flag false).
        Transforms.pow(tensor, exponent.scalar(), false);
    }
    return this;
}
/**
 * Generates a set of <i>count</i> random samples with the same variance and mean and eigenvector/values
 * as the data set used to initialize the PCA object, with same number of features <i>N</i>.
 * @param count The number of samples to generate
 * @return A matrix of size <i>count</i> rows by <i>N</i> columns
 */
public INDArray generateGaussianSamples(int count) {
    // count x N matrix of standard-normal deviates.
    INDArray samples = Nd4j.randn(count, eigenvalues.columns());
    // Scale each column by eigenvalue^-0.5. NOTE(review): this assumes the stored
    // eigenvalues act as inverse variances ("force constants"); if they were plain
    // covariance eigenvalues the exponent would need to be +0.5 — confirm.
    INDArray factors = Transforms.pow(eigenvalues, -0.5, true);
    samples.muliRowVector(factors);
    // Rotate the scaled deviates back into feature space and re-center on the mean.
    return Nd4j.tensorMmul(eigenvectors, samples, new int[][] {{1}, {1}}).transposei().addiRowVector(mean);
}