/**
 * Initialize the node with a single vector, mainly used for initializing
 * the bottom.
 */
ClusterNode(DoubleVector mean) {
  // leaf nodes start with no split distance
  this.splitDistance = 0.0;
  // defensive copy: the node must own its mean independently of the caller
  this.mean = mean.deepCopy();
}
/**
 * Returns an independent copy of this vector by delegating to the
 * backing vector's own {@code deepCopy}.
 */
@Override
public DoubleVector deepCopy() {
  return mainVector.deepCopy();
}
/**
 * Returns a new {@link KeyedDoubleVector} with the same key and a deep
 * copy of the wrapped vector.
 */
@Override
public DoubleVector deepCopy() {
  DoubleVector vectorCopy = vector.deepCopy();
  return new KeyedDoubleVector(key, vectorCopy);
}
/**
 * Returns a new {@link NamedDoubleVector} carrying the same name and a
 * deep copy of the wrapped vector.
 */
@Override
public DoubleVector deepCopy() {
  DoubleVector vectorCopy = vector.deepCopy();
  return new NamedDoubleVector(name, vectorCopy);
}
/**
 * Shifts the L-BFGS-style limited memory after a step: stores the latest
 * position difference (s = newX - x) and gradient difference (y = newGrad - grad)
 * together with their inner product ro = s·y, then swaps the current and
 * "new" position/gradient buffers.
 */
private void shift() {
  DoubleVector nextS = null;
  DoubleVector nextY = null;
  int listSize = sList.size();
  if (listSize < m) {
    // memory not yet full: allocate fresh difference vectors
    nextS = new DenseDoubleVector(x.getDimension());
    nextY = new DenseDoubleVector(x.getDimension());
  }
  if (nextS == null) {
    // memory full: recycle the oldest (s, y, ro) entry instead of allocating
    nextS = sList.get(0);
    sList.remove(0);
    nextY = yList.get(0);
    yList.remove(0);
    roList.removeAt(0);
  }
  // nextS = newX - x ; nextY = newGrad - grad (written into the recycled buffers)
  addMultInto(nextS, newX, x, -1);
  addMultInto(nextY, newGrad, grad, -1);
  // curvature term s·y used by the two-loop recursion
  double ro = nextS.dot(nextY);
  sList.add(nextS);
  yList.add(nextY);
  roList.add(ro);
  // Swap x <-> newX and grad <-> newGrad.
  // NOTE(review): the deepCopy calls here allocate on every shift; a pure
  // reference swap might suffice — confirm nothing else aliases these vectors.
  DoubleVector tmpNewX = newX.deepCopy();
  newX = x.deepCopy();
  x = tmpNewX;
  DoubleVector tmpNewGrad = newGrad.deepCopy();
  newGrad = grad.deepCopy();
  grad = tmpNewGrad;
}
// Pop the first remaining point and promote a copy of it to a new center
// (deepCopy so later removals/mutations of 'points' cannot alias the center).
// NOTE(review): looks like canopy-clustering seeding — confirm against the
// surrounding loop, which is outside this chunk.
DoubleVector p1 = points.get(0);
points.remove(0);
DoubleVector canopy = p1.deepCopy();
int assigned = 1;
/**
 * Callback invoked after each optimization iteration. Every
 * {@code evaluationInterval} iterations the current weights are mapped to a
 * classifier, evaluated on the held-out test split, and remembered (as a
 * deep copy) whenever the comparator ranks the new result better than the
 * best seen so far.
 */
@Override
public void onIterationFinished(int iteration, double cost, DoubleVector currentWeights) {
  // only evaluate on every evaluationInterval-th iteration
  if (iteration % evaluationInterval != 0) {
    return;
  }
  T classifier = mapper.mapWeights(currentWeights);
  EvaluationResult evaluation = Evaluator.testClassifier(classifier,
      split.getTestFeatures(), split.getTestOutcome());
  if (bestResult == null) {
    // first evaluation: accept unconditionally
    bestResult = evaluation;
    bestWeights = currentWeights.deepCopy();
  } else if (resultComparison.compare(bestResult, evaluation) > 0) {
    LOG.info("Found better weights with result:");
    evaluation.print(LOG);
    bestResult = evaluation;
    bestWeights = currentWeights.deepCopy();
  }
}
// Only process points whose bit is not yet set (i.e. not yet assigned).
if (!set.get(i)) {
  DoubleVector v = values.get(i);
  // working copy of the candidate center so neighbour aggregation below
  // cannot mutate the stored value
  DoubleVector center = v.deepCopy();
  // NOTE(review): 't1' presumably is a distance threshold for the k-NN
  // query and 'k' the neighbour count — confirm against the tree API.
  List<VectorDistanceTuple<Integer>> nns = tree.getNearestNeighbours(v, k, t1);
/**
 * Computes the gradient for the given feature/outcome pair. The residual is
 * the summed difference between the target {@code y} and the
 * {@code hypothesis}; when it is exactly zero a zeroed sparse vector is
 * returned, otherwise each non-zero feature index i is set to
 * {@code -residual * guardedLogarithm(feature[i] + 1)}.
 */
@Override
public DoubleVector calculateGradient(DoubleVector feature, DoubleVector y, DoubleVector hypothesis) {
  double residual = y.subtract(hypothesis).sum();
  if (residual == 0d) {
    // perfect fit: gradient is all zeros
    return new SequentialSparseDoubleVector(feature.getDimension());
  }
  // start from a copy so untouched (zero) entries stay zero
  DoubleVector gradient = feature.deepCopy();
  Iterator<DoubleVectorElement> nonZeros = feature.iterateNonZero();
  while (nonZeros.hasNext()) {
    DoubleVectorElement element = nonZeros.next();
    double logValue = MathUtils.guardedLogarithm(element.getValue() + 1d);
    gradient.set(element.getIndex(), logValue * residual * -1d);
  }
  return gradient;
}
}
/**
 * Per-coordinate weight update applied before prediction.
 * NOTE(review): the structure (z accumulator, squared-gradient accumulator n,
 * L1 soft-threshold, learning-rate scaling by sqrt(n)) resembles the
 * FTRL-Proximal update (McMahan et al.) — confirm against the paper.
 * Only coordinates with a non-zero feature value are touched; {@code theta}
 * is mutated in place and returned.
 */
@Override public DoubleVector prePredictionWeightUpdate( FeatureOutcomePair featureOutcome, DoubleVector theta, double learningRate, long iteration) {
  if (squaredPreviousGradient == null) {
    // initialize zeroed vectors of the same type as the weights
    squaredPreviousGradient = theta.deepCopy().multiply(0);
    perCoordinateWeights = theta.deepCopy().multiply(0);
  }
  Iterator<DoubleVectorElement> iterateNonZero = featureOutcome.getFeature() .iterateNonZero();
  while (iterateNonZero.hasNext()) {
    DoubleVectorElement next = iterateNonZero.next();
    // NOTE(review): 'gradientValue' is read from the FEATURE vector, not a
    // gradient; it is only used below for its sign — verify this is intended.
    double gradientValue = next.getValue();
    int index = next.getIndex();
    // zi: per-coordinate accumulator; ni: accumulated squared gradient
    double zi = perCoordinateWeights.get(index);
    double ni = squaredPreviousGradient.get(index);
    if (FastMath.abs(zi) <= l1) {
      // L1 soft-threshold: small accumulators snap the weight to exactly zero
      theta.set(index, 0);
    } else {
      // adaptive per-coordinate learning rate with L2 regularization
      double value = -1d / (((beta + FastMath.sqrt(ni)) / learningRate) + l2);
      value = value * (zi - FastMath.signum(gradientValue) * l1);
      theta.set(index, value);
    }
  }
  return theta;
}
/**
 * Adam-style gradient update: maintains exponential moving averages of the
 * gradient (first moment) and its element-wise square (second moment),
 * applies bias correction, and returns the rescaled gradient with the
 * unchanged cost.
 */
@Override public CostGradientTuple updateGradient(DoubleVector theta, DoubleVector gradient, double learningRate, long iteration, double cost) {
  if (movingAvg == null) {
    // initialize same types with zeros
    movingAvg = gradient.deepCopy().multiply(0);
    squaredGradient = gradient.deepCopy().multiply(0);
  }
  // m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
  DoubleVector oneMinusBeta1Grad = gradient.multiply(1d - movingAvgDecay);
  movingAvg = movingAvg.multiply(movingAvgDecay).add(oneMinusBeta1Grad);
  // v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
  DoubleVector oneMinusBeta2GradSquared = gradient.pow(2d).multiply( 1 - squaredDecay);
  squaredGradient = squaredGradient.multiply(squaredDecay).add( oneMinusBeta2GradSquared);
  // bias-correction terms beta1^t and beta2^t
  double beta1t = FastMath.pow(movingAvgDecay, iteration);
  double beta2t = FastMath.pow(squaredDecay, iteration);
  double alphat = alpha * FastMath.sqrt(1 - beta2t) / (1 - beta1t);
  // at iteration 0 both corrections are 1, giving 0/0 = NaN — fall back to EPS
  if (Double.isNaN(alphat) || alphat == 0.0) {
    alphat = EPS;
  }
  // g'_t = alphat * m_t / (sqrt(v_t) + eps)
  DoubleVector sqrtV = squaredGradient.sqrt().add(eps);
  gradient = movingAvg.multiply(alphat).divide(sqrtV);
  return new CostGradientTuple(cost, gradient);
}
// Seed the optimizer state: x starts at theta (shared reference), newX gets
// its own deep copy so the two position buffers never alias.
this.x = theta;
this.grad = zeros;
this.newX = theta.deepCopy();
this.newGrad = zeros;
// NOTE(review): grad, newGrad and dir all reference the SAME 'zeros' vector —
// verify it is never mutated in place, otherwise these three alias each other.
this.dir = zeros;
// Work on a copy so writes below can go into 'deepCopy' without disturbing
// the vector whose non-zero entries are being iterated.
DoubleVector deepCopy = newWeights.deepCopy();
Iterator<DoubleVectorElement> iterateNonZero = newWeights .iterateNonZero();
/**
 * Calculates the numerical gradient from a cost function using the central
 * difference theorem: f'(x) = (f(x + h) - f(x - h)) / (2 * h).
 *
 * @param vector the parameters to derive.
 * @param f the costfunction to return the cost at a given parameterset.
 * @return a numerical gradient.
 */
public static DoubleVector numericalGradient(DoubleVector vector, CostFunction f) {
  DoubleVector gradient = new DenseDoubleVector(vector.getLength());
  DoubleVector tmp = vector.deepCopy();
  for (int i = 0; i < vector.getLength(); i++) {
    // relative step size keeps h meaningful for both small and large |x_i|
    double stepSize = EPS * (Math.abs(vector.get(i)) + 1d);
    tmp.set(i, vector.get(i) + stepSize);
    double add = f.evaluateCost(tmp).getCost();
    tmp.set(i, vector.get(i) - stepSize);
    double diff = f.evaluateCost(tmp).getCost();
    // BUGFIX: restore the i-th coordinate before moving on; previously it was
    // left at vector[i] - stepSize, so every later partial derivative was
    // evaluated at a point perturbed in all earlier coordinates.
    tmp.set(i, vector.get(i));
    gradient.set(i, (add - diff) / (2d * stepSize));
  }
  return gradient;
}
// NOTE(review): continuation of a statement that begins above this chunk —
// presumably copying model parameters so in-place updates below cannot
// mutate the originals; confirm against the preceding lines.
.deepCopy();
DoubleVector hiddenPriorProbability = this.hiddenPriorProbability .deepCopy();
// Copy the ranked-token vector so the sorting/index work below leaves the
// caller's vector untouched; sortedIndices will hold one slot per entry.
DoubleVector rankedTokens = pRankedTokens.deepCopy();
int[] sortedIndices = new int[rankedTokens.getLength()];
for (int i = 0; i < sortedIndices.length; i++) {