/**
 * Initializes the default base LTU: a {@link SparseAveragedPerceptron} configured with a
 * learning rate of 0.1 and a thickness of 2.
 **/
public Parameters() {
  SparseAveragedPerceptron.Parameters ltuParameters = new SparseAveragedPerceptron.Parameters();
  ltuParameters.thickness = 2;
  ltuParameters.learningRate = .1;
  baseLTU = new SparseAveragedPerceptron(ltuParameters);
}
}
/**
 * Resets the weight vector to all zeros: clears the inherited learner state, zeroes the bias,
 * and replaces the weight vector with an empty clone of itself.
 **/
public void forget() {
  super.forget();
  bias = 0;
  weightVector = weightVector.emptyClone();
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  Parameters result = new Parameters((SparsePerceptron.Parameters) super.getParameters());
  result.beta = beta;
  return result;
}
/**
 * Writes the binary representation of this learned function if there is a location cached in
 * {@link #lcFilePath}, and writes the binary representation of the feature lexicon if there is
 * a location cached in {@link #lexFilePath}.
 **/
public void save() {
  if (lcFilePath != null) {
    saveModel();
  }

  // The lexicon is only persisted when a path is set and there is something to write.
  if (lexFilePath != null && lexicon != null && lexicon.size() > 0) {
    saveLexicon();
  }
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  Parameters result = new Parameters(super.getParameters());
  result.rounds = rounds;
  result.weakLearner = weakLearner;
  return result;
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  LinearThresholdUnit.Parameters inherited =
      (LinearThresholdUnit.Parameters) super.getParameters();
  return new Parameters(inherited);
}
/**
 * Initializes this learner with the given name and learning rate.
 *
 * <p>NOTE(review): the original Javadoc said "Use this constructor to specify an alternative
 * subclass of {@link SparseWeightVector}", which does not match these parameters — presumably
 * a copy-paste error from a sibling constructor; verify against the class's other overloads.
 *
 * @param n The name of the classifier.
 * @param r The desired learning rate value.
 **/
public StochasticGradientDescent(String n, double r) { super(n); Parameters p = new Parameters(); p.learningRate = r; setParameters(p); }
/**
 * Reinitializes the learner to the state it started at before any learning was performed.
 **/
public void forget() {
  super.forget();
  // The bias variance is restored to the reciprocal of the configured initial variance
  // (assumes initialVariance is floating point — TODO confirm no integer division).
  variancesBias = 1 / initialVariance;
  variances = variances.emptyClone();
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  Parameters result = new Parameters(super.getParameters());
  result.learningRate = learningRate;
  result.weightVector = weightVector;
  return result;
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  Parameters result = new Parameters(super.getParameters());
  result.defaultPrediction = defaultPrediction;
  result.baseLearner = baseLearner;
  return result;
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  Parameters result = new Parameters(super.getParameters());
  result.baseLTU = baseLTU;
  return result;
}
/**
 * Retrieves the parameters that are set in this learner.
 *
 * @return An object containing all the values of the parameters that control the behavior of
 *         this learning algorithm.
 **/
public Learner.Parameters getParameters() {
  Parameters result = new Parameters(super.getParameters());
  result.attributeString = attributeString;
  result.baseClassifier = baseClassifier;
  return result;
}
/**
 * Given the indexes of the weights to prune, discards them, then shrinks the weight vector
 * down to save memory. Delegates directly to the underlying weight vector.
 *
 * @param uselessfeatures The features being pruned.
 * @param numberFeatures The total number of features before pruning.
 **/
public void pruneWeights(int[] uselessfeatures, int numberFeatures) {
  getWeightVector().pruneWeights(uselessfeatures, numberFeatures);
}
}
/**
 * Sets up the default base LTU: a {@link SparseAveragedPerceptron} with learning rate 0.1 and
 * thickness 2.
 **/
public Parameters() {
  SparseAveragedPerceptron.Parameters defaults = new SparseAveragedPerceptron.Parameters();
  defaults.learningRate = .1;
  defaults.thickness = 2;
  baseLTU = new SparseAveragedPerceptron(defaults);
}
}
/**
 * Configures the default base LTU as a {@link SparseAveragedPerceptron} with learning rate
 * 0.1 and thickness 2.
 **/
Parameters() {
  SparseAveragedPerceptron.Parameters ltuConfig = new SparseAveragedPerceptron.Parameters();
  ltuConfig.thickness = 2;
  ltuConfig.learningRate = .1;
  baseLTU = new SparseAveragedPerceptron(ltuConfig);
}
}
/**
 * Builds the default base LTU — a {@link SparseAveragedPerceptron} using a learning rate of
 * 0.1 and a thickness of 2.
 **/
Parameters() {
  SparseAveragedPerceptron.Parameters perceptronParams =
      new SparseAveragedPerceptron.Parameters();
  perceptronParams.learningRate = .1;
  perceptronParams.thickness = 2;
  baseLTU = new SparseAveragedPerceptron(perceptronParams);
}
}
/**
 * Creates the default base LTU: a {@link SparseAveragedPerceptron} whose learning rate is 0.1
 * and whose thickness is 2.
 **/
public Parameters() {
  SparseAveragedPerceptron.Parameters baseParams = new SparseAveragedPerceptron.Parameters();
  baseParams.thickness = 2;
  baseParams.learningRate = .1;
  baseLTU = new SparseAveragedPerceptron(baseParams);
}
}
/**
 * Establishes the default base LTU: a {@link SparseAveragedPerceptron} with learning rate 0.1
 * and thickness 4 (note: thicker than the sibling defaults, which use 2).
 **/
Parameters() {
  SparseAveragedPerceptron.Parameters defaults = new SparseAveragedPerceptron.Parameters();
  defaults.thickness = 4;
  defaults.learningRate = .1;
  baseLTU = new SparseAveragedPerceptron(defaults);
}
}
/**
 * Provides the default base LTU, a {@link SparseAveragedPerceptron} set to learning rate 0.1
 * and thickness 2.
 **/
Parameters() {
  SparseAveragedPerceptron.Parameters settings = new SparseAveragedPerceptron.Parameters();
  settings.learningRate = .1;
  settings.thickness = 2;
  baseLTU = new SparseAveragedPerceptron(settings);
}
}
/**
 * Assigns the default base LTU: a {@link SparseAveragedPerceptron} whose parameters fix the
 * learning rate at 0.1 and the thickness at 2.
 **/
public Parameters() {
  SparseAveragedPerceptron.Parameters config = new SparseAveragedPerceptron.Parameters();
  config.thickness = 2;
  config.learningRate = .1;
  baseLTU = new SparseAveragedPerceptron(config);
}
}