public Parameters() {
    SparseAveragedPerceptron.Parameters p = new SparseAveragedPerceptron.Parameters();
    p.learningRate = .1;
    p.thickness = 2;
    baseLTU = new SparseAveragedPerceptron(p);
}
}
/**
 * This method works just like {@link LinearThresholdUnit#learn(int[],double[],int[],double[])},
 * except it also notifies its weight vector when it gets an example correct, in addition to
 * updating it when it makes a mistake.
 *
 * @param exampleFeatures The example's array of feature indices
 * @param exampleValues   The example's array of feature values
 * @param exampleLabels   The example's label(s)
 * @param labelValues     The labels' values
 **/
public void learn(int[] exampleFeatures, double[] exampleValues, int[] exampleLabels,
        double[] labelValues) {
    assert exampleLabels.length == 1 : "Example must have a single label.";
    assert exampleLabels[0] == 0 || exampleLabels[0] == 1
        : "Example has disallowed label value.";
    boolean label = (exampleLabels[0] == 1);
    double s = awv.simpleDot(exampleFeatures, exampleValues, initialWeight) + bias;

    if (label && s < threshold + positiveThickness)
        promote(exampleFeatures, exampleValues, getLearningRate());
    else if (!label && s >= threshold - negativeThickness)
        demote(exampleFeatures, exampleValues, getLearningRate());
    else
        awv.correctExample();
}
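The rule above only updates on margin mistakes: a positive example promotes the weights when its score falls below threshold + positiveThickness, a negative example demotes them when its score is at or above threshold - negativeThickness, and anything else is simply counted as correct. Below is a minimal, self-contained sketch of that thick-separator update using dense arrays instead of LBJava's sparse averaged weight vectors; every name in it is illustrative, not part of the LBJava API.

/*
 * Hypothetical stand-in for SparseAveragedPerceptron.learn(): dense weights,
 * a bias, and the same promote/demote/correct branching as above.
 */
public class ThickPerceptronSketch {
    private final double[] weights;       // dense stand-in for the sparse weight vector
    private double bias = 0;
    private final double threshold = 0;   // decision threshold
    private final double positiveThickness = 2;   // margin required of positives
    private final double negativeThickness = 2;   // margin required of negatives
    private final double learningRate = 0.1;
    private int correct = 0;  // the real code instead notifies the averaged weight vector

    public ThickPerceptronSketch(int numFeatures) {
        weights = new double[numFeatures];
    }

    public void learn(double[] x, boolean label) {
        double s = bias;
        for (int i = 0; i < x.length; ++i)
            s += weights[i] * x[i];
        if (label && s < threshold + positiveThickness)
            update(x, +learningRate);   // promote: positive example scored too low
        else if (!label && s >= threshold - negativeThickness)
            update(x, -learningRate);   // demote: negative example scored too high
        else
            ++correct;                  // confidently correct, no weight change
    }

    private void update(double[] x, double rate) {
        for (int i = 0; i < x.length; ++i)
            weights[i] += rate * x[i];
        bias += rate;
    }
}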
NETaggerLevel2 taggerLevel2 = (NETaggerLevel2) prms.taggerLevel2;
SparseAveragedPerceptron sap1 = (SparseAveragedPerceptron) taggerLevel1.getBaseLTU();
sap1.setLearningRate(prms.learningRatePredictionsLevel1);
sap1.setThickness(prms.thicknessPredictionsLevel1);
System.out.println("L1 learning rate = " + sap1.getLearningRate()
        + ", thickness = " + sap1.getPositiveThickness());
if (prms.featuresToUse.containsKey("PredictionsLevel1")) {
    SparseAveragedPerceptron sap2 = (SparseAveragedPerceptron) taggerLevel2.getBaseLTU();
    sap2.setLearningRate(prms.learningRatePredictionsLevel2);
    sap2.setThickness(prms.thicknessPredictionsLevel2);
    System.out.println("L2 learning rate = " + sap2.getLearningRate()
            + ", thickness = " + sap2.getPositiveThickness());
NETaggerLevel2 taggerLevel2 = (NETaggerLevel2) params.taggerLevel2;
SparseAveragedPerceptron sap1 = (SparseAveragedPerceptron) taggerLevel1.getBaseLTU();
System.out.println("L1 SparseAveragedPerceptron learning rate = " + sap1.getLearningRate()
        + ", thickness = " + sap1.getPositiveThickness());
if (params.featuresToUse.containsKey("PredictionsLevel1")) {
    SparseAveragedPerceptron sap2 = (SparseAveragedPerceptron) taggerLevel2.getBaseLTU();
    System.out.println("L2 SparseAveragedPerceptron learning rate = " + sap2.getLearningRate()
            + ", thickness = " + sap2.getPositiveThickness());
/**
 * Determine whether the provided feature carries enough weight to keep: its summed
 * weight magnitude must exceed a threshold value, and features that fall below it
 * are candidates for pruning.
 *
 * @param lex the lexicon.
 * @param f   the feature.
 * @return true if the feature's summed weight magnitude exceeds the threshold.
 **/
protected boolean hasWeight(Lexicon lex, Feature f) {
    int featureindex = lex.lookup(f);

    // We assume each element of the network is of the same type; if that type is
    // SparseAveragedPerceptron, we check both the averaged and the current weight.
    double sum;
    if (this.ltuLearner instanceof SparseAveragedPerceptron) {
        SparseAveragedPerceptron sap = (SparseAveragedPerceptron) this.ltuLearner;
        double wt = sap.getWeightVector().getRawWeights().get(featureindex);
        double avg = sap.getAveragedWeightVector().getRawWeights().get(featureindex);
        sum = Math.abs(wt) + Math.abs(avg);
    } else {
        double wt = this.ltuLearner.getWeightVector().getRawWeights().get(featureindex);
        sum = Math.abs(wt);
    }

    // If the value is sufficiently large, the feature is worth keeping.
    return sum > this.threshold;
}
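The core of the test reduces to one comparison: a feature survives only if the magnitude of its raw weight, plus the magnitude of its averaged weight when an averaged perceptron is in use, exceeds the pruning threshold. Here is a toy sketch with plain arrays standing in for the sparse weight vectors; the signature and names are hypothetical.

// Hypothetical helper: rawWeights/averagedWeights stand in for the values read
// from getWeightVector() and getAveragedWeightVector() above.
static boolean hasWeight(double[] rawWeights, double[] averagedWeights,
                         int featureIndex, double threshold) {
    double sum = Math.abs(rawWeights[featureIndex])
            + Math.abs(averagedWeights[featureIndex]);
    return sum > threshold;   // keep the feature only if its weight mass is large enough
}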
for (; i < numberclasses; ++i) {
    SparseAveragedPerceptron sap = (SparseAveragedPerceptron) net.get(i);
    double wt = sap.getWeightVector().getRawWeights().get(featureindex);
    double avg = sap.getAveragedWeightVector().getRawWeights().get(featureindex);
    sum += Math.abs(wt);
    sum += Math.abs(avg);
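In the multi-class network the same test is applied per class and the magnitudes are accumulated, as the loop above does, so a feature is kept if it matters to any of the classes combined. A hedged sketch of that accumulation follows; the 2-D arrays are hypothetical stand-ins for the per-class weight vectors.

// Hypothetical sketch: raw[c] and averaged[c] stand in for class c's
// getWeightVector() and getAveragedWeightVector() raw weights.
static boolean hasWeightAcrossClasses(double[][] raw, double[][] averaged,
                                      int featureIndex, double threshold) {
    double sum = 0;
    for (int c = 0; c < raw.length; ++c)
        sum += Math.abs(raw[c][featureIndex]) + Math.abs(averaged[c][featureIndex]);
    return sum > threshold;
}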
+ ".level1.lex"); SparseAveragedPerceptron sap1 = (SparseAveragedPerceptron)taggerLevel1.getBaseLTU(); System.out.println("L1 SparseAveragedPerceptron learning rate = "+sap1.getLearningRate()+", thickness = "+sap1.getPositiveThickness()); NETaggerLevel2 taggerLevel2 = null; if (ParametersForLbjCode.currentParameters.featuresToUse.containsKey("PredictionsLevel1")) { + ".level2.lex"); SparseAveragedPerceptron sap2 = (SparseAveragedPerceptron)taggerLevel2.getBaseLTU(); System.out.println("L2 SparseAveragedPerceptron learning rate = "+sap2.getLearningRate()+", thickness = "+sap2.getPositiveThickness());
Parameters() {
    SparseAveragedPerceptron.Parameters p = new SparseAveragedPerceptron.Parameters();
    p.learningRate = .1;
    p.thickness = 2;
    baseLTU = new SparseAveragedPerceptron(p);
}
}
Parameters() {
    SparseAveragedPerceptron.Parameters p = new SparseAveragedPerceptron.Parameters();
    p.learningRate = .1;
    p.thickness = 4;
    baseLTU = new SparseAveragedPerceptron(p);
}
}
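Note that this generated constructor is identical to the thickness = 2 variants above except for its wider separator (thickness = 4); the learning rate stays at .1.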