/**
 * Given the indexes of the weights to prune, discards them, then shrinks the
 * weight vector down to save memory. Delegates to the weight vector's own
 * pruning implementation.
 *
 * @param uselessfeatures the indexes of the features being pruned.
 * @param numberFeatures  the total number of features before pruning.
 */
public void pruneWeights(int[] uselessfeatures, int numberFeatures) {
    this.getWeightVector().pruneWeights(uselessfeatures, numberFeatures);
}
}
/* NOTE(review): fragment — the if-block opened here closes outside this view. */
int fi = entry.getValue(); /* presumably the feature's index into the weight vector — TODO confirm against the enclosing loop's map */
if (!whitelist.contains(entry.getKey())) { /* whitelisted features appear to be exempt from this check — verify caller intent */
    double wt = Math.abs(this.ltuLearner.getWeightVector().getRawWeights().get(fi)); /* magnitude of the raw (current, non-averaged) weight */
/** * Determine if the provided feature has sum of weights greater than a threshold value, * and discard the feature if it falls below. * @param lex the lexicon. * @param f the feature. * @return true if the feature has any value, there is a */ protected boolean hasWeight(Lexicon lex, Feature f) { int featureindex = lex.lookup(f); // we assume each element of the network is of the same type, if that type is sparse averaged // perceptron, we check both the averaged and current weight double sum; if (this.ltuLearner instanceof SparseAveragedPerceptron) { SparseAveragedPerceptron sap = (SparseAveragedPerceptron) this.ltuLearner; double wt = sap.getWeightVector().getRawWeights().get(featureindex); double avg = sap.getAveragedWeightVector().getRawWeights().get(featureindex); sum = Math.abs(wt); sum += Math.abs(avg); } else { double wt = this.ltuLearner.getWeightVector().getRawWeights().get(featureindex); sum = Math.abs(wt); } // if the value is sufficiently large, then we have a good weight and should keep. if (sum > this.threshold) return true; else return false; }
/* NOTE(review): fragment — the loop variable `i` is initialized, and the loop
   body is closed, outside this view. Accumulates into `sum` the absolute
   weight of `featureindex` across the remaining LTUs of the network. */
for (; i < numberclasses; ++i) {
    LinearThresholdUnit ltu = (LinearThresholdUnit) net.get(i); /* cast assumes every network element is an LTU — TODO confirm */
    double wt = ltu.getWeightVector().getRawWeights().get(featureindex);
    sum += Math.abs(wt); /* magnitude only; the weight's sign is irrelevant here */