@Override
@SuppressWarnings("unchecked")
public DenseLayer build() {
    return new DenseLayer(this);
}
}
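// Usage sketch (assumption, not part of the surrounding source): a DenseLayer configuration
// is normally created through this Builder rather than constructed directly. The class name
// DenseLayerBuilderExample and the sizes/activation below are illustrative only.
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.nd4j.linalg.activations.Activation;

class DenseLayerBuilderExample {
    static DenseLayer buildExample() {
        return new DenseLayer.Builder()
                .nIn(784)                    // inputs to this layer (illustrative)
                .nOut(256)                   // hidden units (illustrative)
                .activation(Activation.RELU) // activation function for the layer
                .build();                    // calls the build() method shown above
    }
}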
// Store the DenseLayer configuration as properties on a graph node
DenseLayer dl = (DenseLayer) layer;
node.addLabel(Label.label("DenseLayer"));
node.setProperty("activation", dl.getActivationFn().toString()); // todo parameters
node.setProperty("biasInit", dl.getBiasInit());
node.setProperty("biasLearningRate", dl.getBiasLearningRate());
node.setProperty("l1", dl.getL1());
node.setProperty("l1Bias", dl.getL1Bias());
node.setProperty("l2", dl.getL2());
node.setProperty("l2Bias", dl.getL2Bias());
node.setProperty("distribution", dl.getDist().toString());
node.setProperty("in", dl.getNIn());
node.setProperty("out", dl.getNOut());
@Override
public Layer instantiate(NeuralNetConfiguration conf, Collection<IterationListener> iterationListeners,
                         int layerIndex, INDArray layerParamsView, boolean initializeParams) {
    LayerValidation.assertNInNOutSet("DenseLayer", getLayerName(), layerIndex, getNIn(), getNOut());

    org.deeplearning4j.nn.layers.feedforward.dense.DenseLayer ret =
                    new org.deeplearning4j.nn.layers.feedforward.dense.DenseLayer(conf);
    ret.setListeners(iterationListeners);
    ret.setIndex(layerIndex);
    ret.setParamsViewArray(layerParamsView);
    Map<String, INDArray> paramTable = initializer().init(conf, layerParamsView, initializeParams);
    ret.setParamTable(paramTable);
    ret.setConf(conf);
    return ret;
}
@Override
public LayerMemoryReport getMemoryReport(InputType inputType) {
    InputType outputType = getOutputType(-1, inputType);

    int numParams = initializer().numParams(this);
    int updaterStateSize = (int) getIUpdater().stateSize(numParams);

    int trainSizeFixed = 0;
    int trainSizeVariable = 0;
    if (getDropOut() > 0) {
        if (false) {
            //TODO drop connect
            //Dup the weights... note that this does NOT depend on the minibatch size...
            trainSizeVariable += 0; //TODO
        } else {
            //Assume we dup the input
            trainSizeVariable += inputType.arrayElementsPerExample();
        }
    }

    //Also, during backprop: we do a preOut call -> gives us activations size equal to the output size,
    // which is modified in-place by activation function backprop;
    // then we have 'epsilonNext' which is equivalent to input size
    trainSizeVariable += outputType.arrayElementsPerExample();

    return new LayerMemoryReport.Builder(layerName, DenseLayer.class, inputType, outputType)
                    .standardMemory(numParams, updaterStateSize)
                    .workingMemory(0, 0, trainSizeFixed, trainSizeVariable) //No additional memory (beyond activations) for inference
                    .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayer
                    .build();
}
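// Usage sketch (assumption, not part of the surrounding source): getMemoryReport can be queried
// for a concrete input type to estimate per-example memory. The class name MemoryReportExample
// and the feed-forward size 784 are illustrative only.
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.memory.LayerMemoryReport;

class MemoryReportExample {
    static void printReport(DenseLayer layer) {
        // Ask the layer configuration for its memory estimate given a flat 784-element input
        LayerMemoryReport report = layer.getMemoryReport(InputType.feedForward(784));
        System.out.println(report);
    }
}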
/**
 * Get layer output type.
 *
 * @param inputType Array of InputTypes
 * @return output type as InputType
 * @throws InvalidKerasConfigurationException
 */
@Override
public InputType getOutputType(InputType... inputType) throws InvalidKerasConfigurationException {
    /* Check whether layer requires a preprocessor for this InputType. */
    InputPreProcessor preprocessor = getInputPreprocessor(inputType[0]);
    if (preprocessor != null) {
        return this.getDenseLayer().getOutputType(-1, preprocessor.getOutputType(inputType[0]));
    }
    return this.getDenseLayer().getOutputType(-1, inputType[0]);
}
@Override
public void initializeBackend() {
    backend = new org.deeplearning4j.nn.conf.layers.DenseLayer();
}