/**
 * Returns the kernel height (number of rows) configured on the backend layer.
 *
 * @return kernel rows (default = 5)
 */
@OptionMetadata(
    displayName = "number of rows in kernel",
    description = "The number of rows in the kernel (default = 5).",
    commandLineParamName = "rows",
    commandLineParamSynopsis = "-rows <int>",
    displayOrder = 4
)
public int getKernelSizeX() {
  // Kernel size is stored as [rows, columns]; index 0 is the row count.
  int[] kernelDims = backend.getKernelSize();
  return kernelDims[0];
}
/**
 * Returns the horizontal padding (number of columns) configured on the backend layer.
 *
 * @return padding columns (default = 0)
 */
@OptionMetadata(
    displayName = "number of columns in padding",
    description = "The number of columns in the padding (default = 0).",
    commandLineParamName = "paddingColumns",
    commandLineParamSynopsis = "-paddingColumns <int>",
    displayOrder = 9
)
public int getPaddingColumns() {
  // Padding is stored as [rows, columns]; index 1 is the column count.
  int[] pad = backend.getPadding();
  return pad[1];
}
/**
 * Returns the vertical stride (along rows) configured on the backend layer.
 *
 * @return stride rows (default = 1)
 */
@OptionMetadata(
    displayName = "number of rows in stride",
    description = "The stride along the rows (default = 1).",
    commandLineParamName = "strideRows",
    commandLineParamSynopsis = "-strideRows <int>",
    displayOrder = 6
)
public int getStrideRows() {
  // Stride is stored as [rows, columns]; index 0 is the row stride.
  int[] strides = backend.getStride();
  return strides[0];
}
} else if (layer instanceof SubsamplingLayer) { SubsamplingLayer layer1 = (SubsamplingLayer) layer; map.put("Kernel size", Arrays.toString(layer1.getKernelSize())); map.put("Stride", Arrays.toString(layer1.getStride())); map.put("Padding", Arrays.toString(layer1.getPadding())); map.put("Pooling Type", layer1.getPoolingType().toString()); } else if (layer instanceof BaseOutputLayer) { BaseOutputLayer ol = (BaseOutputLayer) layer;
/**
 * Returns the pooling type configured on the backend layer, translated into
 * the wrapper's own {@code PoolingType} enum.
 *
 * @return pooling type (default = MAX)
 */
@OptionMetadata(
    displayName = "pooling type",
    description = "The type of pooling to use (default = MAX; options: MAX, AVG, SUM, NONE).",
    commandLineParamName = "poolingType",
    commandLineParamSynopsis = "-poolingType <string>",
    displayOrder = 10
)
public PoolingType getPoolingType() {
  // Map the DL4J enum value onto the wrapper enum before returning it.
  return PoolingType.fromBackend(backend.getPoolingType());
}
/**
 * Applies the builder-level configuration to a single layer: resolves a display
 * name, runs learning-rate validation, copies shared config onto the layer (and,
 * for frozen layers, onto the wrapped inner layer), fills in a default
 * convolution mode for convolution/subsampling layers, and finally runs the
 * general layer validation.
 *
 * NOTE(review): for a FrozenLayer the inner layer receives copied config, but the
 * convolution-mode defaulting below checks the outer {@code layer} only — an inner
 * ConvolutionLayer/SubsamplingLayer inside a FrozenLayer would not get a default
 * mode here. Confirm whether that is intentional.
 */
private void configureLayer(Layer layer) {
    String layerName;
    // Fall back to a placeholder name when the layer (or its name) is absent.
    if (layer == null || layer.getLayerName() == null)
        layerName = "Layer not named";
    else
        layerName = layer.getLayerName();
    learningRateValidation(layerName);
    if (layer != null) {
        copyConfigToLayer(layerName, layer);
    }
    // Frozen layers wrap another layer; the wrapped layer needs the config too.
    if (layer instanceof FrozenLayer) {
        copyConfigToLayer(layerName, ((FrozenLayer) layer).getLayer());
    }
    // Default the convolution mode when the user did not set one explicitly.
    if (layer instanceof ConvolutionLayer) {
        ConvolutionLayer cl = (ConvolutionLayer) layer;
        if (cl.getConvolutionMode() == null) {
            cl.setConvolutionMode(convolutionMode);
        }
    }
    if (layer instanceof SubsamplingLayer) {
        SubsamplingLayer sl = (SubsamplingLayer) layer;
        if (sl.getConvolutionMode() == null) {
            sl.setConvolutionMode(convolutionMode);
        }
    }
    LayerValidation.generalValidation(layerName, layer, useRegularization, useDropConnect, dropOut, l2, l2Bias, l1, l1Bias, dist);
}
@Override public LayerMemoryReport getMemoryReport(InputType inputType) { InputType.InputTypeConvolutional c = (InputType.InputTypeConvolutional) inputType; InputType.InputTypeConvolutional outputType = (InputType.InputTypeConvolutional) getOutputType(-1, inputType); int actElementsPerEx = outputType.arrayElementsPerExample(); //TODO Subsampling helper memory use... (CuDNN etc) //During forward pass: im2col array + reduce. Reduce is counted as activations, so only im2col is working mem int im2colSizePerEx = c.getDepth() * outputType.getHeight() * outputType.getWidth() * kernelSize[0] * kernelSize[1]; //Current implementation does NOT cache im2col etc... which means: it's recalculated on each backward pass int trainingWorkingSizePerEx = im2colSizePerEx; if (getDropOut() > 0) { //Dup on the input before dropout, but only for training trainingWorkingSizePerEx += inputType.arrayElementsPerExample(); } return new LayerMemoryReport.Builder(layerName, SubsamplingLayer.class, inputType, outputType) .standardMemory(0, 0) //No params .workingMemory(0, im2colSizePerEx, 0, trainingWorkingSizePerEx) .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching .build(); }
/**
 * Constructs the subsampling layer implementation from a network configuration.
 * Initializes the (optional) native helper before copying the convolution mode
 * from the layer configuration.
 *
 * @param conf network configuration whose layer must be a
 *             {@code org.deeplearning4j.nn.conf.layers.SubsamplingLayer}
 */
public SubsamplingLayer(NeuralNetConfiguration conf) {
    super(conf);
    initializeHelper();
    // Cache the configured convolution mode locally for use during forward/backward.
    this.convolutionMode = ((org.deeplearning4j.nn.conf.layers.SubsamplingLayer) conf.getLayer()).getConvolutionMode();
}
/**
 * Installs a fresh DL4J subsampling-layer configuration as the backend of this wrapper.
 */
@Override
public void initializeBackend() {
  org.deeplearning4j.nn.conf.layers.SubsamplingLayer freshBackend =
      new org.deeplearning4j.nn.conf.layers.SubsamplingLayer();
  backend = freshBackend;
}
/**
 * Returns the eps parameter configured on the backend layer.
 *
 * @return eps value (default = 1e-8)
 */
@OptionMetadata(
    displayName = "eps",
    description = "The value of the eps parameter (default = 1e-8).",
    commandLineParamName = "eps",
    commandLineParamSynopsis = "-eps <double>",
    displayOrder = 2
)
public double getEps() {
  // Straight delegation to the wrapped DL4J layer configuration.
  double epsValue = backend.getEps();
  return epsValue;
}
/**
 * Get layer output type.
 *
 * @param inputType Array of InputTypes
 * @return output type as InputType
 * @throws InvalidKerasConfigurationException if the number of supplied input types is not exactly one
 */
@Override
public InputType getOutputType(InputType... inputType) throws InvalidKerasConfigurationException {
    // Fix: was "inputType.length > 1", which let a zero-length array fall through to an
    // ArrayIndexOutOfBoundsException on inputType[0] instead of the documented exception.
    if (inputType.length != 1)
        throw new InvalidKerasConfigurationException(
                "Keras Subsampling layer accepts only one input (received " + inputType.length + ")");
    return this.getSubsamplingLayer().getOutputType(-1, inputType[0]);
}
}
} else if (layer instanceof SubsamplingLayer) { SubsamplingLayer layer1 = (SubsamplingLayer) layer; map.put("Kernel size", Arrays.toString(layer1.getKernelSize())); map.put("Stride", Arrays.toString(layer1.getStride())); map.put("Padding", Arrays.toString(layer1.getPadding())); map.put("Pooling Type", layer1.getPoolingType().toString()); } else if (layer instanceof BaseOutputLayer) { BaseOutputLayer ol = (BaseOutputLayer) layer;
/**
 * Returns the convolution mode configured on the backend layer, translated into
 * the wrapper's own {@code ConvolutionMode} enum.
 *
 * @return convolution mode (default = Truncate)
 */
@OptionMetadata(
    displayName = "convolution mode",
    description = "The convolution mode (default = Truncate).",
    commandLineParamName = "mode",
    commandLineParamSynopsis = "-mode <string>",
    displayOrder = 2
)
public ConvolutionMode getConvolutionMode() {
  // Map the DL4J enum value onto the wrapper enum before returning it.
  return ConvolutionMode.fromBackend(backend.getConvolutionMode());
}
/**
 * Validates the builder state and constructs the subsampling layer.
 * P-norm pooling requires a positive p value; kernel/stride/padding are
 * checked by the shared CNN validation utility.
 *
 * @return the configured {@link SubsamplingLayer}
 * @throws IllegalStateException if PNORM pooling is selected without a valid p-norm value
 */
@Override
@SuppressWarnings("unchecked")
public SubsamplingLayer build() {
    boolean pnormUnset =
            poolingType == org.deeplearning4j.nn.conf.layers.PoolingType.PNORM && pnorm <= 0;
    if (pnormUnset) {
        throw new IllegalStateException(
                "Incorrect Subsampling config: p-norm must be set when using PoolingType.PNORM");
    }
    ConvolutionUtils.validateCnnKernelStridePadding(kernelSize, stride, padding);
    return new SubsamplingLayer(this);
}
}
} else if (layer instanceof SubsamplingLayer) { SubsamplingLayer layer1 = (SubsamplingLayer) layer; map.put("Kernel size", Arrays.toString(layer1.getKernelSize())); map.put("Stride", Arrays.toString(layer1.getStride())); map.put("Padding", Arrays.toString(layer1.getPadding())); map.put("Pooling Type", layer1.getPoolingType().toString()); } else if (layer instanceof BaseOutputLayer) { BaseOutputLayer ol = (BaseOutputLayer) layer;
/**
 * Returns the vertical padding (number of rows) configured on the backend layer.
 *
 * @return padding rows (default = 0)
 */
@OptionMetadata(
    displayName = "number of rows in padding",
    description = "The number of rows in the padding (default = 0).",
    commandLineParamName = "paddingRows",
    commandLineParamSynopsis = "-paddingRows <int>",
    displayOrder = 8
)
public int getPaddingRows() {
  // Padding is stored as [rows, columns]; index 0 is the row count.
  int[] pad = backend.getPadding();
  return pad[0];
}
/**
 * Returns the kernel size as a [rows, columns] array, straight from the backend.
 * Hidden from the GUI/CLI via {@code @ProgrammaticProperty}.
 *
 * @return kernel dimensions [rows, columns]
 */
@ProgrammaticProperty
public int[] getKernelSize() {
  int[] kernelDims = backend.getKernelSize();
  return kernelDims;
}
/**
 * Returns the horizontal stride (along columns) configured on the backend layer.
 *
 * @return stride columns (default = 1)
 */
@OptionMetadata(
    displayName = "number of columns in stride",
    description = "The stride along the columns (default = 1).",
    commandLineParamName = "strideColumns",
    commandLineParamSynopsis = "-strideColumns <int>",
    displayOrder = 7
)
public int getStrideColumns() {
  // Stride is stored as [rows, columns]; index 1 is the column stride.
  int[] strides = backend.getStride();
  return strides[1];
}
} else if (layer.conf().getLayer() instanceof SubsamplingLayer) { SubsamplingLayer layer1 = (SubsamplingLayer) layer.conf().getLayer(); fullLine.append("Kernel size: ").append(Arrays.toString(layer1.getKernelSize())).append("<br/>"); fullLine.append("Stride: ").append(Arrays.toString(layer1.getStride())).append("<br/>"); fullLine.append("Padding: ").append(Arrays.toString(layer1.getPadding())).append("<br/>"); fullLine.append("Pooling type: ").append(layer1.getPoolingType().toString()).append("<br/>"); } else if (layer.conf().getLayer() instanceof FeedForwardLayer) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layer1 =
/**
 * Returns the padding as a [rows, columns] array, straight from the backend.
 * Hidden from the GUI/CLI via {@code @ProgrammaticProperty}.
 *
 * @return padding dimensions [rows, columns]
 */
@ProgrammaticProperty
public int[] getPadding() {
  int[] pad = backend.getPadding();
  return pad;
}