        .gradientNormalizationThreshold(10)
        .build())
    .inputPreProcessor(0, new RnnToCnnPreProcessor(V_HEIGHT, V_WIDTH, 3))
    .inputPreProcessor(3, new CnnToFeedForwardPreProcessor(7, 7, 10))
    .inputPreProcessor(4, new FeedForwardToRnnPreProcessor())
    .pretrain(false).backprop(true)
    .backpropType(BackpropType.TruncatedBPTT)

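The chained calls above are only the tail of a network configuration. As a point of reference, here is a minimal sketch of how such inputPreProcessor(...) calls sit inside a full MultiLayerConfiguration that mixes convolutional, dense and recurrent layers; the layer types, sizes and the V_HEIGHT/V_WIDTH constants are illustrative assumptions (26x26 frames chosen so the conv stack ends at 7x7), not the original configuration.

import org.deeplearning4j.nn.conf.BackpropType;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.*;
import org.deeplearning4j.nn.conf.preprocessor.*;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class PreprocessorConfigSketch {
    // Assumed frame size: 26x26x3, so conv(5x5) -> pool(2x2) -> conv(5x5) yields 7x7x10
    private static final int V_HEIGHT = 26, V_WIDTH = 26;

    public static MultiLayerConfiguration build() {
        return new NeuralNetConfiguration.Builder()
                .list()
                .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(3).nOut(10)
                        .activation(Activation.RELU).build())
                .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
                        .kernelSize(2, 2).stride(2, 2).build())
                .layer(2, new ConvolutionLayer.Builder(5, 5).nIn(10).nOut(10)
                        .activation(Activation.RELU).build())
                .layer(3, new DenseLayer.Builder().nIn(7 * 7 * 10).nOut(100).build())
                .layer(4, new GravesLSTM.Builder().nIn(100).nOut(50).build())
                .layer(5, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(50).nOut(4).build())
                // per-timestep frames (RNN format) -> image tensors for the first conv layer
                .inputPreProcessor(0, new RnnToCnnPreProcessor(V_HEIGHT, V_WIDTH, 3))
                // flattened 7x7x10 activations -> dense layer input
                .inputPreProcessor(3, new CnnToFeedForwardPreProcessor(7, 7, 10))
                // dense activations -> time series for the LSTM
                .inputPreProcessor(4, new FeedForwardToRnnPreProcessor())
                .backpropType(BackpropType.TruncatedBPTT)
                .build();
    }
}
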
@Override
public TensorFlowCnnToFeedForwardPreProcessor clone() {
    return (TensorFlowCnnToFeedForwardPreProcessor) super.clone();
}

@Override
public RnnToCnnPreProcessor clone() {
    return new RnnToCnnPreProcessor(inputHeight, inputWidth, numChannels);
}

@Override
public InputPreProcessor getPreProcessorForInputType(InputType inputType) {
    if (inputType == null) {
        throw new IllegalStateException(
                        "Invalid input for layer (layer name = \"" + getLayerName() + "\"): input type is null");
    }

    switch (inputType.getType()) {
        case FF:
        case CNNFlat:
            //FF -> FF and CNN (flattened format) -> FF: no preprocessor necessary
            return null;
        case RNN:
            //RNN -> FF
            return new RnnToFeedForwardPreProcessor();
        case CNN:
            //CNN -> FF
            InputType.InputTypeConvolutional c = (InputType.InputTypeConvolutional) inputType;
            return new CnnToFeedForwardPreProcessor(c.getHeight(), c.getWidth(), c.getDepth());
        default:
            throw new RuntimeException("Unknown input type: " + inputType);
    }
}

public static InputPreProcessor getPreprocessorForInputTypeRnnLayers(InputType inputType, String layerName) {
    if (inputType == null) {
        throw new IllegalStateException(
                        "Invalid input for RNN layer (layer name = \"" + layerName + "\"): input type is null");
    }

    switch (inputType.getType()) {
        case FF:
        case CNNFlat:
            //FF -> RNN or CNNFlat -> RNN
            //In either case, input data format is a row vector per example
            return new FeedForwardToRnnPreProcessor();
        case RNN:
            //RNN -> RNN: No preprocessor necessary
            return null;
        case CNN:
            //CNN -> RNN
            InputType.InputTypeConvolutional c = (InputType.InputTypeConvolutional) inputType;
            return new CnnToRnnPreProcessor(c.getHeight(), c.getWidth(), c.getDepth());
        default:
            throw new RuntimeException("Unknown input type: " + inputType);
    }
}

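The two selection methods above are normally not called by user code directly: when an input type is declared on the configuration builder, DL4J uses them to insert the right preprocessor between layers automatically. A minimal sketch of that usage, assuming a small image classifier with 28x28 single-channel input (the layer sizes are assumptions):

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class AutoPreprocessorSketch {
    public static MultiLayerConfiguration build() {
        return new NeuralNetConfiguration.Builder()
                .list()
                .layer(0, new ConvolutionLayer.Builder(5, 5).nOut(16)
                        .activation(Activation.RELU).build())
                .layer(1, new DenseLayer.Builder().nOut(64)
                        .activation(Activation.RELU).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nOut(10).build())
                // Declaring the input type lets DL4J call getPreProcessorForInputType(...)
                // for each layer (e.g. adding a CnnToFeedForwardPreProcessor before layer 1)
                // and also fills in the nIn values.
                .setInputType(InputType.convolutional(28, 28, 1))
                .build();
    }
}
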
.removeVertexKeepConnections("flatten") .addVertex("flatten", new PreprocessorVertex(new CnnToFeedForwardPreProcessor(8, 8, 512)), "block5_pool") .removeVertexKeepConnections("fc1")
/**
 * Gets appropriate DL4J InputPreProcessor for given InputTypes.
 *
 * @param inputType Array of InputTypes
 * @return DL4J InputPreProcessor
 * @throws InvalidKerasConfigurationException
 * @see org.deeplearning4j.nn.conf.InputPreProcessor
 */
public InputPreProcessor getInputPreprocessor(InputType... inputType) throws InvalidKerasConfigurationException {
    if (inputType.length > 1)
        throw new InvalidKerasConfigurationException(
                        "Keras GlobalPooling layer accepts only one input (received " + inputType.length + ")");
    InputPreProcessor preprocessor;
    if (inputType[0].getType() == InputType.Type.FF && this.dimensions.length == 1) {
        preprocessor = new FeedForwardToRnnPreProcessor();
    } else {
        preprocessor = this.getGlobalPoolingLayer().getPreProcessorForInputType(inputType[0]);
    }
    return preprocessor;
}

@Override
public InputPreProcessor getPreProcessorForInputType(InputType inputType) {
    switch (inputType.getType()) {
        case FF:
            throw new UnsupportedOperationException(
                            "Global max pooling cannot be applied to feed-forward input type. Got input type = "
                                            + inputType);
        case RNN:
        case CNN:
            //No preprocessor required
            return null;
        case CNNFlat:
            InputType.InputTypeConvolutionalFlat cFlat = (InputType.InputTypeConvolutionalFlat) inputType;
            return new FeedForwardToCnnPreProcessor(cFlat.getHeight(), cFlat.getWidth(), cFlat.getDepth());
    }
    return null;
}

@Override
public ComposableInputPreProcessor clone() {
    ComposableInputPreProcessor clone = (ComposableInputPreProcessor) super.clone();
    if (clone.inputPreProcessors != null) {
        InputPreProcessor[] processors = new InputPreProcessor[clone.inputPreProcessors.length];
        for (int i = 0; i < clone.inputPreProcessors.length; i++) {
            processors[i] = clone.inputPreProcessors[i].clone();
        }
        clone.inputPreProcessors = processors;
    }
    return clone;
}

@Override
public CnnToRnnPreProcessor clone() {
    return new CnnToRnnPreProcessor(inputHeight, inputWidth, numChannels);
}

@Override
public INDArray backprop(INDArray epsilons, int miniBatchSize) {
    INDArray epsilonsReshaped = super.backprop(epsilons, miniBatchSize);
    return epsilonsReshaped.permute(0, 3, 1, 2);
}

@Override
public INDArray preProcess(INDArray input, int miniBatchSize) {
    if (input.rank() == 2)
        return input; //Should usually never happen

    /* DL4J convolutional input:       # channels, # rows, # cols
     * TensorFlow convolutional input: # rows, # cols, # channels
     * Theano convolutional input:     # channels, # rows, # cols
     */

    /* TODO: remove the extra copies of the input. These are only
     * used for debugging purposes during development and testing.
     */
    INDArray flatInput = super.preProcess(input, miniBatchSize);

    INDArray permuted = input.permute(0, 2, 3, 1);
    INDArray flatPermuted = super.preProcess(permuted, miniBatchSize);
    return flatPermuted;
}

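The permute in preProcess(...) exists because DL4J activations are channels-first while TensorFlow-trained weights expect channels-last before flattening. A small standalone illustration of that reordering with ND4J (the shapes are assumptions, not values from the preprocessor):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ChannelOrderSketch {
    public static void main(String[] args) {
        // DL4J convolutional activations: [minibatch, channels, height, width]
        INDArray dl4jOrder = Nd4j.rand(new int[] {4, 3, 7, 7});
        // Reorder to TensorFlow's [minibatch, height, width, channels],
        // mirroring input.permute(0, 2, 3, 1) in preProcess(...) above
        INDArray tfOrder = dl4jOrder.permute(0, 2, 3, 1);
        // Flatten to one row vector per example, as the feed-forward layer expects
        INDArray flattened = tfOrder.dup('c').reshape(4, 7 * 7 * 3);
        System.out.println(java.util.Arrays.toString(flattened.shape()));
    }
}
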
case NONE:
case THEANO:
    preprocessor = new CnnToFeedForwardPreProcessor(it.getHeight(), it.getWidth(), it.getDepth());
    break;
case TENSORFLOW:
    preprocessor = new RnnToFeedForwardPreProcessor();

@Override
public InputPreProcessor getPreProcessorForInputType(InputType inputType) {
    if (inputType.getType() == InputType.Type.CNNFlat) {
        InputType.InputTypeConvolutionalFlat i = (InputType.InputTypeConvolutionalFlat) inputType;
        return new FeedForwardToCnnPreProcessor(i.getHeight(), i.getWidth(), i.getDepth());
    }
    return null;
}

@Override
public UnitVarianceProcessor clone() {
    UnitVarianceProcessor clone = (UnitVarianceProcessor) super.clone();
    if (clone.columnStds != null)
        clone.columnStds = clone.columnStds.dup();
    return clone;
}

        return new FeedForwardToCnnPreProcessor(f.getHeight(), f.getWidth(), f.getDepth());
    default:
        throw new RuntimeException("Unknown input type: " + inputType);