/**
 * Obtains the default workspace bound to the calling thread and opens its scope.
 *
 * @return the default workspace for the current thread, already activated
 */
@Override
public MemoryWorkspace getAndActivateWorkspace() {
    final MemoryWorkspace workspace = getWorkspaceForCurrentThread();
    return workspace.notifyScopeEntered();
}
/**
 * Obtains the workspace with the given id for the calling thread, creating it with the
 * supplied configuration if required, and opens its scope.
 *
 * @param configuration workspace configuration to use if the workspace must be created
 * @param id            workspace identifier
 * @return the requested workspace, already activated
 */
@Override
public MemoryWorkspace getAndActivateWorkspace(@NonNull WorkspaceConfiguration configuration, @NonNull String id) {
    final MemoryWorkspace workspace = getWorkspaceForCurrentThread(configuration, id);
    return workspace.notifyScopeEntered();
}
/**
 * Obtains the workspace with the given id for the calling thread and opens its scope.
 *
 * @param id workspace identifier
 * @return the requested workspace, already activated
 */
@Override
public MemoryWorkspace getAndActivateWorkspace(@NonNull String id) {
    final MemoryWorkspace workspace = getWorkspaceForCurrentThread(id);
    return workspace.notifyScopeEntered();
}
/**
 * Opens a workspace scope appropriate for the given array type.
 * <p>
 * If the array type is configured as scoped-out, an "out of workspaces" scope is returned
 * instead; otherwise the workspace registered for the type's configuration and name is
 * activated.
 *
 * @param arrayType array type whose workspace (or scoped-out state) should be entered
 * @return the activated workspace scope
 */
@Override
public MemoryWorkspace notifyScopeEntered(@NonNull T arrayType) {
    validateConfig(arrayType);

    // Guard clause: scoped-out types bypass workspaces entirely
    if (isScopedOut(arrayType)) {
        return Nd4j.getWorkspaceManager().scopeOutOfWorkspaces();
    }

    MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
            getConfiguration(arrayType), getWorkspaceName(arrayType));
    return workspace.notifyScopeEntered();
}
/**
 * Looks up the workspace registered under {@code id} for the current thread and
 * enters its scope.
 *
 * @param id workspace identifier
 * @return the activated workspace
 */
@Override
public MemoryWorkspace getAndActivateWorkspace(@NonNull String id) {
    MemoryWorkspace ws = getWorkspaceForCurrentThread(id);
    return ws.notifyScopeEntered();
}
/**
 * Looks up the current thread's default workspace and enters its scope.
 *
 * @return the activated default workspace
 */
@Override
public MemoryWorkspace getAndActivateWorkspace() {
    MemoryWorkspace ws = getWorkspaceForCurrentThread();
    return ws.notifyScopeEntered();
}
/**
 * Looks up (or creates, using {@code configuration}) the workspace registered under
 * {@code id} for the current thread and enters its scope.
 *
 * @param configuration configuration applied if the workspace does not yet exist
 * @param id            workspace identifier
 * @return the activated workspace
 */
@Override
public MemoryWorkspace getAndActivateWorkspace(@NonNull WorkspaceConfiguration configuration, @NonNull String id) {
    MemoryWorkspace ws = getWorkspaceForCurrentThread(configuration, id);
    return ws.notifyScopeEntered();
}
try (MemoryWorkspace workspace = workspaces.get(i).notifyScopeEntered()) {
DataSet next = iter.next(); try (MemoryWorkspace wsCache = cache.notifyScopeEntered()) { try (MemoryWorkspace ws = workspace.notifyScopeEntered()) { input = next.getFeatureMatrix(); pretrainLayer(layerIdx, input);
try (MemoryWorkspace ws = workspace.notifyScopeEntered()) { currInput = activationFromPrevLayer(i, currInput, train).leverageTo(workspaceExternal);
/**
 * Calculate the output of the network, with masking arrays. The masking arrays are used in
 * situations such as one-to-many and many-to-one recurrent neural network (RNN) designs, as
 * well as for supporting time series of varying lengths within the same minibatch.
 *
 * @param input        network input features
 * @param train        if true: forward pass in training mode; false: inference mode
 * @param featuresMask mask array for the input features (may be null-semantics per silentOutput)
 * @param labelsMask   mask array for the labels
 * @return network output activations, detached from any workspace
 */
public INDArray output(INDArray input, boolean train, INDArray featuresMask, INDArray labelsMask) {
    WorkspaceMode cMode = layerWiseConfigurations.getTrainingWorkspaceMode();
    // Temporarily run under the inference workspace mode
    layerWiseConfigurations.setTrainingWorkspaceMode(layerWiseConfigurations.getInferenceWorkspaceMode());
    try {
        MemoryWorkspace workspace =
                layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE
                        ? new DummyWorkspace()
                        : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
                                workspaceConfigurationExternal, workspaceExternal);
        try (MemoryWorkspace wsE = workspace.notifyScopeEntered()) {
            // detach() so the result remains valid after the workspace scope closes
            return silentOutput(input, train, featuresMask, labelsMask).detach();
        }
    } finally {
        // Bug fix: restore the training workspace mode even if silentOutput throws;
        // previously an exception left the network stuck in inference workspace mode.
        layerWiseConfigurations.setTrainingWorkspaceMode(cMode);
    }
}
/**
 * Return an array of network outputs (predictions), given the specified network inputs.
 * Network outputs are for output layers only.
 *
 * @param train If true: do forward pass at training time; false: do forward pass at test time
 * @param input Inputs to the network
 * @return Output activations (order: same as defined in network configuration), detached
 *         from any workspace
 */
public INDArray[] output(boolean train, INDArray... input) {
    WorkspaceMode cMode = configuration.getTrainingWorkspaceMode();
    // Temporarily run under the inference workspace mode
    configuration.setTrainingWorkspaceMode(configuration.getInferenceWorkspaceMode());
    try {
        MemoryWorkspace workspace =
                configuration.getTrainingWorkspaceMode() == WorkspaceMode.NONE
                        ? new DummyWorkspace()
                        : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
                                workspaceConfigurationExternal, workspaceExternal);
        try (MemoryWorkspace wsE = workspace.notifyScopeEntered()) {
            INDArray[] tmp = silentOutput(train, input);
            // detach() each output so results remain valid after the workspace scope closes
            for (int x = 0; x < tmp.length; x++)
                tmp[x] = tmp[x].detach();
            return tmp;
        }
    } finally {
        // Bug fix: restore the training workspace mode even if silentOutput throws;
        // previously an exception left the configuration stuck in inference workspace mode.
        configuration.setTrainingWorkspaceMode(cMode);
    }
}
/**
 * Label the probabilities of the input.
 *
 * @param input the input to label
 * @param train whether the output is test or train. This mainly affects hyper parameters
 *              such as drop out where certain things should be applied with activations
 * @return a vector of probabilities given each label.
 *         <p>
 *         This is typically of the form: [0.5, 0.5] or some other probability distribution
 *         summing to one
 */
public INDArray output(INDArray input, boolean train) {
    WorkspaceMode cMode = layerWiseConfigurations.getTrainingWorkspaceMode();
    // Temporarily run under the inference workspace mode
    layerWiseConfigurations.setTrainingWorkspaceMode(layerWiseConfigurations.getInferenceWorkspaceMode());
    try {
        MemoryWorkspace workspace =
                layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE
                        ? new DummyWorkspace()
                        : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
                                workspaceConfigurationExternal, workspaceExternal);
        try (MemoryWorkspace wsE = workspace.notifyScopeEntered()) {
            // detach() so the result remains valid after the workspace scope closes
            return silentOutput(input, train).detach();
        }
    } finally {
        // Bug fix: restore the training workspace mode even if silentOutput throws;
        // previously an exception left the network stuck in inference workspace mode.
        layerWiseConfigurations.setTrainingWorkspaceMode(cMode);
    }
}
: Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread( workspaceConfigurationExternal, workspaceExternal); try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {
try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {
workspaceConfigurationExternal, workspaceExternal); try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {
/**
 * Computes the t-SNE gradient of the embedding Y via the Barnes-Hut approximation:
 * attractive (edge) forces from the sparse affinity matrix and repulsive (non-edge)
 * forces estimated with the space-partitioning tree, combined as posF - negF / sumQ.
 *
 * @return gradient keyed under {@code Y_GRAD} for the embedding matrix Y
 */
@Override
public Gradient gradient() {
    // Run inside the external workspace unless workspaces are disabled
    MemoryWorkspace workspace = workspaceMode == WorkspaceMode.NONE
            ? new DummyWorkspace()
            : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
                    workspaceConfigurationExternal, workspaceExternal);
    try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {

        // Lazily initialize optimizer state shaped like the embedding
        if (yIncs == null)
            yIncs = zeros(Y.shape());
        if (gains == null)
            gains = ones(Y.shape());

        AtomicDouble sumQ = new AtomicDouble(0);
        /* Calculate gradient based on barnes hut approximation with positive and negative forces */
        INDArray posF = Nd4j.create(Y.shape());
        INDArray negF = Nd4j.create(Y.shape());

        // Tree is built once from the current embedding and cached across calls;
        // NOTE(review): assumes Y does not change shape between calls — confirm.
        if (tree == null) {
            tree = new SpTree(Y);
            tree.setWorkspaceMode(workspaceMode);
        }

        // Attractive forces over the sparse (rows, cols, vals) affinity structure
        tree.computeEdgeForces(rows, cols, vals, N, posF);
        // Repulsive forces per point, accumulating the normalization term sumQ
        for (int n = 0; n < N; n++)
            tree.computeNonEdgeForces(n, theta, negF.slice(n), sumQ);

        // In-place: dC = posF - negF / sumQ (posF and negF are both consumed here)
        INDArray dC = posF.subi(negF.divi(sumQ));

        Gradient ret = new DefaultGradient();
        ret.gradientForVariable().put(Y_GRAD, dC);
        return ret;
    }
}
try (MemoryWorkspace wsCache = cache.notifyScopeEntered()) { try (MemoryWorkspace ws = workspace.notifyScopeEntered()) { solver.optimize();
try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {
/**
 * Applies one momentum/gain-adjusted update step to the embedding Y from the supplied
 * t-SNE gradient, optionally scaling the step with AdaGrad.
 *
 * @param gradient  gradient of the cost w.r.t. the embedding (same shape as Y)
 * @param paramType unused here; present to satisfy the interface
 */
@Override
public void update(INDArray gradient, String paramType) {
    // Run inside the external workspace unless workspaces are disabled
    MemoryWorkspace workspace = workspaceMode == WorkspaceMode.NONE
            ? new DummyWorkspace()
            : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
                    workspaceConfigurationExternal, workspaceExternal);
    try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {

        INDArray yGrads = gradient;

        // Per-element gain update. NOTE(review): standard t-SNE increases gains where
        // sign(grad) != sign(increment) and decays them (x0.8) where signs agree, but
        // both terms below use neqi — the second term's condition looks suspect; verify
        // against the reference t-SNE implementation before changing.
        gains = gains.add(.2).muli(sign(yGrads)).neqi(sign(yIncs))
                .addi(gains.mul(0.8).muli(sign(yGrads)).neqi(sign(yIncs)));

        // Clamp gains from below to keep every dimension moving
        BooleanIndexing.applyWhere(gains, Conditions.lessThan(minGain), new Value(minGain));

        INDArray gradChange = gains.mul(yGrads);

        if (useAdaGrad) {
            // Lazily construct AdaGrad state the first time it is needed
            if (adaGrad == null) {
                // FIXME: int cast
                adaGrad = new AdaGrad(ArrayUtil.toInts(gradient.shape()), learningRate);
                adaGrad.setStateViewArray(Nd4j.zeros(gradient.shape()).reshape(1, gradChange.length()),
                        gradChange.shape(), gradient.ordering(), true);
            }

            gradChange = adaGrad.getGradient(gradChange, 0);

        } else {
            gradChange.muli(learningRate);
        }

        // Momentum step applied in place: yIncs = momentum*yIncs - gradChange; Y += yIncs
        yIncs.muli(momentum).subi(gradChange);
        Y.addi(yIncs);
    }
}