@Override
public void serialize(LoadFlowResult result, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException {
    // Serializes a load-flow result as a flat JSON object:
    //   { "version": <format version>, "isOK": <convergence flag>, "metrics": <metrics map> }
    jsonGenerator.writeStartObject();
    jsonGenerator.writeStringField("version", VERSION);
    jsonGenerator.writeBooleanField("isOK", result.isOk());
    jsonGenerator.writeObjectField("metrics", result.getMetrics());
    jsonGenerator.writeEndObject();
}
private void runLoadFlow(ComputationManager computationManager) throws Exception { int priority = 1; LoadFlow loadflow = loadFlowFactory.create(_network, computationManager, priority); //((HELMLoadFlow) loadflow).setSlack(this._slackId); LoadFlowResult lfResults = loadflow.run(_network.getStateManager().getWorkingStateId(), LoadFlowParameters.load()).join(); if (!lfResults.isOk()) { System.out.println("LF has not been successfuly completed."); LOGGER.info("Loadflow finished. isOk == false"); System.exit(-1); } }
/**
 * Runs a load flow synchronously on the given state and fails fast if it diverges.
 *
 * @param loadFlow       the load flow to run
 * @param workingStateId id of the network state to run the load flow on
 * @throws PowsyblException if the run fails or the load flow diverges
 */
private void runLoadFlow(LoadFlow loadFlow, String workingStateId) {
    LoadFlowResult result;
    try {
        result = loadFlow.run(workingStateId, LoadFlowParameters.load()).join();
    } catch (Exception e) {
        throw new PowsyblException(e);
    }
    // Check divergence OUTSIDE the try block: in the original code the PowsyblException
    // thrown here was immediately caught by catch (Exception e) and wrapped a second time,
    // burying the "diverged" message one level deep.
    if (!result.isOk()) {
        throw new PowsyblException("Load flow diverged during phase shifter optimization");
    }
}
/**
 * Writes the load-flow outcome as a two-column table ("Result", "Metrics").
 * Any I/O failure from the formatter is rethrown unchecked.
 */
private void printLoadFlowResult(LoadFlowResult result, Writer writer, TableFormatterFactory formatterFactory, TableFormatterConfig formatterConfig) {
    Column resultColumn = new Column("Result");
    Column metricsColumn = new Column("Metrics");
    try (TableFormatter formatter = formatterFactory.create(writer, "loadflow results", formatterConfig, resultColumn, metricsColumn)) {
        formatter.writeCell(result.isOk());
        formatter.writeCell(result.getMetrics().toString());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Fragment (enclosing method not visible; braces unbalanced in this excerpt):
// three divergence guards that abort stepup-transformer expansion — one before
// and two after the expansion — by throwing RuntimeException when the load flow
// result reports isOk() == false.
// NOTE(review): the two "after" checks look duplicated — verify against the full source.
if (!result.isOk()) { throw new RuntimeException("Load flow diverged before stepup transformer expansion"); if (!result.isOk()) { throw new RuntimeException("Load flow diverged after stepup transformer expansion"); if (!result.isOk()) { throw new RuntimeException("Load flow diverged after stepup transformer expansion");
// Fragment of a CompletableFuture pipeline: after the pre-projector load flow,
// log its metrics and abort the chain with StopException on divergence; then
// (.thenApplyAsync on the computation manager's executor) repeat the same
// metrics-log + divergence guard for the post-projector load flow.
LOGGER.debug("Pre-projector load flow metrics: {}", loadFlowResult.getMetrics()); if (!loadFlowResult.isOk()) { throw new StopException("Pre-projector load flow diverged"); }, computationManager.getExecutor()) .thenApplyAsync(loadFlowResult -> { LOGGER.debug("Post-projector load flow metrics: {}", loadFlowResult.getMetrics()); if (!loadFlowResult.isOk()) { throw new StopException("Post-projector load flow diverged");
// Fragment: create and run a load flow on the network's working state during a merge.
// On divergence the network id is logged at error level; a second (nested in this
// excerpt) isOk() check throws RuntimeException("Merge LF divergence").
// NOTE(review): the duplicated !result.isOk() test suggests the two guards belong to
// different branches in the full source — confirm before touching.
LoadFlow loadFlow = loadFlowFactory.create(network, computationManager, loadFlowPriority); LoadFlowResult result = loadFlow.run(network.getStateManager().getWorkingStateId(), loadFlowParameters).join(); if (!result.isOk()) { LOGGER.error("LF divergence on network " + network.getId()); if (!result.isOk()) { throw new RuntimeException("Merge LF divergence");
// Fragment (method signature cut mid-parameter-list): copies the load-flow metrics
// into the shared map with each key suffixed by "_" + index to keep per-run entries
// distinct, and bumps the ok-counter when the load flow converged.
Map<String, String> metrics, List<SecurityIndex> securityIndexes, AtomicInteger okCount) { metrics.putAll(loadFlowResult.getMetrics().entrySet().stream() .collect(Collectors.toMap(e -> e.getKey() + "_" + index, Map.Entry::getValue))); if (loadFlowResult.isOk()) { okCount.incrementAndGet();
// Fragment: persist the post-contingency network state to the online DB, then, if
// the load flow converged, log it and record convergence in the local flag.
onlineDb.storeState(context.getWorkflowId(), stateIdInt, network, contingency.getId()); if (result.isOk()) { logger.info(this.logHeader + "{}: load flow on post contingency state {} converge", stateId, postContingencyStateId); loadflowConverge = true;
protected boolean runLoadFlow(WorkflowContext context, WorkflowStartContext startContext, Sample sample) { LOGGER.debug("Workflow {}, sample {}: load flow started", id, sample.getId()); try { LoadFlowResult result = context.getLoadflow().run(context.getNetwork().getStateManager().getWorkingStateId(), context.getLoadFlowParameters()).join(); LOGGER.debug("Workflow {}, sample {}: load flow terminated (ok={})", id, sample.getId(), result.isOk()); changeTaskStatus(startContext, sample.getId(), LOAD_FLOW, result.isOk() ? SUCCEED : FAILED, null); metricsDb.store(id, "sample-" + sample.getId(), LOAD_FLOW.name(), result.getMetrics()); // consistency check on sampled variables SampleCharacteritics characteritics = SampleCharacteritics.fromNetwork(context.getNetwork(), creationParameters.isGenerationSampled(), creationParameters.isBoundariesSampled()); if (!characteritics.equals(sample.getCharacteritics())) { LOGGER.warn("Sampled variables inconsistency for {}: {} != {}", sample.getId(), characteritics, sample.getCharacteritics()); } return result.isOk(); } catch (Exception e) { LOGGER.error(e.toString(), e); return false; } }
// Fragment: run a load flow on the working state, report the outcome on the tool's
// error stream, and — only if it converged — create the stabilization and impact
// analysis stages for the subsequent dynamic simulation.
LoadFlowResult loadFlowResult = loadFlow.run(network.getStateManager().getWorkingStateId(), loadFlowParameters).join(); context.getErrorStream().println("load flow terminated (" + loadFlowResult.isOk() + ") on " + network.getId()); if (loadFlowResult.isOk()) { Stabilization stabilization = simulatorFactory.createStabilization(network, context.getShortTimeExecutionComputationManager(), 0); ImpactAnalysis impactAnalysis = simulatorFactory.createImpactAnalysis(network, context.getShortTimeExecutionComputationManager(), 0, contingencyDb);
// Fragment: print the second load-flow status ("ok"/"nok" plus metrics) to the tool's
// output stream; when it converged and constraint checking is requested, build a
// limit-violations report for the network.
context.getOutputStream().println("loadflow status is " + (result2.isOk() ? "ok" : "nok") + " (" + result2.getMetrics() + ")"); if (result2.isOk() && checkConstraints) { String report = Security.printLimitsViolations(network, LimitViolationFilter.load()); if (report != null) {
// Fragment of a WCA CompletableFuture chain: run the post-contingency load flow; on
// divergence, log a warning with the metrics, drop cluster candidates ONE/TWO/THREE
// for the contingency, record a failed WCAPostContingencyStatus, and short-circuit
// to cluster FOUR. Later (post curative action), a synchronous .join()ed load flow is
// checked the same way, recording a failed WCAActionApplication on divergence.
.thenCompose(aVoid -> loadFlow.run(contingencyStateId[0], LOAD_FLOW_PARAMETERS)) .thenCompose(loadFlowResult -> { if (!loadFlowResult.isOk()) { LOGGER.warn("Network {}, contingency {}: load flow on post contingency state diverged, metrics = {}", network.getId(), contingency.getId(), loadFlowResult.getMetrics()); filteredClusters.removeClusters(contingency.getId(), EnumSet.of(WCAClusterNum.ONE, WCAClusterNum.TWO, WCAClusterNum.THREE), wcaReport.addPostContingencyStatus(new WCAPostContingencyStatus( contingency.getId(), new WCALoadflowResult(false, "load flow on post contingency state diverged: metrics = " + loadFlowResult.getMetrics()) )); return CompletableFuture.completedFuture(WCAClusterNum.FOUR); try { loadFlowResult1 = loadFlow.run(network.getStateManager().getWorkingStateId(), LOAD_FLOW_PARAMETERS).join(); if (loadFlowResult1.isOk()) { boolean violationsRemoved = false; boolean actionApplied = false; } else { LOGGER.warn("Network {}, contingency {}, curative action {}: load flow on post curative action state diverged, metrics = {}", network.getId(), contingency.getId(), curativeActionId, loadFlowResult1.getMetrics()); curativeActionsApplication.add(new WCAActionApplication(curativeActionId, null, new WCALoadflowResult(false, "load flow on post curative action state diverged: metrics = " + loadFlowResult1.getMetrics()), false, false,
// Fragment of an online workflow step: run the load flow on the working state, push
// SUCCESS/FAILED task status to the state listener, and persist non-empty metrics to
// the online DB under OnlineStep.LOAD_FLOW.
// NOTE(review): the later `if (result.isOk())` guards an error log saying "loadflow
// does not converge" — the condition looks inverted (should presumably be
// !result.isOk()). The enclosing method is cut off here, so confirm against the full
// source before changing it.
logger.info(this.logHeader + "{}: loadflow started", stateId); LoadFlowResult result = loadFlow.run(context.getNetwork().getStateManager().getWorkingStateId(), loadFlowParameters).join(); status.put(currentStatus, result.isOk() ? OnlineTaskStatus.SUCCESS : OnlineTaskStatus.FAILED); stateListener.onUpdate(stateId, status, context.timeHorizon); logger.info(this.logHeader + "{}: loadflow terminated", stateId); if (result.getMetrics() != null) { logger.info(this.logHeader + "{}: loadflow metrics: {}", stateId, result.getMetrics()); if (!result.getMetrics().isEmpty()) { onlineDb.storeMetrics(context.getWorkflowId(), stateId, OnlineStep.LOAD_FLOW, result.getMetrics()); status.put(currentStatus, result.isOk() ? OnlineTaskStatus.SUCCESS : OnlineTaskStatus.FAILED); if (result.isOk()) { logger.error(this.logHeader + "{}: stop analisys of state: loadflow does not converge: metrics = {}", stateIdStr, result.getMetrics()); stateListener.onUpdate(stateId, status, context.timeHorizon, "LoadFLow does not converge: metrics = " + result.getMetrics());
// Fragment of WCA base-state handling: if the base-state load flow diverged, record a
// failed WCALoadflowResult on the report and strip cluster candidates for every
// contingency. A second load flow on the state with 'domains' uncertainties is checked
// the same way, short-circuiting with the base-state violations. Finally, a .join()ed
// load flow after a preventive action either collects remaining limit violations
// (converged) or records a failed WCAActionApplication (diverged).
if (!loadFlowInBaseStateResult.isOk()) { LOGGER.error("Network {}: load flow on base state diverged, metrics = {}", network.getId(), loadFlowInBaseStateResult.getMetrics()); wcaReport.setBaseStateLoadflowResult(new WCALoadflowResult(false, "load flow on base state diverged: metrics = " + loadFlowInBaseStateResult.getMetrics())); contingencies.forEach(contingency -> { filteredClusters.removeClusters(contingency.getId(), return loadFlow.run(domainsUncertaintiesState, LOAD_FLOW_PARAMETERS) .thenApply(loadFlowResult -> { if (!loadFlowResult.isOk()) { LOGGER.info("Network {}: loadflow on state with 'domains' uncertainties diverged, metrics = {}", network.getId(), loadFlowResult.getMetrics()); wcaReport.setBaseStateWithUncertaintiesLoadflowResult(new WCALoadflowResult(false, "load flow on state with 'domains' uncertainties diverged: metrics = " + loadFlowResult.getMetrics())); return CompletableFuture.completedFuture(baseStateLimitViolations); } else { try { loadFlowResult1 = loadFlow.run(network.getStateManager().getWorkingStateId(), LOAD_FLOW_PARAMETERS).join(); if (loadFlowResult1.isOk()) { List<LimitViolation> preventiveStateLimitViolations = violationsFilter.apply(Security.checkLimits(network), network); Optional<LimitViolation> notSolvedLimitViolation = preventiveStateLimitViolations network.getId(), preventiveActionId, loadFlowResult1.getMetrics()); wcaReport.addPreventiveActionApplication(new WCAActionApplication(preventiveActionId, violationToBePrevented, new WCALoadflowResult(false, "loadflow on post preventive action state diverged: metrics = " + loadFlowResult1.getMetrics()), false, false,