/**
 * Builds a pivot query over the data-point tags table: one row per data point,
 * with one column per requested tag key holding that tag's value (the MAX over
 * the CASE expression collapses the single matching row per key).
 *
 * <p>This method does not filter the tags based on the data point permissions.
 * It should only be used when joining onto the data points table (the filtering
 * happens there post-join).
 *
 * @param tagKeyToColumn maps each tag key to the column name it is pivoted into
 * @return a SELECT producing (data point id, one value column per tag key)
 */
Select<Record> createTagPivotSql(Map<String, Name> tagKeyToColumn) {
    List<Field<?>> pivotColumns = new ArrayList<>(tagKeyToColumn.size() + 1);
    pivotColumns.add(DATA_POINT_ID);
    tagKeyToColumn.forEach((tagKey, columnName) ->
            pivotColumns.add(DSL.max(DSL.when(TAG_KEY.eq(tagKey), TAG_VALUE)).as(columnName)));
    return DSL.select(pivotColumns).from(DATA_POINT_TAGS).groupBy(DATA_POINT_ID);
}
/**
 * Builds the "running at" timestamp expression for a process row.
 *
 * <p>Semantics: if the row's CURRENT_STATUS is already RUNNING, keep
 * LAST_RUN_AT; otherwise, if the status being written is RUNNING, stamp the
 * transition with the database's current timestamp; otherwise keep LAST_RUN_AT.
 *
 * <p>NOTE(review): the inner comparison ({@code value(status)} vs. RUNNING) is
 * a constant-vs-constant check that could be folded in Java, but rendering it
 * as a bind value keeps the generated SQL string identical for every status —
 * presumably to allow prepared-statement reuse. Confirm before simplifying.
 */
private static Field<Timestamp> createRunningAtValue(ProcessStatus status) {
    return when(PROCESS_QUEUE.CURRENT_STATUS.eq(ProcessStatus.RUNNING.toString()), PROCESS_QUEUE.LAST_RUN_AT)
            .otherwise(when(value(status.toString()).eq(ProcessStatus.RUNNING.toString()), currentTimestamp())
                    .otherwise(PROCESS_QUEUE.LAST_RUN_AT));
}
@Override
public void accept(Context<?> c) {
    // CASE WHEN condition THEN 1 ELSE 0 END aggregated with MAX: the result is
    // 1 iff at least one row matches — a BOOL_OR-style aggregate emulation for
    // dialects without a native boolean aggregate.
    c.visit(DSL.max(DSL.when(condition, one()).otherwise(zero())));
    // Appends the OVER (...) part when this is used as a window function.
    toSQLOverClause(c);
}
});
@Override
final Field<T> getFunction0(Configuration configuration) {
    // NVL2(arg1, arg2, arg3): arg2 when arg1 is non-null, arg3 otherwise.
    switch (configuration.dialect().family()) {
        // H2 and HSQLDB ship a native NVL2() function.
        case H2:
        case HSQLDB:
            return DSL.field("{nvl2}({0}, {1}, {2})", getDataType(), arg1, arg2, arg3);
        // Everywhere else: CASE WHEN arg1 IS NOT NULL THEN arg2 ELSE arg3 END.
        default:
            return DSL.when(arg1.isNotNull(), arg2).otherwise(arg3);
    }
}
}
@Override
public void accept(Context<?> c) {
    // CASE WHEN condition THEN 1 ELSE 0 END aggregated with MIN: the result is
    // 1 iff every row matches — a BOOL_AND/EVERY-style aggregate emulation for
    // dialects without a native boolean aggregate.
    c.visit(DSL.min(DSL.when(condition, one()).otherwise(zero())));
    // Appends the OVER (...) part when this is used as a window function.
    toSQLOverClause(c);
}
});
/**
 * Batched update of ansible host rows: adds the reported duration, and only
 * moves STATUS / EVENT_SEQ forward when the incoming status outweighs the one
 * already stored (decodeStatus maps status names to comparable weights).
 *
 * <p>The jOOQ statement is rendered once with null placeholder bind values
 * ({@code value((T) null)}) purely to obtain the SQL string; actual values are
 * then bound positionally per host and executed as a single JDBC batch.
 *
 * <p>NOTE(review): the positional indexes 1..9 are coupled to the bind-value
 * order jOOQ renders: duration (1), the status binds inside the two CASE
 * expressions (2, 3, 4), event seq (5), then the WHERE-clause key (6..9). If
 * decodeStatus/choose ever introduce extra bind values, these indexes break —
 * verify against the rendered SQL when changing either.
 *
 * @return per-statement update counts as returned by {@link PreparedStatement#executeBatch()}
 */
private int[] update(DSLContext tx, Connection conn, List<HostItem> hosts) throws SQLException {
    // Weight of the status currently stored in the row vs. the incoming one.
    Field<Integer> currentStatusWeight = decodeStatus(choose(ANSIBLE_HOSTS.STATUS));
    Field<Integer> newStatusWeight = decodeStatus(choose(value((String) null)));

    String update = tx.update(ANSIBLE_HOSTS)
            .set(ANSIBLE_HOSTS.DURATION, ANSIBLE_HOSTS.DURATION.plus(value((Integer) null)))
            // Keep the stored status/event seq when it already outranks the new one.
            .set(ANSIBLE_HOSTS.STATUS, when(currentStatusWeight.greaterThan(newStatusWeight), ANSIBLE_HOSTS.STATUS).otherwise(value((String) null)))
            .set(ANSIBLE_HOSTS.EVENT_SEQ, when(currentStatusWeight.greaterThan(newStatusWeight), ANSIBLE_HOSTS.EVENT_SEQ).otherwise(value((Long) null)))
            .where(ANSIBLE_HOSTS.INSTANCE_ID.eq(value((UUID) null))
                    .and(ANSIBLE_HOSTS.INSTANCE_CREATED_AT.eq(value((Timestamp) null))
                            .and(ANSIBLE_HOSTS.HOST.eq(value((String) null))
                                    .and(ANSIBLE_HOSTS.HOST_GROUP.eq(value((String) null))))))
            .getSQL();

    try (PreparedStatement ps = conn.prepareStatement(update)) {
        for (HostItem h : hosts) {
            ps.setLong(1, h.duration());
            ps.setString(2, h.status());
            ps.setString(3, h.status());
            ps.setString(4, h.status());
            ps.setLong(5, h.eventSeq());
            ps.setObject(6, h.key().instanceId());
            ps.setTimestamp(7, h.key().instanceCreatedAt());
            ps.setString(8, h.key().host());
            ps.setString(9, h.key().hostGroup());
            ps.addBatch();
        }
        return ps.executeBatch();
    }
}
// Two-argument NVL/COALESCE emulation:
// CASE WHEN arg1 IS NOT NULL THEN arg1 ELSE arg2 END.
return DSL.when(arg1.isNotNull(), arg1).otherwise(arg2);
// One counter column per ansible status: the CASE yields 1 for a matching row
// and 0 otherwise, so SUM produces a per-group count (portable pivot idiom;
// each column is aliased with the status name it counts).
sum(when(a.STATUS.eq(RUNNING.name()), 1).otherwise(0)).as(RUNNING.name()),
sum(when(a.STATUS.eq(CHANGED.name()), 1).otherwise(0)).as(CHANGED.name()),
sum(when(a.STATUS.eq(FAILED.name()), 1).otherwise(0)).as(FAILED.name()),
sum(when(a.STATUS.eq(OK.name()), 1).otherwise(0)).as(OK.name()),
sum(when(a.STATUS.eq(SKIPPED.name()), 1).otherwise(0)).as(SKIPPED.name()),
sum(when(a.STATUS.eq(UNREACHABLE.name()), 1).otherwise(0)).as(UNREACHABLE.name()),
// All distinct host groups seen within the group.
arrayAggDistinct(a.HOST_GROUP))
.from(a)
/**
 * Computes queue counts per status: total processes, processes belonging to
 * the given org, and processes belonging to the given project.
 *
 * <p>The inner select emits one row per queued process with three 0/1 flag
 * columns; the outer select sums the flags, grouped by status. The outer
 * query references the derived table's columns by the aliases assigned in
 * the inner select, so the two field lists must stay in sync.
 *
 * @param orgId    org whose processes are counted into "count_per_org"
 * @param prjId    project whose processes are counted into "count_per_project"
 * @param statuses statuses to include in the result
 */
@WithTimer
public QueueMetrics metrics(UUID orgId, UUID prjId, Set<String> statuses) {
    return txResult(tx -> {
        SelectConditionStep<Record4<Integer, Integer, Integer, String>> q = tx.select(
                field("1", Integer.class).as("count_process"), // every selected row counts once
                when(V_PROCESS_QUEUE.ORG_ID.eq(orgId), 1).otherwise(0).as("count_per_org"),
                when(V_PROCESS_QUEUE.PROJECT_ID.eq(prjId), 1).otherwise(0).as("count_per_project"),
                V_PROCESS_QUEUE.CURRENT_STATUS.as("status"))
                .from(V_PROCESS_QUEUE)
                .where(V_PROCESS_QUEUE.CURRENT_STATUS.in(statuses));

        // Sum the 0/1 flags per status over the derived table q.
        List<Record4<BigDecimal, BigDecimal, BigDecimal, String>> result = tx.select(
                sum(q.field("count_process", Integer.class)),
                sum(q.field("count_per_org", Integer.class)),
                sum(q.field("count_per_project", Integer.class)),
                q.field("status", String.class))
                .from(q)
                .groupBy(q.field("status", String.class))
                .fetch();

        Map<String, Integer> process = new HashMap<>();
        Map<String, Integer> perOrg = new HashMap<>();
        Map<String, Integer> perProject = new HashMap<>();
        result.forEach(r -> {
            String status = r.value4();
            // SUM returns BigDecimal; getInt converts (presumably null-safe — see helper).
            process.put(status, getInt(r.value1()));
            perOrg.put(status, getInt(r.value2()));
            perProject.put(status, getInt(r.value3()));
        });

        return new QueueMetrics(process, perOrg, perProject);
    });
}
// Converts a condition into a nullable Boolean field, preserving SQL
// three-valued logic: condition TRUE -> true, FALSE -> false, and when the
// condition evaluates to NULL neither WHEN branch matches, so the result
// falls through to the explicit NULL.
return DSL.when(condition, inline(true))
        .when(not(condition), inline(false))
        .otherwise(inline((Boolean) null));
// One 0/1 flag column per process status, aliased with the status name —
// presumably aggregated (summed) by the enclosing query to yield per-status
// counts for the given initiator. Fragment of a larger SELECT.
when(V_PROCESS_QUEUE.CURRENT_STATUS.eq(RUNNING.name()), 1).otherwise(0).as(RUNNING.name()),
when(V_PROCESS_QUEUE.CURRENT_STATUS.eq(SUSPENDED.name()), 1).otherwise(0).as(SUSPENDED.name()),
when(V_PROCESS_QUEUE.CURRENT_STATUS.eq(FINISHED.name()), 1).otherwise(0).as(FINISHED.name()),
when(V_PROCESS_QUEUE.CURRENT_STATUS.eq(FAILED.name()), 1).otherwise(0).as(FAILED.name()),
when(V_PROCESS_QUEUE.CURRENT_STATUS.eq(ENQUEUED.name()), 1).otherwise(0).as(ENQUEUED.name()))
.from(V_PROCESS_QUEUE)
.where(V_PROCESS_QUEUE.INITIATOR_ID.eq(initiatorId)
@SuppressWarnings("unchecked")
@Override
final Field<Integer> getFunction0(Configuration configuration) {
    // SIGN(x): -1, 0, or 1. Match on the dialect *family* (consistent with the
    // other dialect-specific emulations in this code base) so that versioned
    // dialects belonging to the SQLite family also take the emulation branch;
    // matching the exact dialect would silently skip it for them.
    switch (configuration.dialect().family()) {
        // SQLite has no sign() function: emulate it with a CASE expression.
        case SQLITE:
            return DSL
                .when(((Field<Integer>) argument).greaterThan(zero()), one())
                .when(((Field<Integer>) argument).lessThan(zero()), one().neg())
                .otherwise(zero());
        // All other dialects render a plain sign(argument) function call.
        default:
            return function("sign", getDataType(), argument);
    }
}
}
@SuppressWarnings({ "unchecked" })
@Override
public void accept(Context<?> ctx) {
    switch (ctx.family()) {
        // PostgreSQL provides a native width_bucket() function.
        case POSTGRES:
            ctx.visit(DSL.field("{width_bucket}({0}, {1}, {2}, {3})", getType(), field, low, high, buckets));
            break;
        // Emulation of width_bucket semantics: bucket 0 for values below low,
        // bucket (buckets + 1) for values at or above high, and otherwise a
        // linear mapping into 1..buckets via
        // floor((field - low) * buckets / (high - low)) + 1.
        default:
            ctx.visit(
                DSL.when(field.lt(low), zero())
                    .when(field.ge(high), buckets.add(one()))
                    .otherwise((Field<Integer>) DSL.floor(field.sub(low).mul(buckets).div(high.sub(low))).add(one()))
            );
            break;
    }
}
// Renders the argument list of an aggregate function, handling DISTINCT
// parenthesization and FILTER-clause emulation.
final void toSQLArguments1(Context<?> ctx, QueryPartList<QueryPart> args) {
    if (distinct) {
        ctx.visit(K_DISTINCT);

        // [#2883] PostgreSQL can use the DISTINCT keyword with formal row value expressions.
        if (( ctx.family() == POSTGRES) && args.size() > 1)
            ctx.sql('(');
        else
            ctx.sql(' ');
    }

    if (!args.isEmpty()) {
        // No filter, or a dialect that renders the FILTER clause natively
        // (HSQLDB; PostgreSQL from 9.4 on — NOTE(review): confirm the intended
        // direction of precedes() here): emit the arguments unchanged.
        if (filter == null || HSQLDB == ctx.family() || POSTGRES_9_4.precedes(ctx.dialect())) {
            ctx.visit(args);
        }
        // Otherwise fold the filter into each argument as
        // CASE WHEN filter THEN arg END; ASTERISK is replaced by the literal 1
        // so that e.g. COUNT(*) FILTER (...) still counts matching rows.
        else {
            QueryPartList<Field<?>> expressions = new QueryPartList<Field<?>>();

            for (QueryPart argument : args)
                expressions.add(DSL.when(filter, argument == ASTERISK ? one() : argument));

            ctx.visit(expressions);
        }
    }

    // Close the parenthesis opened for PostgreSQL DISTINCT row values above.
    if (distinct)
        if (( ctx.family() == POSTGRES) && args.size() > 1)
            ctx.sql(')');
}
// Fragment of a larger CASE in a SELECT list: for PostgreSQL varchar arrays
// ("_varchar"), derive the element length from pg_attribute.atttypmod —
// atttypmod stores the declared length plus a 4-byte varlena header, hence
// the sub(4). NOTE(review): assumes the standard VARHDRSZ offset — confirm.
when(COLUMNS.UDT_NAME.eq(inline("_varchar")), PG_ATTRIBUTE.ATTTYPMOD.sub(inline(4)))).as(COLUMNS.CHARACTER_MAXIMUM_LENGTH),
COLUMNS.NUMERIC_PRECISION,
COLUMNS.NUMERIC_SCALE,
// CASE parsing: after a WHEN <condition>, consume the THEN keyword and parse
// the result expression. The first WHEN creates the case step; subsequent
// WHENs chain onto the existing one.
parseKeyword(ctx, "THEN");
Field value = parseField(ctx);
step = step == null ? when(condition, value) : step.when(condition, value);
// Fragment of a routine-metadata SELECT (ends mid-expression).
// First part: when pg_proc.proargmodes contains OUT ('o') or INOUT ('b')
// parameter modes, report the routine's data type as "void" instead of the
// information_schema DATA_TYPE — presumably because such routines are
// treated as procedures rather than value-returning functions; confirm
// against the enclosing generator logic.
? when(condition("{0} && ARRAY['o','b']::\"char\"[]", PG_PROC.PROARGMODES), inline("void"))
    .otherwise(r1.DATA_TYPE).as("data_type")
: r1.DATA_TYPE.as("data_type"),
// Second part: overload numbering — when a routine name appears more than
// once in its schema (window COUNT > 1), number the overloads by
// SPECIFIC_NAME via ROW_NUMBER.
when(
    count().over(partitionBy(r1.ROUTINE_SCHEMA, r1.ROUTINE_NAME)).gt(one()),
    rowNumber().over(partitionBy(r1.ROUTINE_SCHEMA, r1.ROUTINE_NAME).orderBy(r1.SPECIFIC_NAME))