Refine search
/**
 * Create a PreparedStatementCreator for the given SQL and parameter values,
 * validating that the number of supplied values matches the declared parameters.
 * <p>When the raw counts differ, distinct parameter names are counted instead,
 * because a named parameter may legitimately appear several times in the SQL.
 * @param actualSql the SQL to execute
 * @param parameters the parameter values (never {@code null})
 * @throws InvalidDataAccessApiUsageException if the parameter count does not
 * match the declared parameters
 */
public PreparedStatementCreatorImpl(String actualSql, List<?> parameters) {
    this.actualSql = actualSql;
    Assert.notNull(parameters, "Parameters List must not be null");
    this.parameters = parameters;
    if (this.parameters.size() != declaredParameters.size()) {
        // Sizes differ: fall back to counting distinct parameter names, since a
        // named parameter used multiple times only declares one parameter.
        Set<String> distinctNames = new HashSet<>();
        for (int index = 0; index < parameters.size(); index++) {
            Object value = parameters.get(index);
            if (value instanceof SqlParameterValue) {
                distinctNames.add(((SqlParameterValue) value).getName());
            }
            else {
                distinctNames.add("Parameter #" + index);
            }
        }
        if (distinctNames.size() != declaredParameters.size()) {
            throw new InvalidDataAccessApiUsageException(
                    "SQL [" + sql + "]: given " + distinctNames.size() +
                    " parameters but expected " + declaredParameters.size());
        }
    }
}
/**
 * Store the union of the given sorted sets at {@code destKey}.
 * <p>In cluster mode this command is only valid when the destination and all
 * source keys hash to the same slot.
 * @param destKey destination key (never {@code null})
 * @param sets source sorted-set keys (never {@code null}, no {@code null} elements)
 * @return the number of elements in the resulting sorted set
 */
@Override
public Long zUnionStore(byte[] destKey, byte[]... sets) {
    Assert.notNull(destKey, "Destination key must not be null!");
    Assert.notNull(sets, "Source sets must not be null!");
    Assert.noNullElements(sets, "Source sets must not contain null elements!");
    byte[][] keysInvolved = ByteUtils.mergeArrays(destKey, sets);
    if (!ClusterSlotHashUtil.isSameSlotForAllKeys(keysInvolved)) {
        throw new InvalidDataAccessApiUsageException("ZUNIONSTORE can only be executed when all keys map to the same slot");
    }
    try {
        return connection.getCluster().zunionstore(destKey, sets);
    }
    catch (Exception ex) {
        throw convertJedisAccessException(ex);
    }
}
/**
 * Perform the given bitwise operation over the source keys and store the
 * result at {@code destination}.
 * <p>Cluster restriction: all involved keys must map to the same slot.
 * @param op the bit operation to perform (never {@code null})
 * @param destination destination key (never {@code null})
 * @param keys source keys
 * @return the size of the string stored at the destination key
 */
@Override
public Long bitOp(BitOperation op, byte[] destination, byte[]... keys) {
    Assert.notNull(op, "BitOperation must not be null!");
    Assert.notNull(destination, "Destination key must not be null!");
    byte[][] involved = ByteUtils.mergeArrays(destination, keys);
    if (!ClusterSlotHashUtil.isSameSlotForAllKeys(involved)) {
        throw new InvalidDataAccessApiUsageException("BITOP is only supported for same slot keys in cluster mode.");
    }
    try {
        return connection.getCluster().bitop(JedisConverters.toBitOp(op), destination, keys);
    }
    catch (Exception ex) {
        throw convertJedisAccessException(ex);
    }
}
@Override public List<Object> executePipelined(SessionCallback<?> session, @Nullable RedisSerializer<?> resultSerializer) { Assert.isTrue(initialized, "template not initialized; call afterPropertiesSet() before using it"); Assert.notNull(session, "Callback object must not be null"); RedisConnectionFactory factory = getRequiredConnectionFactory(); // bind connection RedisConnectionUtils.bindConnection(factory, enableTransactionSupport); try { return execute((RedisCallback<List<Object>>) connection -> { connection.openPipeline(); boolean pipelinedClosed = false; try { Object result = executeSession(session); if (result != null) { throw new InvalidDataAccessApiUsageException( "Callback cannot return a non-null value as it gets overwritten by the pipeline"); } List<Object> closePipeline = connection.closePipeline(); pipelinedClosed = true; return deserializeMixedResults(closePipeline, resultSerializer, hashKeySerializer, hashValueSerializer); } finally { if (!pipelinedClosed) { connection.closePipeline(); } } }); } finally { RedisConnectionUtils.unbindConnection(factory); } }
/**
 * Check mandatory properties and pre-compute the parameter count of the
 * generated paging query.
 * <p>Rejects SQL that mixes named parameters with positional {@code ?}
 * placeholders.
 * @param dataSource the DataSource to use (never {@code null})
 * @throws Exception in case of initialization errors
 * @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet()
 */
@Override
public void init(DataSource dataSource) throws Exception {
    Assert.notNull(dataSource, "A DataSource is required");
    Assert.hasLength(selectClause, "selectClause must be specified");
    Assert.hasLength(fromClause, "fromClause must be specified");
    Assert.notEmpty(sortKeys, "sortKey must be specified");
    StringBuilder query = new StringBuilder("SELECT ").append(selectClause).append(" FROM ").append(fromClause);
    if (whereClause != null) {
        query.append(" WHERE ").append(whereClause);
    }
    if (groupClause != null) {
        query.append(" GROUP BY ").append(groupClause);
    }
    // Count positional placeholders and collect named parameters in one pass.
    List<String> namedParameters = new ArrayList<>();
    parameterCount = JdbcParameterUtils.countParameterPlaceholders(query.toString(), namedParameters);
    if (!namedParameters.isEmpty()) {
        if (parameterCount != namedParameters.size()) {
            throw new InvalidDataAccessApiUsageException(
                    "You can't use both named parameters and classic \"?\" placeholders: " + query);
        }
        usingNamedParameters = true;
    }
}
/**
 * Merge the HyperLogLogs at the source keys into the destination key.
 * <p>In cluster mode the destination and every source key must hash to the
 * same slot; otherwise the command is rejected per element.
 * @param commands the stream of merge commands
 * @return a flux of boolean responses, one per command
 */
@Override
public Flux<BooleanResponse<PfMergeCommand>> pfMerge(Publisher<PfMergeCommand> commands) {
    return getConnection().execute(cmd -> Flux.from(commands).concatMap(command -> {
        Assert.notNull(command.getKey(), "Key must not be null for PFMERGE");
        Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty for PFMERGE!");
        // Destination plus all source keys must map to one cluster slot.
        List<ByteBuffer> allKeys = new ArrayList<>(command.getSourceKeys());
        allKeys.add(command.getKey());
        if (!ClusterSlotHashUtil.isSameSlotForAllKeys(allKeys.toArray(new ByteBuffer[allKeys.size()]))) {
            return Mono.error(
                    new InvalidDataAccessApiUsageException("All keys must map to same slot for PFMERGE in cluster mode."));
        }
        return super.pfMerge(Mono.just(command));
    }));
}
/**
 * Determine whether any document in the given collection matches the query.
 * @param query the query to test (must not be {@code null})
 * @param entityClass the entity type used for field mapping, may be {@code null}
 * @param collectionName the collection to query (never {@code null})
 * @return {@code true} if at least one matching document exists
 * @throws InvalidDataAccessApiUsageException if {@code query} is {@code null}
 */
@Override
public boolean exists(Query query, @Nullable Class<?> entityClass, String collectionName) {
    if (query == null) {
        throw new InvalidDataAccessApiUsageException("Query passed in to exist can't be null");
    }
    Assert.notNull(collectionName, "CollectionName must not be null!");
    // Map the query against the entity metadata so field names/types line up.
    Document mappedCriteria = queryMapper.getMappedObject(query.getQueryObject(), getPersistentEntity(entityClass));
    return execute(collectionName,
            new ExistsCallback(mappedCriteria, query.getCollation().map(Collation::toMongoCollation).orElse(null)));
}
/**
 * Push values onto a list from the left or right, optionally only when the
 * list already exists ({@code PUSHX} semantics when upsert is {@code false}).
 * @param commands the stream of push commands
 * @return a flux of numeric responses carrying the resulting list length
 */
@Override
public Flux<NumericResponse<PushCommand, Long>> push(Publisher<PushCommand> commands) {
    return connection.execute(cmd -> Flux.from(commands).concatMap(command -> {
        Assert.notNull(command.getKey(), "Key must not be null!");
        Assert.notEmpty(command.getValues(), "Values must not be null or empty!");
        // PUSHX (upsert == false) accepts exactly one value.
        if (!command.getUpsert() && command.getValues().size() > 1) {
            throw new InvalidDataAccessApiUsageException(
                    String.format("%s PUSHX only allows one value!", command.getDirection()));
        }
        boolean rightPush = ObjectUtils.nullSafeEquals(Direction.RIGHT, command.getDirection());
        ByteBuffer[] allValues = command.getValues().stream().toArray(ByteBuffer[]::new);
        Mono<Long> listSize;
        if (rightPush) {
            listSize = command.getUpsert() ? cmd.rpush(command.getKey(), allValues)
                    : cmd.rpushx(command.getKey(), command.getValues().get(0));
        }
        else {
            listSize = command.getUpsert() ? cmd.lpush(command.getKey(), allValues)
                    : cmd.lpushx(command.getKey(), command.getValues().get(0));
        }
        return listSize.map(size -> new NumericResponse<>(command, size));
    }));
}
/**
 * Create a new ArgumentTypePreparedStatementSetter for the given arguments
 * and their corresponding SQL types.
 * <p>Both arrays must be given together (or both be {@code null}) and must
 * have matching lengths.
 * @param args the arguments to set (may be {@code null})
 * @param argTypes the corresponding SQL types of the arguments (may be {@code null})
 * @throws InvalidDataAccessApiUsageException if the arrays do not match
 */
public ArgumentTypePreparedStatementSetter(@Nullable Object[] args, @Nullable int[] argTypes) {
    boolean consistent = (args == null ? argTypes == null : argTypes != null && argTypes.length == args.length);
    if (!consistent) {
        throw new InvalidDataAccessApiUsageException("args and argTypes parameters must match");
    }
    this.args = args;
    this.argTypes = argTypes;
}
// NOTE(review): this method is truncated in this view and its control flow is
// broken — the unconditional throw makes the following Assert unreachable (a
// compile error in Java). Presumably the throw should be guarded by
// "if (query == null) { ... }"; confirm against the full source and restore
// the guard.
protected <T> Mono<DeleteResult> doRemove(String collectionName, Query query, @Nullable Class<T> entityClass) { throw new InvalidDataAccessApiUsageException("Query passed in to remove can't be null!"); Assert.hasText(collectionName, "Collection name must not be null or empty!");
/**
 * Count the approximated cardinality of the HyperLogLogs at the given keys.
 * <p>In cluster mode all keys must hash to the same slot; otherwise the
 * command is rejected per element.
 * @param commands the stream of count commands
 * @return a flux of numeric responses carrying the approximated cardinality
 */
@Override
public Flux<NumericResponse<PfCountCommand, Long>> pfCount(Publisher<PfCountCommand> commands) {
    return getConnection().execute(cmd -> Flux.from(commands).concatMap(command -> {
        // Fixed inverted assertion message: the keys must NOT be null or empty
        // (the original text read "Keys must be null or empty for PFCOUNT!").
        Assert.notEmpty(command.getKeys(), "Keys must not be null or empty for PFCOUNT!");
        if (ClusterSlotHashUtil
                .isSameSlotForAllKeys(command.getKeys().toArray(new ByteBuffer[command.getKeys().size()]))) {
            return super.pfCount(Mono.just(command));
        }
        return Mono.error(
                new InvalidDataAccessApiUsageException("All keys must map to same slot for PFCOUNT in cluster mode."));
    }));
}
}
// NOTE(review): fragment — these statements are cut out of a larger bean-mapping
// method (the enclosing signature and the condition that should guard the throw
// are not visible here). As written, the unconditional throw would always fire
// after constructing the bean wrapper; confirm against the full source.
Assert.state(this.mappedClass != null, "Mapped class was not specified"); T mappedObject = BeanUtils.instantiateClass(this.mappedClass); BeanWrapper bw = PropertyAccessorFactory.forBeanPropertyAccess(mappedObject); throw new InvalidDataAccessApiUsageException("Given ResultSet does not contain all fields " + "necessary to populate object of class [" + this.mappedClass.getName() + "]: " + this.mappedProperties);
/**
 * Register a modifier operation mapping the given key/path to a value
 * (e.g. {@code $set: {key: value}}), merging into the operator's existing
 * {@link Document} when one is already present.
 * @param operator the update operator (e.g. {@code $set})
 * @param key the key/path to update (must not be blank)
 * @param value the value to associate with the key
 * @throws InvalidDataAccessApiUsageException if a non-Document value is
 * already registered for the operator
 */
protected void addMultiFieldOperation(String operator, String key, Object value) {
    Assert.hasText(key, "Key/Path for update must not be null or blank.");
    Object current = this.modifierOps.get(operator);
    Document fieldMap;
    if (current == null) {
        fieldMap = new Document();
        this.modifierOps.put(operator, fieldMap);
    }
    else if (current instanceof Document) {
        fieldMap = (Document) current;
    }
    else {
        throw new InvalidDataAccessApiUsageException(
                "Modifier Operations should be a LinkedHashMap but was " + current.getClass());
    }
    fieldMap.put(key, value);
    this.keysToUpdate.add(key);
}
/**
 * Count the approximated cardinality of the HyperLogLogs at the given keys.
 * <p>Cluster restriction: all keys must map to the same slot.
 * @param keys the keys to count over (at least one, no {@code null} elements)
 * @return the approximated cardinality
 */
@Override
public Long pfCount(byte[]... keys) {
    Assert.notEmpty(keys, "PFCOUNT requires at least one non 'null' key.");
    Assert.noNullElements(keys, "Keys for PFCOUNT must not contain 'null'.");
    if (!ClusterSlotHashUtil.isSameSlotForAllKeys(keys)) {
        throw new InvalidDataAccessApiUsageException("All keys must map to same slot for pfcount in cluster mode.");
    }
    try {
        return connection.getCluster().pfcount(keys);
    }
    catch (Exception ex) {
        throw convertJedisAccessException(ex);
    }
}
/**
 * Set whether prepared statements should be capable of returning
 * auto-generated keys. Must be called before the operation is compiled.
 * @throws InvalidDataAccessApiUsageException if the operation is already compiled
 * @see java.sql.Connection#prepareStatement(String, int)
 */
public void setReturnGeneratedKeys(boolean returnGeneratedKeys) {
    if (!isCompiled()) {
        this.returnGeneratedKeys = returnGeneratedKeys;
        return;
    }
    throw new InvalidDataAccessApiUsageException(
            "The returnGeneratedKeys flag must be set before the operation is compiled");
}
/**
 * Count the approximated cardinality of the HyperLogLogs at the given keys.
 * <p>Cluster restriction: all keys must map to the same slot.
 * @param keys the keys to count over (at least one, no {@code null} elements)
 * @return the approximated cardinality
 */
@Override
public Long pfCount(byte[]... keys) {
    // Validate input up front, matching the Jedis cluster implementation of
    // this command; previously empty/null keys reached the slot check directly.
    Assert.notEmpty(keys, "PFCOUNT requires at least one non 'null' key.");
    Assert.noNullElements(keys, "Keys for PFCOUNT must not contain 'null'.");
    if (ClusterSlotHashUtil.isSameSlotForAllKeys(keys)) {
        try {
            return super.pfCount(keys);
        }
        catch (Exception ex) {
            throw convertLettuceAccessException(ex);
        }
    }
    throw new InvalidDataAccessApiUsageException("All keys must map to same slot for pfcount in cluster mode.");
}
assertEquals(featureNotSupEx, idaex.getCause());
/**
 * Store the intersection of the given sorted sets at {@code destKey}.
 * <p>In cluster mode this command is only valid when the destination and all
 * source keys hash to the same slot.
 * @param destKey destination key (never {@code null})
 * @param sets source sorted-set keys (never {@code null}, no {@code null} elements)
 * @return the number of elements in the resulting sorted set
 */
@Override
public Long zInterStore(byte[] destKey, byte[]... sets) {
    Assert.notNull(destKey, "Destination key must not be null!");
    Assert.notNull(sets, "Source sets must not be null!");
    Assert.noNullElements(sets, "Source sets must not contain null elements!");
    byte[][] keysInvolved = ByteUtils.mergeArrays(destKey, sets);
    if (!ClusterSlotHashUtil.isSameSlotForAllKeys(keysInvolved)) {
        throw new InvalidDataAccessApiUsageException("ZINTERSTORE can only be executed when all keys map to the same slot");
    }
    try {
        return connection.getCluster().zinterstore(destKey, sets);
    }
    catch (Exception ex) {
        throw convertJedisAccessException(ex);
    }
}
/**
 * Blocking pop from the head or tail of the lists at the given keys.
 * <p>In cluster mode all keys must hash to the same slot; otherwise the
 * command is rejected per element.
 * @param commands the stream of blocking-pop commands
 * @return a flux of pop responses
 */
@Override
public Flux<PopResponse> bPop(Publisher<BPopCommand> commands) {
    return getConnection().execute(cmd -> Flux.from(commands).concatMap(command -> {
        Assert.notNull(command.getKeys(), "Keys must not be null!");
        Assert.notNull(command.getDirection(), "Direction must not be null!");
        if (!ClusterSlotHashUtil.isSameSlotForAllKeys(command.getKeys())) {
            return Mono.error(new InvalidDataAccessApiUsageException("All keys must map to the same slot for BPOP command."));
        }
        return super.bPop(Mono.just(command));
    }));
}
/**
 * Merge the HyperLogLogs at the source keys into the destination key.
 * <p>Cluster restriction: the destination and every source key must hash to
 * the same slot.
 * @param commands the stream of merge commands
 * @return a flux of boolean responses, one per command
 */
@Override
public Flux<BooleanResponse<PfMergeCommand>> pfMerge(Publisher<PfMergeCommand> commands) {
    return getConnection().execute(cmd -> Flux.from(commands).concatMap(command -> {
        Assert.notNull(command.getKey(), "Key must not be null for PFMERGE");
        Assert.notEmpty(command.getSourceKeys(), "Source keys must not be null or empty for PFMERGE!");
        List<ByteBuffer> involvedKeys = new ArrayList<>(command.getSourceKeys());
        involvedKeys.add(command.getKey());
        ByteBuffer[] keyArray = involvedKeys.toArray(new ByteBuffer[involvedKeys.size()]);
        if (ClusterSlotHashUtil.isSameSlotForAllKeys(keyArray)) {
            return super.pfMerge(Mono.just(command));
        }
        return Mono.error(
                new InvalidDataAccessApiUsageException("All keys must map to same slot for PFMERGE in cluster mode."));
    }));
}