@Override
public ValueWrapper putIfAbsent(Object key, Object value) {
    // Store the mapping only if absent, returning the previous value (wrapped).
    // When a write timeout is configured, use the async variant with a bounded
    // wait; otherwise fall back to the plain blocking call.
    try {
        final Object previous;
        if (writeTimeout > 0) {
            previous = this.nativeCache.putIfAbsentAsync(key, value)
                    .get(writeTimeout, TimeUnit.MILLISECONDS);
        } else {
            previous = this.nativeCache.putIfAbsent(key, value);
        }
        return wrap(previous);
    } catch (InterruptedException e) {
        // Restore the interrupt status before translating to the cache exception.
        Thread.currentThread().interrupt();
        throw new CacheException(e);
    } catch (ExecutionException | TimeoutException e) {
        throw new CacheException(e);
    }
}
static <V extends Serializable> V putIfAbsentWithRetries(CrossDCAwareCacheFactory crossDCAwareCacheFactory, String key, V value, int taskTimeoutInSeconds) { AtomicReference<V> resultRef = new AtomicReference<>(); Retry.executeWithBackoff((int iteration) -> { try { V result; if (taskTimeoutInSeconds > 0) { result = (V) crossDCAwareCacheFactory.getCache().putIfAbsent(key, value); } else { result = (V) crossDCAwareCacheFactory.getCache().putIfAbsent(key, value, taskTimeoutInSeconds, TimeUnit.SECONDS); } resultRef.set(result); } catch (HotRodClientException re) { logger.warnf(re, "Failed to write key '%s' and value '%s' in iteration '%d' . Retrying", key, value, iteration); // Rethrow the exception. Retry will take care of handle the exception and eventually retry the operation. throw re; } }, 10, 10); return resultRef.get(); }
@Override public boolean putIfAbsent(String tokenId, int lifespanInSeconds) { ActionTokenValueEntity tokenValue = new ActionTokenValueEntity(null); // Rather keep the items in the cache for a bit longer lifespanInSeconds = lifespanInSeconds + 10; try { BasicCache<String, ActionTokenValueEntity> cache = tokenCache.get(); ActionTokenValueEntity existing = cache.putIfAbsent(tokenId, tokenValue, lifespanInSeconds, TimeUnit.SECONDS); return existing == null; } catch (HotRodClientException re) { // No need to retry. The hotrod (remoteCache) has some retries in itself in case of some random network error happened. // In case of lock conflict, we don't want to retry anyway as there was likely an attempt to use the token from different place. logger.debugf(re, "Failed when adding token %s", tokenId); return false; } }
/**
 * Handles the PUT_IF_ABSENT operation: stores the message's KEY/VALUE headers
 * in the cache only if the key is absent, honoring optional lifespan and
 * max-idle headers, and places the previous value (or null) in the result.
 *
 * <p>Fix: a duplicate {@code @InvokeOnHeader("PUTIFABSENT")} with a hard-coded
 * literal was declared alongside the constant-based annotation;
 * {@code @InvokeOnHeader} is not repeatable, so the stray duplicate is removed
 * and the {@code InfinispanConstants.PUT_IF_ABSENT} form is kept.
 */
@InvokeOnHeader(InfinispanConstants.PUT_IF_ABSENT)
void onPutIfAbsent(Message message) {
    final BasicCache<Object, Object> cache = manager.getCache(message, this.cacheName);
    final Object key = message.getHeader(InfinispanConstants.KEY);
    final Object value = message.getHeader(InfinispanConstants.VALUE);
    final Object result;

    if (hasLifespan(message)) {
        long lifespan = message.getHeader(InfinispanConstants.LIFESPAN_TIME, long.class);
        TimeUnit timeUnit = message.getHeader(InfinispanConstants.LIFESPAN_TIME_UNIT, TimeUnit.class);

        if (hasMaxIdleTime(message)) {
            // Both lifespan and max-idle headers present: use the full overload.
            long maxIdle = message.getHeader(InfinispanConstants.MAX_IDLE_TIME, long.class);
            TimeUnit maxIdleTimeUnit = message.getHeader(InfinispanConstants.MAX_IDLE_TIME_UNIT, TimeUnit.class);
            result = cache.putIfAbsent(key, value, lifespan, timeUnit, maxIdle, maxIdleTimeUnit);
        } else {
            result = cache.putIfAbsent(key, value, lifespan, timeUnit);
        }
    } else {
        result = cache.putIfAbsent(key, value);
    }

    setResult(message, result);
}
/**
 * Returns the cached value for {@code key}, invoking {@code valueLoader} to
 * compute and store it on a miss. A per-key {@link ReentrantLock} ensures only
 * one local thread runs the loader for a given key; {@code null} loader results
 * are stored as a {@code NullValue} sentinel and unwrapped on return.
 */
@Override
public <T> T get(Object key, Callable<T> valueLoader) {
    ReentrantLock lock = null;
    // Optimistic lock-free read first; most calls are cache hits.
    T value = (T) nativeCache.get(key);
    if (value == null) {
        // Miss: serialize local loaders for this key behind a per-key lock.
        lock = synchronousGetLocks.computeIfAbsent(key, k -> new ReentrantLock());
        lock.lock();
        try {
            // Re-check under the lock — another thread may have loaded it meanwhile.
            if ((value = (T) nativeCache.get(key)) == null) {
                try {
                    T newValue = valueLoader.call();
                    // we can't use computeIfAbsent here since in distributed embedded scenario we would
                    // send a lambda to other nodes. This is the behavior we want to avoid.
                    // putIfAbsent returns the prior value; non-null means we lost a remote race.
                    value = (T) nativeCache.putIfAbsent(key, newValue == null ? NullValue.NULL : newValue);
                    if (value == null) {
                        // Our write won; hand back the freshly loaded value.
                        value = newValue;
                    }
                } catch (Exception e) {
                    throw ValueRetrievalExceptionResolver.throwValueRetrievalException(key, valueLoader, e);
                }
            }
        } finally {
            // Release and drop the per-key lock entry so the map does not grow unbounded.
            lock.unlock();
            synchronousGetLocks.remove(key);
        }
    }
    // Translate the NullValue sentinel back to null for the caller.
    return unwrapNull(value);
}