@Override
public TransactionBatch createBatch() {
    // No transaction manager configured -> batching is a no-op.
    if (this.tm == null) return NON_TX_BATCH;
    TransactionBatch batch = getCurrentBatch();
    try {
        // An active batch already exists on this thread: nest on it instead of
        // starting a new transaction.
        if ((batch != null) && (batch.getState() == Batch.State.ACTIVE)) {
            return batch.interpose();
        }
        // Detach whatever transaction is currently associated with this thread,
        // then begin a fresh one for the new batch.
        // NOTE(review): the return value of suspend() is discarded here, so the
        // previously associated transaction is not captured for later resume —
        // presumably handled elsewhere (e.g. suspendBatch/resumeBatch); confirm.
        this.tm.suspend();
        this.tm.begin();
        Transaction tx = this.tm.getTransaction();
        // The synchronization clears the thread-local batch when the
        // transaction completes.
        tx.registerSynchronization(CURRENT_BATCH_SYNCHRONIZATION);
        batch = new InfinispanBatch(tx);
        setCurrentBatch(batch);
        return batch;
    } catch (RollbackException | SystemException | NotSupportedException e) {
        throw new CacheException(e);
    }
}
@Override
public void close() {
    // Stop receiving cache events before tearing anything else down.
    this.cache.removeListener(this);
    this.shutdown(this.topologyChangeExecutor);
    try (Batch batch = this.batcher.createBatch()) {
        // If this remove fails, the entry will be auto-removed on topology change by the new primary owner
        this.cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES, Flag.FAIL_SILENTLY).remove(this.group.getAddress(this.group.getLocalMember()));
    } catch (CacheException e) {
        // Best-effort removal: log and continue shutting down.
        ClusteringLogger.ROOT_LOGGER.warn(e.getLocalizedMessage(), e);
    } finally {
        // Cleanup any unregistered listeners
        for (ExecutorService executor : this.listeners.values()) {
            this.shutdown(executor);
        }
        this.listeners.clear();
        // Signal the owner that this component is fully closed.
        this.closeTask.run();
    }
}
/**
 * Stores an entry with a lifespan, substituting a null-marker for {@code null}
 * values. Uses a bounded asynchronous write when a write timeout is configured.
 *
 * @see org.infinispan.commons.api.BasicCache#put(Object, Object, long, TimeUnit)
 */
public void put(Object key, Object value, long lifespan, TimeUnit unit) {
    // The native cache cannot store null, so a sentinel stands in for it.
    Object storedValue = (value == null) ? NullValue.NULL : value;
    try {
        if (writeTimeout > 0) {
            // Bound the write by the configured timeout (milliseconds).
            this.nativeCache.putAsync(key, storedValue, lifespan, unit).get(writeTimeout, TimeUnit.MILLISECONDS);
        } else {
            this.nativeCache.put(key, storedValue, lifespan, unit);
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag before propagating.
        Thread.currentThread().interrupt();
        throw new CacheException(e);
    } catch (ExecutionException | TimeoutException e) {
        throw new CacheException(e);
    }
}
/**
 * Resolves the {@link EntryMergePolicy} for the given partition-handling configuration.
 * Deployed (user-supplied) policies are looked up asynchronously, waiting up to
 * {@code TIMEOUT_SECONDS} for the deployment processor to register the instance.
 *
 * @param config the partition-handling configuration; its merge policy may be null
 * @return the resolved merge policy instance, or {@code null} if none is configured
 * @throws IllegalStateException if resolution fails or the thread is interrupted
 * @throws CacheException if the deployed policy is not registered within the timeout
 */
@Override
public <T> T createInstance(PartitionHandlingConfiguration config) {
    EntryMergePolicy mergePolicy = config.mergePolicy();
    if (mergePolicy == null) return null;
    if (mergePolicy instanceof DeployedMergePolicy) {
        DeployedMergePolicy wrapper = (DeployedMergePolicy) mergePolicy;
        try {
            // Wait (bounded) for the deployment processor to complete the future
            // keyed by the policy's class name.
            return (T) policies.computeIfAbsent(wrapper.getClassName(), k -> new CompletableFuture<>()).get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status so callers up the stack can
            // observe the interruption (previously swallowed by the multi-catch).
            Thread.currentThread().interrupt();
            throw new IllegalStateException("An error occurred while processing the deployment", e);
        } catch (ExecutionException e) {
            throw new IllegalStateException("An error occurred while processing the deployment", e);
        } catch (TimeoutException e) {
            InfinispanLogger.ROOT_LOGGER.loadingCustomMergePolicyTimeout(wrapper.getClassName());
            throw new CacheException(e);
        }
    }
    return (T) mergePolicy;
}
/**
 * Verifies that a failure in the state-transfer task surfaces as a
 * {@code CacheException} on the future returned to the caller.
 */
public void testGetReplicaException() {
    // Simulate a transfer task that completes exceptionally.
    CompletableFuture<Void> failedTask = new CompletableFuture<>();
    failedTask.completeExceptionally(new CacheException("Problem encountered retrieving state"));
    initTransferTaskMock(failedTask);
    // The failure must propagate through the replica-retrieval future.
    CompletableFuture<List<Map<Address, CacheEntry<Object, Object>>>> replicas =
            stateReceiver.getAllReplicasForSegment(0, localizedCacheTopology, 10000);
    Exceptions.expectExecutionException(CacheException.class, replicas);
}
/**
 * Checks whether the lock marker key is present, deliberately performing the
 * read outside of any caller transaction so the result reflects committed
 * state rather than the caller's in-flight changes.
 */
public boolean isLocked() {
    boolean locked = false;
    Transaction tx = null;
    try {
        // if there is an ongoing transaction we need to suspend it
        if ((tx = tm.getTransaction()) != null) {
            tm.suspend();
        }
        locked = noCacheStoreCache.containsKey(keyOfLock);
    } catch (Exception e) {
        // Best-effort: log the failure and report "not locked".
        log.errorSuspendingTransaction(e);
    } finally {
        if (tx != null) {
            try {
                // Re-associate the caller's transaction with this thread.
                tm.resume(tx);
            } catch (Exception e) {
                throw new CacheException("Unable to resume suspended transaction " + tx, e);
            }
        }
    }
    return locked;
}
/**
 * On topology change, verifies this server's address is still known cluster-wide
 * and re-adds it to the address cache if any node lost it. Retries until it
 * succeeds or the executor/cache is shutting down.
 */
@TopologyChanged
public void topologyChanged(TopologyChangedEvent<Address, ServerAddress> event) {
    boolean success = false;
    // Retry loop: any failure (e.g. in-flight rebalance) is logged and retried
    // while the cache still allows invocations.
    while (!success && !distributedExecutorService.isShutdown() && addressCache.getStatus().allowInvocations()) {
        try {
            List<CompletableFuture<Boolean>> futures = distributedExecutorService.submitEverywhere(
                    new CheckAddressTask(clusterAddress));
            // No need for a timeout here, the distributed executor has a default task timeout
            AtomicBoolean result = new AtomicBoolean(true);
            futures.forEach(f -> {
                try {
                    // Any node answering "false" means the address is missing there.
                    if (!f.get()) {
                        result.set(false);
                    }
                } catch (InterruptedException | ExecutionException e) {
                    throw new CacheException(e);
                }
            });
            if (!result.get()) {
                log.debugf("Re-adding %s to the topology cache", clusterAddress);
                addressCache.putAsync(clusterAddress, address);
            }
            success = true;
        } catch (Throwable e) {
            log.debug("Error re-adding address to topology cache, retrying", e);
        }
    }
}
// Closing brace of the enclosing class (visible in this chunk).
}
/**
 * Removes the creation and access metadata entries for the given session id.
 * Returns false if the creation-metadata entry is locked by another owner.
 */
private boolean remove(String id, Cache<SessionCreationMetaDataKey, SessionCreationMetaDataEntry<L>> creationMetaDataCache) {
    SessionCreationMetaDataKey key = new SessionCreationMetaDataKey(id);
    try {
        // Proceed when: lock-on-write is disabled, there is no active transaction,
        // or the entry lock can be acquired immediately (zero timeout, fail silently).
        if (!this.properties.isLockOnWrite() || (creationMetaDataCache.getAdvancedCache().getTransactionManager().getTransaction() == null) || creationMetaDataCache.getAdvancedCache().withFlags(Flag.ZERO_LOCK_ACQUISITION_TIMEOUT, Flag.FAIL_SILENTLY).lock(key)) {
            // Remove both metadata entries; return values are not needed.
            creationMetaDataCache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES).remove(key);
            this.accessMetaDataCache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES).remove(new SessionAccessMetaDataKey(id));
            return true;
        }
        // Lock not acquired — another thread/node owns the entry.
        return false;
    } catch (SystemException e) {
        throw new CacheException(e);
    }
}
Transaction existingTx = this.tm.suspend(); if (existingBatch.getTransaction() != existingTx) { throw new IllegalStateException(); this.tm.resume(tx); setCurrentBatch(batch); return () -> { try { this.tm.suspend(); if (existingBatch != null) { try { this.tm.resume(existingBatch.getTransaction()); } catch (InvalidTransactionException e) { throw new CacheException(e); throw new CacheException(e); } finally { setCurrentBatch(existingBatch); throw new CacheException(e);
/**
 * Creates a lock factory bound to the given cache and index, requiring a
 * TransactionManager to be associated with the cache.
 */
public TransactionalLockFactory(Cache<?, ?> cache, String indexName) {
    this.cache = cache;
    this.indexName = indexName;
    tm = cache.getAdvancedCache().getTransactionManager();
    if (tm == null) {
        // Distinguish "no TM configured" from "cache not yet started".
        ComponentStatus cacheStatus = cache.getAdvancedCache().getComponentRegistry().getStatus();
        if (cacheStatus.equals(ComponentStatus.RUNNING)) {
            throw new CacheException(
                    "Failed looking up TransactionManager. Check if any transaction manager is associated with Infinispan cache: \'"
                            + cache.getName() + "\'");
        }
        throw new CacheException("Failed looking up TransactionManager: the cache is not running");
    }
    defLock = new TransactionalSharedLuceneLock(cache, indexName, DEF_LOCK_NAME, tm);
}
@Override
public TransactionBatch suspendBatch() {
    // No transaction manager configured -> nothing to suspend.
    if (this.tm == null) return NON_TX_BATCH;
    TransactionBatch batch = getCurrentBatch();
    if (batch != null) {
        try {
            // Detach the batch's transaction from this thread; sanity-check that
            // the transaction we suspended is the one the batch owns.
            Transaction tx = this.tm.suspend();
            if (batch.getTransaction() != tx) {
                throw new IllegalStateException();
            }
        } catch (SystemException e) {
            throw new CacheException(e);
        } finally {
            // Always clear the thread-local, even if suspend failed.
            setCurrentBatch(null);
        }
    }
    return batch;
}
// Closing brace of the enclosing class (visible in this chunk).
}
/**
 * Acquires a Matcher instance from the ComponentRegistry of the given Cache object,
 * and the QueryCache from the cache manager's global registry.
 */
@Inject
protected void injectDependencies(Cache cache) {
    this.queryCache = cache.getCacheManager().getGlobalComponentRegistry().getComponent(QueryCache.class);
    ComponentRegistry registry = cache.getAdvancedCache().getComponentRegistry();
    matcher = registry.getComponent(matcherImplClass);
    if (matcher == null) {
        throw new CacheException("Expected component not found in registry: " + matcherImplClass.getName());
    }
}
/**
 * Acquires a Matcher instance from the ComponentRegistry of the given Cache object,
 * along with the QueryCache component from the same registry.
 */
@Inject
protected void injectDependencies(Cache cache) {
    ComponentRegistry registry = cache.getAdvancedCache().getComponentRegistry();
    queryCache = registry.getComponent(QueryCache.class);
    matcher = registry.getComponent(matcherImplClass);
    if (matcher == null) {
        throw new CacheException("Expected component not found in registry: " + matcherImplClass.getName());
    }
}
assertEquals(Arrays.asList(address(0), address(1), address(2)), advancedCache(0).getDistributionManager().locate(key)); Cache<Object, Object> primaryOwnerCache = cache(0); final Cache<Object, Object> newBackupOwnerCache = cache(3); primaryOwnerCache.put(key, VALUE); int currentTopologyId = primaryOwnerCache.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId(); Future<Object> secondCommitFuture = fork(() -> { CommitCommand command = new CommitCommand(ByteString.fromString(newBackupOwnerCache.getName()), gtx); command.setTopologyId(currentTopologyId); CommandsFactory cf = TestingUtil.extractCommandsFactory(newBackupOwnerCache); command.invoke(); } catch (Throwable throwable) { throw new CacheException(throwable);
// Generated logger-factory method (JBoss Logging style): builds the localized
// CacheException for "error processing entry retrieval values".
public final org.infinispan.commons.CacheException exceptionProcessingEntryRetrievalValues(final java.lang.Throwable arg0) {
    org.infinispan.commons.CacheException result = new org.infinispan.commons.CacheException(java.lang.String.format(exceptionProcessingEntryRetrievalValues$str()), arg0);
    // Trim the top stack frame so the trace points at the caller, not this factory.
    java.lang.StackTraceElement[] st = result.getStackTrace();
    result.setStackTrace(java.util.Arrays.copyOfRange(st, 1, st.length));
    return result;
}
private void registerQueryMBeans(ComponentRegistry cr, Configuration cfg, SearchIntegrator sf) { AdvancedCache<?, ?> cache = cr.getComponent(Cache.class).getAdvancedCache(); String queryGroupName = getQueryGroupName(cacheManagerName, cache.getName()); jmxDomain = JmxUtil.buildJmxDomain(globalCfg, mbeanServer, queryGroupName); JmxUtil.registerMBean(stats, statsObjName, mbeanServer); } catch (Exception e) { throw new CacheException( "Unable to register query module statistics mbean", e); JmxUtil.registerMBean(mbean, massIndexerObjName, mbeanServer); } catch (Exception e) { throw new CacheException("Unable to create ", e);
/**
 * This is invoked only on the receiving node, before
 * {@link #perform(org.infinispan.context.InvocationContext)}.
 * Resolves the target cache by name and captures its search factory and
 * query interceptor for use during command execution.
 */
@Override
public void fetchExecutionContext(CommandInitializer ci) {
    String name = cacheName.toString();
    // Guard clause: the cache must already exist and be started on this node.
    if (!ci.getCacheManager().cacheExists(name)) {
        throw new CacheException("Cache named '" + name + "' does not exist on this CacheManager, or was not started");
    }
    Cache cache = ci.getCacheManager().getCache(name);
    SearchManager searchManager = new SearchManagerImpl(cache.getAdvancedCache());
    searchFactory = searchManager.unwrap(SearchIntegrator.class);
    queryInterceptor = ComponentRegistryUtils.getQueryInterceptor(cache);
}
/**
 * Manual (interactive) memory-consumption test: populates a cache with the
 * configured number/size of entries, forces a GC, then blocks on stdin so
 * memory usage can be inspected externally (jconsole, etc.).
 */
public void testMemConsumption() throws IOException {
    int kBytesCached = (bytesPerCharacter * numEntries * (payloadSize + keySize)) / 1024;
    System.out.println("Bytes to be cached: " + NumberFormat.getIntegerInstance().format(kBytesCached) + " kb");
    Cache c = TestCacheManagerFactory.createCacheManager().getCache();
    for (int i = 0; i < numEntries; i++) {
        // Payload type selects between String and byte[] keys/values.
        switch (payloadType) {
            case STRINGS:
                c.put(generateUniqueString(i, keySize), generateRandomString(payloadSize));
                break;
            case BYTE_ARRAYS:
                c.put(generateUniqueKey(i, keySize), generateBytePayload(payloadSize));
                break;
            default:
                throw new CacheException("Unknown payload type");
        }
        // Progress indicator for large populations.
        if (i % 1000 == 0) System.out.println("Added " + i + " entries");
    }
    System.out.println("Calling System.gc()");
    System.gc(); // clear any unnecessary objects
    TestingUtil.sleepThread(1000); // wait for gc
    // wait for manual test exit
    System.out.println("Cache populated; check mem usage using jconsole, etc.!");
    System.in.read();
}
/**
 * Maps the underlying JTA transaction status to a batch state.
 * Note the deliberate switch fall-through: an ACTIVE transaction whose batch
 * is no longer marked active is treated like MARKED_ROLLBACK (DISCARDED).
 */
@Override
public State getState() {
    try {
        switch (this.tx.getStatus()) {
            case Status.STATUS_ACTIVE: {
                if (this.active) {
                    return State.ACTIVE;
                }
                // Otherwise fall through
            }
            case Status.STATUS_MARKED_ROLLBACK: {
                return State.DISCARDED;
            }
            default: {
                // Any other status (committed, rolled back, etc.) means closed.
                return State.CLOSED;
            }
        }
    } catch (SystemException e) {
        throw new CacheException(e);
    }
}
private void incrDecr(Channel ch) throws StreamCorruptedException { byte[] prev = cache.get(key); Object ret; MemcachedOperation op = header.operation; if (cache.replace(key, prev, counterString.getBytes(), buildMetadata())) { if (isStatsEnabled) { if (op == MemcachedOperation.IncrementRequest) { } else { throw new CacheException("Value modified since we retrieved from the cache, old value was " + prevCounter);