/**
 * Marshalls the given object to a byte array.
 *
 * @param obj the object to serialize
 * @return the marshalled bytes
 * @throws IOException if serialization fails or the thread was interrupted
 *         while marshalling (the interrupt flag is restored before throwing)
 */
@Override
public byte[] serialize(Object obj) throws IOException {
   try {
      return marshaller.objectToByteBuffer(obj);
   } catch (InterruptedException interruption) {
      if (log.isTraceEnabled()) {
         log.trace("Interrupted while serializing object");
      }
      // Restore the interrupt status before converting to a checked IOException.
      Thread.currentThread().interrupt();
      throw new IOException(interruption);
   }
}
/** * {@inheritDoc} */ @Override public boolean obtain() { Object previousValue = noCacheStoreCache.putIfAbsent(keyOfLock, keyOfLock); if (previousValue == null) { if (log.isTraceEnabled()) { log.tracef("Lock: %s acquired for index: %s", lockName, indexName); } // we own the lock: return true; } else { if (log.isTraceEnabled()) { log.tracef("Lock: %s not aquired for index: %s, was taken already.", lockName, indexName); } return false; } }
/** * {@inheritDoc} */ @Override public BaseLuceneLock makeLock(String lockName) { BaseLuceneLock lock; //It appears Lucene always uses the same name so we give locks //having this name a special treatment: if (DEF_LOCK_NAME.equals(lockName)) { lock = defLock; } else { // this branch is never taken with current Lucene version. lock = new BaseLuceneLock(cache, indexName, lockName); } if (log.isTraceEnabled()) { log.tracef("Lock prepared, not acquired: %s for index %s", lockName, indexName); } return lock; }
/**
 * Used by Lucene at Directory creation: we expect the lock to not exist in this case.
 */
public void clearLock() {
   final boolean wasHeld = noCacheStoreCache.remove(keyOfLock) != null;
   if (wasHeld && log.isTraceEnabled()) {
      log.tracef("Lock removed for index: %s", indexName);
   }
}
/** * {@inheritDoc} */ @Override public TransactionalSharedLuceneLock makeLock(String lockName) { TransactionalSharedLuceneLock lock; //It appears Lucene always uses the same name so we give locks //having this name a special treatment: if (DEF_LOCK_NAME.equals(lockName)) { lock = defLock; } else { // this branch is never taken with current Lucene version. lock = new TransactionalSharedLuceneLock(cache, indexName, lockName, tm); } if (log.isTraceEnabled()) { log.tracef("Lock prepared, not acquired: %s for index %s", lockName, indexName); } return lock; }
/**
 * Stores {@code value} under the leaf-node key {@code key}, applying the
 * configured lifespan when lifespans are enabled for leaf nodes.
 *
 * @param key   the Fqn identifying the leaf node
 * @param value the value to store
 */
void putValueToCacheLeafNode(Fqn key, Object value) {
   if (attachLifespanToLeafNodes) {
      cache.put(key, value, leafNodeLifespan, TimeUnit.MILLISECONDS);
      if (log.isTraceEnabled()) {
         // Pass the lifespan as a format argument instead of concatenating it
         // into the format string: the original eagerly built a new format
         // string on every call, defeating the point of the parameterized API.
         // The rendered message text is unchanged.
         log.tracef("Added record %s with leafNodeLifespan %dms", key, leafNodeLifespan);
      }
   } else {
      cache.put(key, value);
      if (log.isTraceEnabled()) {
         log.tracef("Added record %s with infinite leafNodeLifespan", key);
      }
   }
}
/** * {@inheritDoc} */ @Override public void clearLock(String lockName) { //Same special care as above for locks named DEF_LOCK_NAME: if (DEF_LOCK_NAME.equals(lockName)) { defLock.clearLockSuspending(); } else { new TransactionalSharedLuceneLock(cache, indexName, lockName, tm).clearLockSuspending(); } if (log.isTraceEnabled()) { log.tracef("Removed lock: %s for index %s", lockName, indexName); } }
/** * {@inheritDoc} */ @Override public void clearLock(String lockName) { //Same special care as above for locks named DEF_LOCK_NAME: if (DEF_LOCK_NAME.equals(lockName)) { defLock.clearLock(); } else { new BaseLuceneLock(cache, indexName, lockName).clearLock(); } if (log.isTraceEnabled()) { log.tracef("Removed lock: %s for index %s", lockName, indexName); } }
public void blockUntilCacheStoreContains(Set<Object> expectedState, long timeout) { long killTime = timeService.wallClockTime() + timeout; // Set<? extends Map.Entry<?, InternalCacheEntry>> expectedEntries = expectedState.entrySet(); Set<Object> notStored = null; Set<Object> notRemoved = null; while (timeService.wallClockTime() < killTime) { // Find out which entries might not have been removed from the store notRemoved = InfinispanCollections.difference(store.keySet(), expectedState); // Find out which entries might not have been stored notStored = InfinispanCollections.difference(expectedState, store.keySet()); if (notStored.isEmpty() && notRemoved.isEmpty()) break; TestingUtil.sleepThread(100); } if ((notStored != null && !notStored.isEmpty()) || (notRemoved != null && !notRemoved.isEmpty())) { if (log.isTraceEnabled()) { log.tracef("Entries still not stored: %s", notStored); log.tracef("Entries still not removed: %s", notRemoved); } throw new RuntimeException(String.format( "Timed out waiting (%d ms) for cache store to be flushed. entries-not-stored=[%s], entries-not-removed=[%s]", timeout, notStored, notRemoved)); } }
if (log.isTraceEnabled()) { Set<Integer> segmentsCompleted = new HashSet<>(); segments.get().forEachRemaining((Integer segment) -> segmentsCompleted.add(segment));
/**
 * Verifies that an in-doubt transaction visible on both nodes can be
 * force-committed via node 0 using its internal id.
 */
public void testForceCommitOnOtherNode() throws Exception {
   // Both nodes must report exactly one in-doubt transaction.
   final String inDoubt = showInDoubtTransactions(0);
   assertInDoubtTxCount(inDoubt, 1);
   assertInDoubtTxCount(showInDoubtTransactions(1), 1);

   final List<Long> ids = getInternalIds(inDoubt);
   assertEquals(1, ids.size());

   // The in-doubt data must not be visible yet on either node.
   assertEquals(0, cache(0, "test").keySet().size());
   assertEquals(0, cache(1, "test").keySet().size());

   if (log.isTraceEnabled()) {
      log.trace("Before forcing commit!");
   }
   final String result = invokeForceWithId("forceCommit", 0, ids.get(0));
   checkResponse(result, 1);
}
/**
 * Creates the node identified by {@code fqn}, recursively creating any missing
 * ancestors and registering the node in its parent's structure map.
 *
 * @param fqn        the node to create
 * @param isLeafNode true if the node is a leaf (no structure map of its own)
 * @return true if the node was created, false if it already existed
 */
private boolean createNodeInCache(Fqn fqn, boolean isLeafNode) {
   if (cache.containsKey(fqn)) {
      return false; // already present, nothing to do
   }
   final Fqn parent = fqn.getParent();
   if (!fqn.isRoot()) {
      // Ensure the whole ancestor chain exists before linking this node in.
      if (!exists(parent)) {
         createNodeInCache(parent, false);
      }
      getStructure(parent).put(fqn.getLastElement(), fqn);
   }
   if (!isLeafNode) {
      // Non-leaf nodes get their own (possibly empty) structure map.
      getStructure(fqn);
   }
   if (log.isTraceEnabled()) {
      log.tracef("Created node %s", fqn);
   }
   return true;
}
void renameFile(final String from, final String to) { final FileCacheKey fromKey = new FileCacheKey(indexName, from); final FileMetadata metadata = metadataCache.get(fromKey); final int bufferSize = metadata.getBufferSize(); // preparation: copy all chunks to new keys int i = -1; Object ob; do { final ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize); ob = chunksCache.get(fromChunkKey); if (ob == null) { break; } final ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize); chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob); } while (true); // rename metadata first metadataCache.put(new FileCacheKey(indexName, to), metadata); fileOps.removeAndAdd(from, to); // now trigger deletion of old file chunks: readLocks.deleteOrReleaseReadLock(from); if (log.isTraceEnabled()) { log.tracef("Renamed file from: %s to: %s in index %s", from, to, indexName); } }
public void renameFile(String from, String to) { ensureOpen(); final FileCacheKey fromKey = new FileCacheKey(indexName, from); final FileMetadata metadata = metadataCache.get(fromKey); final int bufferSize = metadata.getBufferSize(); // preparation: copy all chunks to new keys int i = -1; Object ob; do { ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize); ob = chunksCache.get(fromChunkKey); if (ob == null) { break; } ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize); chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob); } while (true); // rename metadata first metadataCache.put(new FileCacheKey(indexName, to), metadata); fileOps.removeAndAdd(from, to); // now trigger deletion of old file chunks: readLocks.deleteOrReleaseReadLock(from); if (log.isTraceEnabled()) { log.tracef("Renamed file from: %s to: %s in index %s", from, to, indexName); } }
// Snapshot the trace flag and decompose the composite read-lock key up front
// so they are read only once in the code that follows.
final boolean trace = log.isTraceEnabled(); final String indexName = readLockKey.getIndexName(); final String filename = readLockKey.getFileName();
// Metadata entries are keyed by (index name, file name, affinity segment id).
final boolean trace = log.isTraceEnabled(); final FileCacheKey key = new FileCacheKey(indexName, fileName, affinitySegmentId); if (trace) log.tracef("deleting metadata: %s", key);
// Metadata entries are keyed by (index name, file name, affinity segment id).
final boolean trace = log.isTraceEnabled(); final FileCacheKey key = new FileCacheKey(indexName, fileName, affinitySegmentId); if (trace) log.tracef("deleting metadata: %s", key);
long size = log.isTraceEnabled() ? c.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).size() : 0; if (c.getAdvancedCache().getRpcManager() != null) { log.tracef("Local size on %s before stopping: %d", c.getAdvancedCache().getRpcManager().getAddress(), size);