Refine search
/**
 * Invoked after a command completes; tests every registered condition against the
 * invocation outcome and runs the actions of those that match.
 *
 * @param rCtx      the invocation context the command ran in
 * @param rCommand  the command that just completed
 * @param rv        the command's return value (unused here; part of the callback signature)
 * @param throwable non-null if the command threw instead of returning
 */
private void assertCondition(InvocationContext rCtx, VisitableCommand rCommand, Object rv, Throwable throwable) throws Throwable {
   // "Success" requires both no exception and the command reporting itself successful.
   boolean succeeded = throwable == null && rCommand.isSuccessful();
   log.tracef("After command(successful=%s) %s", succeeded, rCommand);
   List<Runnable> toExecute = new ArrayList<>();
   // Collect matching actions under the lock, but defer running them until after
   // the lock is released, so actions cannot deadlock against this interceptor.
   synchronized (ExpectingInterceptor.this) {
      for (Iterator<Condition> iterator = conditions.iterator(); iterator.hasNext(); ) {
         Condition condition = iterator.next();
         log.tracef("Testing condition %s", condition);
         // A null success flag means "match regardless of outcome".
         if ((condition.success == null || condition.success == succeeded) && condition.predicate.test(rCtx, rCommand)) {
            assert condition.action != null;
            log.trace("Condition succeeded");
            toExecute.add(condition.action);
            // A null removeCheck means "always remove once matched";
            // otherwise the check decides whether the condition is one-shot.
            if (condition.removeCheck == null || condition.removeCheck.getAsBoolean()) {
               iterator.remove();
            }
         } else {
            log.trace("Condition test failed");
         }
      }
   }
   // execute without holding the lock
   for (Runnable runnable : toExecute) {
      log.tracef("Executing %s", runnable);
      runnable.run();
   }
}
@Override public void updateBucket(Bucket b) throws CacheLoaderException { File f = new File(root, b.getBucketIdAsString()); if (f.exists()) { if (!purgeFile(f)) { log.problemsRemovingFile(f); } else if (trace) { log.tracef("Successfully deleted file: '%s'", f.getName()); } } if (!b.getEntries().isEmpty()) { try { byte[] bytes = marshaller.objectToByteBuffer(b); fileSync.write(bytes, f); } catch (IOException ex) { log.errorSavingBucket(b, ex); throw new CacheLoaderException(ex); } catch (InterruptedException ie) { if (trace) { log.trace("Interrupted while marshalling a bucket"); } Thread.currentThread().interrupt(); // Restore interrupted status } } }
public boolean await(String event, int count, long timeout, TimeUnit unit) throws InterruptedException { log.tracef("Waiting for event %s * %d", event, count); lock.lock(); try { while (waitNanos > 0) { if (released) { log.trace("Unblocked all events."); return true; log.tracef("Received event %s * %d (available = %d, total = %d)", event, count, status.available, status.total); return true;
/**
 * Atomically removes the node identified by {@code fqn} by detaching it from its parent.
 *
 * @param cache the underlying cache holding the tree structure
 * @param fqn   the fully-qualified name of the node to remove
 * @return true if the parent existed and the child was actually removed; false otherwise
 *         (the root node is never removable)
 */
private boolean removeNode(AdvancedCache<NodeKey, AtomicMap<?, ?>> cache, Fqn fqn) {
   // The root can never be removed.
   if (fqn.isRoot()) {
      return false;
   }
   startAtomic();
   boolean removed;
   try {
      if (trace) log.tracef("About to remove node %s", fqn);
      Node<K, V> parent = getNode(cache, fqn.getParent());
      removed = parent != null && parent.removeChild(fqn.getLastElement());
   } finally {
      // Always close the atomic scope, even if the lookup/removal threw.
      endAtomic();
   }
   if (trace) log.trace("Node successfully removed");
   return removed;
}
public void testHashingDistribution() { // ensure even bucket distribution of lock stripes List<String> keys = createRandomKeys(1000); Map<Integer, Integer> distribution = new HashMap<>(); for (String s : keys) { int segmentIndex = stripedHashFunction.hashToSegment(s); log.tracef("Lock for %s is %s", s, segmentIndex); if (distribution.containsKey(segmentIndex)) { int count = distribution.get(segmentIndex) + 1; distribution.put(segmentIndex, count); } else { distribution.put(segmentIndex, 1); } } // cannot be larger than the number of locks log.trace("dist size: " + distribution.size()); log.trace("num shared locks: " + stripedHashFunction.getNumSegments()); assert distribution.size() <= stripedHashFunction.getNumSegments(); // assume at least a 2/3rd spread assert distribution.size() * 1.5 >= stripedHashFunction.getNumSegments(); }
/**
 * Returns a future that completes once {@code count} occurrences of {@code event}
 * are available. Completes immediately when the barrier has been released or
 * enough occurrences have already arrived; otherwise the request is queued.
 *
 * @param event the event name to wait for
 * @param count how many occurrences are required
 * @return a future completed when the requested occurrences have been consumed
 */
public CompletableFuture<Void> future(String event, int count) {
   log.tracef("Waiting for event %s * %d", event, count);
   lock.lock();
   try {
      if (released) {
         log.trace("Unblocked all events.");
         return CompletableFutures.completedNull();
      }
      // computeIfAbsent replaces the explicit get/null-check/put sequence.
      EventStatus status = events.computeIfAbsent(event, e -> new EventStatus());
      if (status.available >= count) {
         // Enough occurrences already arrived; consume them and complete immediately.
         status.available -= count;
         return CompletableFutures.completedNull();
      }
      // Not enough yet: queue the request; the producer side completes it later.
      if (status.requests == null) {
         status.requests = new ArrayList<>();
      }
      CompletableFuture<Void> f = new CompletableFuture<>();
      status.requests.add(new Request(f, count));
      return f;
   } finally {
      lock.unlock();
   }
}
/**
 * Cross-checks the value of key "k" on every node against the value read from
 * cache 0, and verifies "k"'s owner list; any mismatch flips the shared
 * {@code correctness} flag to false.
 *
 * @param i the iteration number, used only for logging
 */
private void checkCorrectness(int i) {
   if (checkCorrectness) {
      log.tracef("Checking correctness for iteration %s", i);
      print("Checking correctness");
      List<Address> owners = advancedCache(0).getDistributionManager().locate("k");
      if (!checkOwners(owners)) {
         correctness.set(false);
      }
      for (int q = 0; q < nodes; q++) {
         print(q, cache(0).get("k"));
      }
      Object expectedValue = cache(0).get("k");
      log.tracef("Original value read from cache 0 is %s", expectedValue);
      for (int j = 0; j < nodes; j++) {
         Object actualValue = cache(j).get("k");
         // Objects.equals replaces the hand-rolled null-safe comparison.
         boolean areEquals = java.util.Objects.equals(expectedValue, actualValue);
         print("Are " + actualValue + " and " + expectedValue + " equals ? " + areEquals);
         if (!areEquals) {
            correctness.set(false);
            print("Consistency error. On cache 0 we had " + expectedValue + " and on " + j + " we had " + actualValue);
            log.trace("Consistency error. On cache 0 we had " + expectedValue + " and on " + j + " we had " + actualValue);
         }
      }
   }
}
private void move(AdvancedCache<NodeKey, AtomicMap<?, ?>> cache, Fqn nodeToMoveFqn, Fqn newParentFqn) throws NodeNotExistsException { if (trace) log.tracef("Moving node '%s' to '%s'", nodeToMoveFqn, newParentFqn); if (nodeToMoveFqn == null || newParentFqn == null) throw new NullPointerException("Cannot accept null parameters!"); if (trace) log.trace("Not doing anything as this node is equal with its parent"); if (trace) log.trace("The parent does not have the child that needs to be moved. Returning..."); return; if (trace) log.trace("Did not find the node that needs to be moved. Returning..."); return; // nothing to do here! if (trace) log.tracef("The new parent (%s) did not exists, was created", newParentFqn); for (Object child : nodeToMove.getChildrenNames()) { if (trace) log.tracef("Moving child %s", child); Fqn oldChildFqn = Fqn.fromRelativeElements(nodeToMoveFqn, child); move(cache, oldChildFqn, newFqn); log.tracef("Successfully moved node '%s' to '%s'", nodeToMoveFqn, newParentFqn);
/**
 * Compares the values held by the primary and backup owner for the same entry.
 *
 * @param entry0      the entry as seen by the main owner (may be null)
 * @param entry1      the entry as seen by the backup owner (may be null)
 * @param mainOwner   address of the main owner, for diagnostics only
 * @param backupOwner address of the backup owner, for diagnostics only
 * @return true if both owners agree on the value (both null counts as agreement)
 */
protected boolean checkOwnerEntries(InternalCacheEntry entry0, InternalCacheEntry entry1, Address mainOwner, Address backupOwner) {
   Object mainOwnerValue = entry0 == null ? null : entry0.getValue();
   Object otherOwnerValue = entry1 == null ? null : entry1.getValue();
   log.tracef("Main owner value is %s, other Owner Value is %s", mainOwnerValue, otherOwnerValue);
   // Objects.equals replaces the hand-rolled null-safe comparison.
   boolean equals = java.util.Objects.equals(mainOwnerValue, otherOwnerValue);
   if (!equals) {
      print("Consistency error. On main owner(" + mainOwner + ") we had " + mainOwnerValue + " and on backup owner(" + backupOwner + ") we had " + otherOwnerValue);
      log.trace("Consistency error. On main owner(" + mainOwner + ") we had " + mainOwnerValue + " and on backup owner(" + backupOwner + ") we had " + otherOwnerValue);
      return false;
   }
   print("otherOwnerValue = " + otherOwnerValue);
   print("mainOwnerValue = " + mainOwnerValue);
   return true;
}
/**
 * Blocks the calling command on the replication latch when the filter matches it;
 * commands the filter rejects pass through untouched.
 *
 * @param rpcCommand the command being replicated
 * @param filter     decides whether this command should wait on the latch
 * @throws RuntimeException wrapping any failure while waiting
 */
protected void waitForReplicationLatch(ReplicableCommand rpcCommand, Predicate<ReplicableCommand> filter) {
   boolean mustBlock = filter.test(rpcCommand);
   if (!mustBlock) {
      log.tracef("Not blocking command %s", rpcCommand);
      return;
   }
   try {
      // First release anything waiting for a command to block, then wait ourselves.
      if (!blockingLatch.isOpened()) {
         log.debugf("Replication trigger called, releasing any waiters for command to block.");
         blockingLatch.open();
      }
      log.debugf("Replication trigger called, waiting for latch to open.");
      boolean opened = replicationLatch.await(30, TimeUnit.SECONDS);
      assertTrue(opened);
      log.trace("Replication latch opened, continuing.");
   } catch (Exception e) {
      throw new RuntimeException("Unexpected exception!", e);
   }
}
throw new RuntimeException(e); log.tracef("about to process operation %s", operation); switch (operation) { case BEGIN_TX: { setResponse(OperationsResult.BEGIN_TX_OK); } catch (Exception e) { log.trace("Failure on beginning tx", e); setResponse(e); setResponse(OperationsResult.COMMIT_TX_OK); } catch (Exception e) { log.trace("Exception while committing tx", e); setResponse(e); try { cache.put(key, value); log.trace("Successfully executed putKeyValue(" + key + ", " + value + ")"); setResponse(OperationsResult.PUT_KEY_VALUE_OK); } catch (Exception e) {
log.tracef("ViewId before %s", viewId); log.tracef("Viewid middle %s", viewId); log.tracef("Viewid after before %s", viewId); log.tracef("Number of migrated tx is %s", migratedTx.size()); assertEquals(TX_COUNT, migratedTx.size()); log.trace("Releasing the gate"); ccf.gate.open(); log.tracef("For cache %d, localTxCount=%s, remoteTxCount=%s", i, local, remote); log.tracef(String.format("For cache %s , localTxCount=%s, remoteTxCount=%s", i, local, remote)); allZero = allZero && (local == 0); allZero = allZero && (remote == 0);
public void testStateTransferDisabled() throws Exception { // Insert initial data in the cache Set<Object> keys = new HashSet<Object>(); for (int i = 0; i < NUM_KEYS; i++) { Object key = "key" + i; keys.add(key); cache(0).put(key, key); } log.trace("State transfer happens here"); // add a third node addClusterEnabledCacheManager(dccc); waitForClusterToForm(); log.trace("Checking the values from caches..."); for (Object key : keys) { log.tracef("Checking key: %s", key); // check them directly in data container InternalCacheEntry d0 = advancedCache(0).getDataContainer().get(key); InternalCacheEntry d1 = advancedCache(1).getDataContainer().get(key); InternalCacheEntry d2 = advancedCache(2).getDataContainer().get(key); assertEquals(key, d0.getValue()); assertNull(d1); assertNull(d2); } }
@Test(dataProvider = DATA_PROVIDER) public void testScenario(boolean cache1IsOwner, boolean cache2IsOwner) throws Throwable { log.tracef("Start cache1IsOwner = %s, cache2IsOwner %s", cache1IsOwner, cache2IsOwner); AdvancedCache cache1 = (cache1IsOwner ? getFirstOwner(key) : getFirstNonOwner(key)).getAdvancedCache(); AdvancedCache cache2 = (cache2IsOwner ? getFirstOwner(key) : getFirstNonOwner(key)).getAdvancedCache(); log.trace("About to try to acquire a lock."); cache2.getTransactionManager().begin(); if (! cache2.lock(key)) { cache1.getTransactionManager().commit(); assertNull(cache2.get(key)); log.tracef("End cache1IsOwner = %s, cache2IsOwner %s", cache1IsOwner, cache2IsOwner);
/**
 * Stops three of the four cache managers concurrently and verifies that the
 * surviving node ends up alone in both the transport view and the write
 * consistent hash.
 */
public void testMultipleLeaves() throws Exception {
   //kill 3 caches at once
   fork(() -> manager(3).stop());
   fork(() -> manager(2).stop());
   fork(() -> manager(1).stop());
   // Wait until node 0's transport view shrinks to just itself.
   eventuallyEquals(1, () -> advancedCache(0).getRpcManager().getTransport().getMembers().size());
   log.trace("MultipleNodesLeavingTest.testMultipleLeaves");
   TestingUtil.blockUntilViewsReceived(60000, false, cache(0));
   TestingUtil.waitForNoRebalance(cache(0));
   // After rebalancing settles, the write CH must also contain only the survivor.
   List<Address> caches = advancedCache(0).getDistributionManager().getWriteConsistentHash().getMembers();
   log.tracef("caches = %s", caches);
   int size = caches.size();
   assert size == 1;
}
}
Arrays.fill(lastWrittenValues, "v0"); log.trace("Before split."); splitCluster(descriptors); for (int i = 0; i < descriptors.length; ++i) { log.tracef("Before merge #%d", merge); int prevPartitions = partitions.length; partition(0).merge(partition(1)); assertEquals(partitions.length, prevPartitions - 1); log.tracef("After merge #%d", merge);
log.trace("Lock transfer happens here"); killCache(); log.trace("Allow the prepare RPC to proceed"); blockedPrepare.send().receiveAll(); log.tracef("Prepare finished"); log.trace("About to commit existing transactions."); controlledRpcManager.excludeCommands(CommitCommand.class, TxCompletionNotificationCommand.class); tm.resume(tx);
log.trace("Lock transfer happens here"); migratedKey = key; migratedTransaction = key2Tx.get(key); log.trace("Migrated key = " + migratedKey); log.trace("Migrated transaction = " + ((EmbeddedTransaction) migratedTransaction).getEnlistedResources()); break; log.trace("Checking the values from caches..."); for (Object key : key2Tx.keySet()) { log.tracef("Checking key: %s", key); Object expectedValue = key; if (key.equals(migratedKey)) {