/** {@inheritDoc} */
@Override public long getPartitionUpdateCounter() {
    // Delegate to the wrapped entry's per-partition update counter.
    long cntr = e.updateCounter();

    return cntr;
}
/**
 * @param e Entry to process.
 * @param backup Backup entry flag.
 * @return Collected entries to pass to listener (single entry or entries list).
 */
@Nullable Object processEntry(CacheContinuousQueryEntry e, boolean backup) {
    // Route through the common processing path, keyed by the entry's update counter.
    long cntr = e.updateCounter();

    return process0(cntr, e, backup);
}
/**
 * Removes from the backup queue every entry whose update counter has already been
 * acknowledged, i.e. is less than or equal to {@code updateCntr}.
 *
 * @param updateCntr Acknowledged counter. Must not be {@code null} (unboxed in the
 *      comparison below).
 */
void cleanupBackupQueue(Long updateCntr) {
    // Collection.removeIf performs the same iterator-based removal as the former
    // explicit hasNext()/next()/remove() loop, in a single readable statement.
    backupQ.removeIf(backupEntry -> backupEntry.updateCounter() <= updateCntr);
}
/**
 * Records the entry's topology version and tracks the maximum update counter
 * observed per partition.
 *
 * @param e Entry.
 */
private void addEntry(CacheContinuousQueryEntry e) {
    topVers.add(e.topologyVersion());

    // Keep the largest counter seen for this partition; Map.merge is the idiomatic
    // replacement for the get/null-check/put-if-greater sequence.
    updateCntrs.merge(e.partition(), e.updateCounter(), Math::max);
}
/**
 * Collects everything that must be re-sent on partition map exchange: queued backup
 * entries, entries buffered in the current batch, and pending entries, merged into a
 * single map ordered by update counter.
 *
 * @return Backup entries ordered by update counter, or {@code null} if there is
 *      nothing to flush.
 */
@Nullable Collection<CacheContinuousQueryEntry> flushOnExchange() {
    TreeMap<Long, CacheContinuousQueryEntry> ret = null;

    // Snapshot the size up front; sizex()/pollFirst() suggest a concurrent queue that
    // may be appended to while we drain it — TODO confirm backupQ's type.
    int size = backupQ.sizex();

    if (size > 0) {
        ret = new TreeMap<>();

        for (int i = 0; i < size; i++) {
            CacheContinuousQueryEntry e = backupQ.pollFirst();

            if (e != null)
                ret.put(e.updateCounter(), e);
            else
                break; // Queue emptied under us (presumably by a concurrent consumer).
        }
    }

    // Merge in entries accumulated in the current (not yet flushed) batch, if any.
    Batch batch = curBatch.get();

    if (batch != null)
        ret = batch.flushCurrentEntries(ret);

    // Flush still-pending entries as well (NOTE(review): presumably entries waiting on
    // ordering — verify against the producer of 'pending').
    if (!pending.isEmpty()) {
        if (ret == null)
            ret = new TreeMap<>();

        for (CacheContinuousQueryEntry e : pending.values())
            ret.put(e.updateCounter(), e);
    }

    return ret != null ? ret.values() : null;
}
// NOTE(review): fragment of a multi-threaded test — this is the body of an anonymous
// Callable passed to a run-multi-threaded helper whose call starts before this view
// (note the trailing "}, threads, \"test\");"). Each worker awaits the barrier, drains
// queue 'q', feeds every entry through buffer 'b', and records each produced entry in
// 'act0' keyed by its update counter (a single entry or a list may be returned).
@Override public Object call() throws Exception { barrier.await(); Object o; while ((o = q.poll()) != null) { Object res = b.processEntry((CacheContinuousQueryEntry)o, false); if (res != null) { if (res instanceof CacheContinuousQueryEntry) act0.put(((CacheContinuousQueryEntry)res).updateCounter(), (CacheContinuousQueryEntry)res); else { for (CacheContinuousQueryEntry e : ((List<CacheContinuousQueryEntry>)res)) act0.put(e.updateCounter(), e); } } } return null; } }, threads, "test");
/** * @param cctx Cache context. * @param e Entry. */ private void handleBackupEntry(final GridCacheContext cctx, CacheContinuousQueryEntry e) { if (internal || e.updateCounter() == -1L || nodeLeft) // Skip internal query and expire entries. return; CacheContinuousQueryEventBuffer buf = partitionBuffer(cctx, e.partition()); buf.processEntry(e.copyWithDataReset(), true); }
// NOTE(review): fragment — the constructor invocation below begins before this view.
e.isKeepBinary(), e.partition(), e.updateCounter(), e.topologyVersion(), e.flags());
res = new TreeMap<>();
res.put(flushEntry.updateCounter(), flushEntry);
// NOTE(review): duplicate of the previous statement — the second put() overwrites the
// same key with the same value and is a no-op; looks like a copy-paste slip that
// should be removed at the original site.
res.put(flushEntry.updateCounter(), flushEntry);
/** * @param cctx Cache context. * @param e Entry. * @return Entry. */ private Object handleEntry(final GridCacheContext cctx, CacheContinuousQueryEntry e) { assert e != null; assert entryBufs != null; if (internal) { if (e.isFiltered()) return null; else return e; } // Initial query entry. // This events should be fired immediately. if (e.updateCounter() == -1L) return e; CacheContinuousQueryEventBuffer buf = partitionBuffer(cctx, e.partition()); return buf.processEntry(e, false); }
// NOTE(review): stitched fragment — assertions, debug logging and counter bookkeeping
// pulled from several branches of a larger method not fully in view (the trailing
// "else {" is unbalanced here). It tracks 'lastFiredEvt' against each entry's update
// counter and parks out-of-order entries in 'pendingEvts'; do not read as one block.
assert entry.updateCounter() == 0L : entry; log.debug("Handling event [lastFiredEvt=" + lastFiredEvt + ", curTop=" + curTop + ", entUpdCnt=" + entry.updateCounter() + ", partId=" + entry.partition() + ", pendingEvts=" + pendingEvts + ']'); lastFiredEvt = entry.updateCounter(); log.debug("First event [lastFiredEvt=" + lastFiredEvt + ", curTop=" + curTop + ", entUpdCnt=" + entry.updateCounter() + ", partId=" + entry.partition() + ']'); if (entry.updateCounter() == 1L && !entry.isBackup()) { entries = new ArrayList<>(pendingEvts.size()); lastFiredEvt = entry.updateCounter(); log.debug("Partition was lost [lastFiredEvt=" + lastFiredEvt + ", curTop=" + curTop + ", entUpdCnt=" + entry.updateCounter() + ", partId=" + entry.partition() + ", pendingEvts=" + pendingEvts + ']'); if (entry.updateCounter() > lastFiredEvt) pendingEvts.put(entry.updateCounter(), entry); else {
// NOTE(review): test fragment — compares an expected event against the actual event at
// the same index (update counter and filtered count must match).
CacheContinuousQueryEntry actualEvt = actualEntries.get(i); assertEquals(expEvt.updateCounter(), actualEvt.updateCounter()); assertEquals(expEvt.filteredCount(), actualEvt.filteredCount());
/** * @param ctx Context. * @param e entry. * @return Entry collection. */ private Collection<CacheEntryEvent<? extends K, ? extends V>> handleEvent(GridKernalContext ctx, CacheContinuousQueryEntry e) { assert e != null; GridCacheContext<K, V> cctx = cacheContext(ctx); final IgniteCache cache = cctx.kernalContext().cache().jcache(cctx.name()); if (internal) { if (e.isFiltered()) return Collections.emptyList(); else return F.<CacheEntryEvent<? extends K, ? extends V>> asList(new CacheContinuousQueryEvent<K, V>(cache, cctx, e)); } // Initial query entry or evicted entry. These events should be fired immediately. if (e.updateCounter() == -1L) { return !e.isFiltered() ? F.<CacheEntryEvent<? extends K, ? extends V>>asList( new CacheContinuousQueryEvent<K, V>(cache, cctx, e)) : Collections.<CacheEntryEvent<? extends K, ? extends V>>emptyList(); } CacheContinuousQueryPartitionRecovery rec = getOrCreatePartitionRecovery(ctx, e.partition(), e.topologyVersion()); return rec.collectEntries(e, cctx, cache); }
/**
 * Transform event data with {@link #getTransformer()} if exists.
 *
 * @param trans Transformer.
 * @param evt Event to transform.
 * @return Entry contains only transformed data if transformer exists. Unchanged event if transformer is not set.
 * @see #getTransformer()
 */
private CacheContinuousQueryEntry transformToEntry(IgniteClosure<CacheEntryEvent<? extends K, ? extends V>, ?> trans,
    CacheContinuousQueryEvent<? extends K, ? extends V> evt) {
    Object transVal = transform(trans, evt);

    CacheContinuousQueryEntry src = evt.entry();

    // Key and old value are deliberately nulled out: the resulting entry carries only
    // the transformed data, plus the original entry's metadata.
    return new CacheContinuousQueryEntry(
        src.cacheId(),
        src.eventType(),
        null,
        transVal == null ? null : cacheContext(ctx).toCacheObject(transVal),
        null,
        src.isKeepBinary(),
        src.partition(),
        src.updateCounter(),
        src.topologyVersion(),
        src.flags());
}
// NOTE(review): test fragment — verifies that evicted entries (update counter == -1)
// never land in the backup queue.
CacheContinuousQueryEntry e = (CacheContinuousQueryEntry)o; assertNotSame("Evicted entry added to backup queue.", -1L, e.updateCounter());
// NOTE(review): test fragment — field-by-field equality checks between two entries
// (presumably original vs. serialized/deserialized copy — confirm against the caller).
assertEquals(e0.isKeepBinary(), e1.isKeepBinary()); assertEquals(e0.partition(), e1.partition()); assertEquals(e0.updateCounter(), e1.updateCounter());
/** {@inheritDoc} */
@Override public long getPartitionUpdateCounter() {
    // Expose the entry's partition update counter unchanged.
    final long updCntr = e.updateCounter();

    return updCntr;
}
/**
 * @param updateCntr Acknowledged counter.
 */
void cleanupBackupQueue(Long updateCntr) {
    // Drop every queued backup entry the client has already acknowledged.
    for (Iterator<CacheContinuousQueryEntry> it = backupQ.iterator(); it.hasNext(); ) {
        CacheContinuousQueryEntry backupEntry = it.next();

        if (backupEntry.updateCounter() <= updateCntr)
            it.remove();
    }
}
/**
 * @param e Entry to process.
 * @param backup Backup entry flag.
 * @return Collected entries to pass to listener (single entry or entries list).
 */
@Nullable Object processEntry(CacheContinuousQueryEntry e, boolean backup) {
    // The entry's update counter drives ordering in the common processing path.
    return process0(e.updateCounter(), e, backup);
}
/**
 * @param e Entry.
 */
private void addEntry(CacheContinuousQueryEntry e) {
    topVers.add(e.topologyVersion());

    long cntr = e.updateCounter();

    Long prev = updateCntrs.get(e.partition());

    // Remember only the highest counter observed for this partition.
    if (prev == null || cntr > prev)
        updateCntrs.put(e.partition(), cntr);
}
/** * @param cctx Cache context. * @param e Entry. */ private void handleBackupEntry(final GridCacheContext cctx, CacheContinuousQueryEntry e) { if (internal || e.updateCounter() == -1L || nodeLeft) // Skip internal query and expire entries. return; CacheContinuousQueryEventBuffer buf = partitionBuffer(cctx, e.partition()); buf.processEntry(e.copyWithDataReset(), true); }