/**
 * @param record       The {@link Record} to merge.
 * @param cacheHeaders The {@link CacheHeaders} associated with the request which generated this record.
 * @return A set of record field keys that have changed. This set is returned by {@link Record#mergeWith(Record)}.
 */
@NotNull public Set<String> merge(@NotNull final Record record, @NotNull final CacheHeaders cacheHeaders) {
  checkNotNull(record, "record == null");
  checkNotNull(cacheHeaders, "cacheHeaders == null");

  if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
    return Collections.emptySet();
  }

  Set<String> nextCacheChangedKeys = nextCache().map(new Function<NormalizedCache, Set<String>>() {
    @NotNull @Override public Set<String> apply(@NotNull NormalizedCache cache) {
      return cache.merge(record, cacheHeaders);
    }
  }).or(Collections.<String>emptySet());

  Set<String> currentCacheChangedKeys = performMerge(record, cacheHeaders);

  Set<String> changedKeys = new HashSet<>();
  changedKeys.addAll(nextCacheChangedKeys);
  changedKeys.addAll(currentCacheChangedKeys);
  return changedKeys;
}
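// A minimal usage sketch, not part of the original source: it shows how a caller
// might use the changed-key set returned by merge(Record, CacheHeaders), and how
// the DO_NOT_STORE header short-circuits the write. `mergeExample` is a
// hypothetical helper; `cache` and `record` stand in for any instances.
static void mergeExample(NormalizedCache cache, Record record) {
  // A normal merge reports which field keys changed across the whole chain.
  Set<String> changedKeys = cache.merge(record, CacheHeaders.NONE);

  // A merge under DO_NOT_STORE writes nothing and reports no changes.
  CacheHeaders doNotStore = CacheHeaders.builder()
      .addHeader(ApolloCacheHeaders.DO_NOT_STORE, "true")
      .build();
  Set<String> noChanges = cache.merge(record, doNotStore);
  assert noChanges.isEmpty();
}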
/**
 * Calls through to {@link NormalizedCache#merge(Record, CacheHeaders)} for each record. Implementations should
 * override this method if the underlying storage technology can offer an optimized manner to store multiple records.
 *
 * @param recordSet    The set of {@link Record}s to merge.
 * @param cacheHeaders The {@link CacheHeaders} associated with the request which generated these records.
 * @return A set of record field keys that have changed, aggregated from the sets returned by
 * {@link Record#mergeWith(Record)} for each record.
 */
@NotNull public Set<String> merge(@NotNull final Collection<Record> recordSet,
    @NotNull final CacheHeaders cacheHeaders) {
  checkNotNull(recordSet, "recordSet == null");
  checkNotNull(cacheHeaders, "cacheHeaders == null");

  if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
    return Collections.emptySet();
  }

  Set<String> nextCacheChangedKeys = nextCache().map(new Function<NormalizedCache, Set<String>>() {
    @NotNull @Override public Set<String> apply(@NotNull NormalizedCache cache) {
      return cache.merge(recordSet, cacheHeaders);
    }
  }).or(Collections.<String>emptySet());

  Set<String> currentCacheChangedKeys = new HashSet<>();
  for (Record record : recordSet) {
    currentCacheChangedKeys.addAll(performMerge(record, cacheHeaders));
  }

  Set<String> changedKeys = new HashSet<>();
  changedKeys.addAll(nextCacheChangedKeys);
  changedKeys.addAll(currentCacheChangedKeys);
  return changedKeys;
}
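// A minimal override sketch, not part of the original source, of the batch
// optimization the javadoc above invites. Assumptions: the subclass is backed by
// a store with a batch write API (`batchStore.writeAll(...)` is a hypothetical
// call), and Record#keys() is taken to report every field key of a record as
// changed. A real implementation would also propagate the merge to nextCache()
// as the base method does; that is omitted here for brevity.
@NotNull @Override
public Set<String> merge(@NotNull final Collection<Record> recordSet, @NotNull final CacheHeaders cacheHeaders) {
  if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
    return Collections.emptySet();
  }
  batchStore.writeAll(recordSet); // hypothetical single batch write to the backing store
  Set<String> changedKeys = new HashSet<>();
  for (Record record : recordSet) {
    changedKeys.addAll(record.keys()); // assumption: treat every field key as changed
  }
  return changedKeys;
}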
@Test public void testClearSecondaryCache() {
  LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION);
  NormalizedCache primaryCache = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION)
      .chain(secondaryCacheFactory)
      .createChain(basicFieldAdapter);

  Record record = Record.builder("key").build();
  primaryCache.merge(record, CacheHeaders.NONE);

  primaryCache.nextCache().get().clearAll();

  assertThat(primaryCache.nextCache().get().loadRecord("key", CacheHeaders.NONE)).isNull();
}
@Test public void testDualCacheMultipleRecord() {
  LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION);
  NormalizedCache primaryCache = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION)
      .chain(secondaryCacheFactory)
      .createChain(basicFieldAdapter);

  Record record1 = Record.builder("root1").addField("bar", "bar").build();
  Record record2 = Record.builder("root2").addField("bar", "bar").build();
  Record record3 = Record.builder("root3").addField("bar", "bar").build();

  Collection<Record> records = Arrays.asList(record1, record2, record3);
  Collection<String> keys = Arrays.asList(record1.key(), record2.key(), record3.key());

  primaryCache.merge(records, CacheHeaders.NONE);

  // verify write-through behavior
  assertThat(primaryCache.loadRecords(keys, CacheHeaders.NONE).size()).isEqualTo(3);
  assertThat(primaryCache.nextCache().get().loadRecords(keys, CacheHeaders.NONE).size()).isEqualTo(3);
}
@Test public void testDualCacheSingleRecord() {
  LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION);
  NormalizedCache primaryCache = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION)
      .chain(secondaryCacheFactory)
      .createChain(basicFieldAdapter);

  Record record = Record.builder("root").addField("bar", "bar").build();
  primaryCache.merge(record, CacheHeaders.NONE);

  // verify write-through behavior
  assertThat(primaryCache.loadRecord("root", CacheHeaders.NONE).field("bar")).isEqualTo("bar");
  assertThat(primaryCache.nextCache().get().loadRecord("root", CacheHeaders.NONE).field("bar")).isEqualTo("bar");
}