@Test public void testClearSecondaryCache() {
  LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION);
  NormalizedCache primaryCache = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION)
      .chain(secondaryCacheFactory)
      .createChain(basicFieldAdapter);

  primaryCache.merge(Record.builder("key").build(), CacheHeaders.NONE);

  // Wipe only the chained (secondary) cache and confirm the record is gone from it.
  NormalizedCache secondaryCache = primaryCache.nextCache().get();
  secondaryCache.clearAll();
  assertThat(secondaryCache.loadRecord("key", CacheHeaders.NONE)).isNull();
}
/**
 * Removes the cached record identified by {@code cacheKey}, without cascading to records it references
 * (delegates to the two-argument overload with {@code cascade = false}).
 *
 * @param cacheKey of record to be removed
 * @return {@code true} if record with such key was successfully removed, {@code false} otherwise
 */
public boolean remove(@NotNull CacheKey cacheKey) { return remove(cacheKey, false); }
/**
 * Builds the full cache chain: the cache produced by this factory, followed by the chain
 * produced by the next factory (if any).
 */
public final NormalizedCache createChain(final RecordFieldJsonAdapter recordFieldAdapter) {
  // No next factory: this factory's cache is the whole chain.
  if (!nextFactory.isPresent()) {
    return create(recordFieldAdapter);
  }
  NormalizedCache head = create(recordFieldAdapter);
  NormalizedCache tail = nextFactory.get().createChain(recordFieldAdapter);
  return head.chain(tail);
}
/**
 * Merges {@code record} into every cache in the chain (write-through: the next cache first,
 * then this one via {@link #performMerge}).
 *
 * @param record The {@link Record} to merge.
 * @param cacheHeaders The {@link CacheHeaders} associated with the request which generated this record.
 * @return A set of record field keys that have changed. This set is returned by {@link Record#mergeWith(Record)}.
 */
@NotNull public Set<String> merge(@NotNull final Record record, @NotNull final CacheHeaders cacheHeaders) {
  // Fix: the message previously read "apolloRecord == null", which names a nonexistent parameter;
  // it now matches the actual parameter, consistent with merge(Collection, CacheHeaders).
  checkNotNull(record, "record == null");
  checkNotNull(cacheHeaders, "cacheHeaders == null");
  if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
    return Collections.emptySet();
  }
  // Push the record down the chain first, collecting the keys the next caches report changed.
  Set<String> nextCacheChangedKeys = nextCache().map(new Function<NormalizedCache, Set<String>>() {
    @NotNull @Override public Set<String> apply(@NotNull NormalizedCache cache) {
      return cache.merge(record, cacheHeaders);
    }
  }).or(Collections.<String>emptySet());
  Set<String> currentCacheChangedKeys = performMerge(record, cacheHeaders);
  // Union of changes reported by this cache and the rest of the chain.
  Set<String> changedKeys = new HashSet<>(nextCacheChangedKeys);
  changedKeys.addAll(currentCacheChangedKeys);
  return changedKeys;
}
@Test public void testClearAll() {
  LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION);
  NormalizedCache primaryCacheStore = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION)
      .chain(secondaryCacheFactory)
      .createChain(basicFieldAdapter);

  primaryCacheStore.merge(Record.builder("key").build(), CacheHeaders.NONE);

  // Clearing from the head of the chain must leave no trace of the record anywhere.
  primaryCacheStore.clearAll();
  assertThat(primaryCacheStore.loadRecord("key", CacheHeaders.NONE)).isNull();
}
// Fragment of the chained multi-record merge: delegates the whole record set to the next cache
// in the chain; the trailing .or(...) yields an empty changed-key set when no next cache exists.
@NotNull @Override public Set<String> apply(@NotNull NormalizedCache cache) { return cache.merge(recordSet, cacheHeaders); } }).or(Collections.<String>emptySet());
@Test public void testDualCacheSingleRecord() { LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION); NormalizedCache primaryCache = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION) .chain(secondaryCacheFactory).createChain(basicFieldAdapter); Record.Builder recordBuilder = Record.builder("root"); recordBuilder.addField("bar", "bar"); final Record record = recordBuilder.build(); primaryCache.merge(record, CacheHeaders.NONE); //verify write through behavior assertThat(primaryCache.loadRecord("root", CacheHeaders.NONE).field("bar")).isEqualTo("bar"); assertThat(primaryCache.nextCache().get().loadRecord("root", CacheHeaders.NONE).field("bar")).isEqualTo("bar"); }
// Fragment of a chain-wide clearAll(): action applied to the optional next cache so the
// clear propagates down the whole chain.
@Override public void apply(@NotNull NormalizedCache cache) { cache.clearAll(); } });
// Fragment of a chained record lookup: asks the next cache for the record and wraps the
// possibly-null result in an Optional.
@NotNull @Override public Optional<Record> apply(@NotNull NormalizedCache cache) { return Optional.fromNullable(cache.loadRecord(key, cacheHeaders)); } });
@Test public void testDualCacheMultipleRecord() { LruNormalizedCacheFactory secondaryCacheFactory = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION); NormalizedCache primaryCache = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION) .chain(secondaryCacheFactory).createChain(basicFieldAdapter); Record.Builder recordBuilder = Record.builder("root1"); recordBuilder.addField("bar", "bar"); final Record record1 = recordBuilder.build(); recordBuilder = Record.builder("root2"); recordBuilder.addField("bar", "bar"); final Record record2 = recordBuilder.build(); recordBuilder = Record.builder("root3"); recordBuilder.addField("bar", "bar"); final Record record3 = recordBuilder.build(); Collection<Record> records = Arrays.asList(record1, record2, record3); Collection<String> keys = Arrays.asList(record1.key(), record2.key(), record3.key()); primaryCache.merge(records, CacheHeaders.NONE); assertThat(primaryCache.loadRecords(keys, CacheHeaders.NONE).size()).isEqualTo(3); //verify write through behavior assertThat(primaryCache.loadRecords(keys, CacheHeaders.NONE).size()).isEqualTo(3); assertThat(primaryCache.nextCache().get() .loadRecords(keys, CacheHeaders.NONE).size()).isEqualTo(3); }
// Fragment of the chained single-record merge: delegates the record to the next cache and
// falls back to an empty changed-key set when there is no next cache.
@NotNull @Override public Set<String> apply(@NotNull NormalizedCache cache) { return cache.merge(record, cacheHeaders); } }).or(Collections.<String>emptySet());
// Fragment of a chain-wide clearAll(): same propagation action as elsewhere in this file,
// applied to the optional next cache.
@Override public void apply(@NotNull NormalizedCache cache) { cache.clearAll(); } });
/**
 * Reads multiple records by calling through to {@link NormalizedCache#loadRecord(String, CacheHeaders)}
 * once per key. Implementations should override this method if the underlying storage technology can
 * offer an optimized manner to read multiple records.
 *
 * @param keys The set of {@link Record} keys to read.
 * @param cacheHeaders The cache headers associated with the request which generated this record.
 */
@NotNull public Collection<Record> loadRecords(@NotNull Collection<String> keys, @NotNull CacheHeaders cacheHeaders) {
  final List<Record> found = new ArrayList<>(keys.size());
  for (final String cacheKey : keys) {
    final Record loaded = loadRecord(cacheKey, cacheHeaders);
    // Keys with no record are silently omitted from the result.
    if (loaded == null) {
      continue;
    }
    found.add(loaded);
  }
  return found;
}
/** * Calls through to {@link NormalizedCache#merge(Record, CacheHeaders)}. Implementations should override this method * if the underlying storage technology can offer an optimized manner to store multiple records. * * @param recordSet The set of Records to merge. * @param cacheHeaders The {@link CacheHeaders} associated with the request which generated this record. * @return A set of record field keys that have changed. This set is returned by {@link Record#mergeWith(Record)}. */ @NotNull public Set<String> merge(@NotNull final Collection<Record> recordSet, @NotNull final CacheHeaders cacheHeaders) { checkNotNull(recordSet, "recordSet == null"); checkNotNull(cacheHeaders, "cacheHeaders == null"); if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) { return Collections.emptySet(); } //noinspection ResultOfMethodCallIgnored Set<String> nextCacheChangedKeys = nextCache().map(new Function<NormalizedCache, Set<String>>() { @NotNull @Override public Set<String> apply(@NotNull NormalizedCache cache) { return cache.merge(recordSet, cacheHeaders); } }).or(Collections.<String>emptySet()); Set<String> currentCacheChangedKeys = new HashSet<>(); for (Record record : recordSet) { currentCacheChangedKeys.addAll(performMerge(record, cacheHeaders)); } Set<String> changedKeys = new HashSet<>(); changedKeys.addAll(nextCacheChangedKeys); changedKeys.addAll(currentCacheChangedKeys); return changedKeys; }
// Fragment of a chained multi-record write: merges each record of the set into the next cache
// one at a time.
@Override public void apply(@Nonnull NormalizedCache cache) { for (Record record : recordSet) { cache.merge(record, cacheHeaders); } } });
// Fragment of a chain-wide clearAll() (javax @Nonnull variant): propagates the clear to the
// optional next cache.
@Override public void apply(@Nonnull NormalizedCache cache) { cache.clearAll(); } });
@Test public void testDualCache_recordNotPresent() {
  // Two-level LRU chain with nothing stored in it.
  NormalizedCache primaryCacheStore = new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION)
      .chain(new LruNormalizedCacheFactory(EvictionPolicy.NO_EVICTION))
      .createChain(basicFieldAdapter);

  // A miss in every cache of the chain surfaces as null.
  assertThat(primaryCacheStore.loadRecord("not_present_id", CacheHeaders.NONE)).isNull();
}
// Fragment of a chained remove(cacheKey, cascade): delegates the removal to the next cache;
// the trailing .or(Boolean.FALSE) reports "not removed" when there is no next cache.
@NotNull @Override public Boolean apply(@NotNull NormalizedCache cache) { return cache.remove(cacheKey, cascade); } }).or(Boolean.FALSE);
/**
 * Builds the full cache chain for this factory: its own cache, chained (when a next factory
 * is configured) to the chain the next factory builds.
 */
public final NormalizedCache createChain(final RecordFieldJsonAdapter recordFieldAdapter) {
  NormalizedCache cache = create(recordFieldAdapter);
  if (nextFactory.isPresent()) {
    cache = cache.chain(nextFactory.get().createChain(recordFieldAdapter));
  }
  return cache;
}
// Fragment of a chained single-record write (javax @Nonnull variant): merges the record into
// the next cache of the chain.
@Override public void apply(@Nonnull NormalizedCache cache) { cache.merge(apolloRecord, cacheHeaders); } });