/**
 * Creates a completer backed by two asynchronously-reloaded caches:
 * table names (refreshed every {@code RELOAD_TIME_MINUTES}) and function names.
 *
 * @param queryRunner runner used to execute metadata queries; must not be null
 */
public TableNameCompleter(QueryRunner queryRunner)
{
    // Fix: the message now names the actual null argument; the old text
    // ("queryRunner session was null!") misleadingly referred to a session.
    this.queryRunner = requireNonNull(queryRunner, "queryRunner is null");

    // Table names go stale as schemas change, so refresh periodically; the
    // async reload keeps serving the previous value while a refresh runs.
    tableCache = CacheBuilder.newBuilder()
            .refreshAfterWrite(RELOAD_TIME_MINUTES, TimeUnit.MINUTES)
            .build(asyncReloading(CacheLoader.from(this::listTables), executor));

    // Function list is effectively static: load per key on demand, no timed refresh.
    functionCache = CacheBuilder.newBuilder()
            .build(asyncReloading(CacheLoader.from(this::listFunctions), executor));
}
/**
 * Builds the two location caches: a short-lived negative cache for failed
 * lookups, and a long-lived positive cache refreshed in the background.
 */
public NetworkLocationCache(NetworkTopology networkTopology)
{
    this.networkTopology = requireNonNull(networkTopology, "networkTopology is null");

    // Remember failed lookups briefly so the topology is not hammered.
    this.negativeCache = CacheBuilder.newBuilder()
            .expireAfterWrite(NEGATIVE_CACHE_DURATION.toMillis(), MILLISECONDS)
            .build();

    // Successful locations live for a day; every 12 hours they are refreshed
    // asynchronously, serving the stale value while the reload runs.
    this.cache = CacheBuilder.newBuilder()
            .refreshAfterWrite(12, HOURS)
            .expireAfterWrite(1, DAYS)
            .build(asyncReloading(CacheLoader.from(this::locate), executor));
}
/**
 * Registry of compiled Grok patterns, kept in two short-lived caches that
 * reload asynchronously on the daemon scheduler.
 *
 * @param serverEventBus     bus this registry subscribes to for change events
 * @param grokPatternService backing service supplying the pattern definitions
 * @param daemonExecutor     scheduler used for async cache reloads
 */
@Inject
public GrokPatternRegistry(EventBus serverEventBus,
                           GrokPatternService grokPatternService,
                           @Named("daemonScheduler") ScheduledExecutorService daemonExecutor)
{
    // Fix: fail fast on a missing dependency instead of NPE-ing on first use.
    // Fully qualified to avoid requiring a new static import.
    this.grokPatternService = java.util.Objects.requireNonNull(grokPatternService, "grokPatternService is null");

    grokCache = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.MINUTES) // prevent from hanging on to memory forever
            .build(asyncReloading(new GrokReloader(false), daemonExecutor));
    grokCacheNamedOnly = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.MINUTES) // prevent from hanging on to memory forever
            .build(asyncReloading(new GrokReloader(true), daemonExecutor));

    // trigger initial loading
    reload();

    // NOTE(review): registering `this` lets the bus deliver events before the
    // constructor returns (a `this`-escape). Safe only because all fields are
    // assigned above — consider moving registration to a lifecycle hook.
    serverEventBus.register(this);
}
/**
 * Wires up thrift metadata with a table-metadata cache that both expires and
 * asynchronously refreshes on the dedicated metadata-refresh executor.
 */
@Inject
public ThriftMetadata(
        DriftClient<PrestoThriftService> client,
        ThriftHeaderProvider thriftHeaderProvider,
        TypeManager typeManager,
        @ForMetadataRefresh Executor metadataRefreshExecutor)
{
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.thriftHeaderProvider = requireNonNull(thriftHeaderProvider, "thriftHeaderProvider is null");
    this.client = requireNonNull(client, "client is null");

    // Entries refresh in the background well before they expire, so callers
    // normally see a (possibly slightly stale) cached value rather than a miss.
    this.tableCache = CacheBuilder.newBuilder()
            .refreshAfterWrite(REFRESH_AFTER_WRITE.toMillis(), MILLISECONDS)
            .expireAfterWrite(EXPIRE_AFTER_WRITE.toMillis(), MILLISECONDS)
            .build(asyncReloading(CacheLoader.from(this::getTableMetadataInternal), metadataRefreshExecutor));
}
.build(asyncReloading(CacheLoader.from(this::loadAllDatabases), executor)); .build(asyncReloading(CacheLoader.from(this::loadDatabase), executor)); .build(asyncReloading(CacheLoader.from(this::loadAllTables), executor)); .build(asyncReloading(new CacheLoader<HiveTableName, PartitionStatistics>() .build(asyncReloading(new CacheLoader<HivePartitionName, PartitionStatistics>() .build(asyncReloading(CacheLoader.from(this::loadTable), executor)); .build(asyncReloading(CacheLoader.from(this::loadAllViews), executor)); .build(asyncReloading(CacheLoader.from(this::loadPartitionNames), executor)); .build(asyncReloading(CacheLoader.from(this::loadPartitionNamesByParts), executor)); .build(asyncReloading(new CacheLoader<HivePartitionName, Optional<Partition>>() .build(asyncReloading(CacheLoader.from(this::loadRoles), executor)); .build(asyncReloading(CacheLoader.from(key -> loadTablePrivileges(key.getUser(), key.getDatabase(), key.getTable())), executor));
/**
 * Creates one transport client per distinct cluster named in the table
 * descriptions, plus a refreshing, size-bounded column-metadata cache.
 *
 * @param descriptions provider of all known table descriptions
 * @param config       connector configuration (timeouts, retry policy)
 * @throws IOException if a cluster host cannot be resolved
 */
@Inject
public ElasticsearchClient(ElasticsearchTableDescriptionProvider descriptions, ElasticsearchConnectorConfig config)
        throws IOException
{
    tableDescriptions = requireNonNull(descriptions, "description is null");
    ElasticsearchConnectorConfig configuration = requireNonNull(config, "config is null");
    requestTimeout = configuration.getRequestTimeout();
    maxAttempts = configuration.getMaxRequestRetries();
    maxRetryTime = configuration.getMaxRetryTime();

    try {
        for (ElasticsearchTableDescription tableDescription : tableDescriptions.getAllTableDescriptions()) {
            // One client per cluster; multiple tables may share a cluster.
            if (!clients.containsKey(tableDescription.getClusterName())) {
                Settings settings = Settings.builder().put("cluster.name", tableDescription.getClusterName()).build();
                TransportAddress address = new TransportAddress(InetAddress.getByName(tableDescription.getHost()), tableDescription.getPort());
                TransportClient client = new PreBuiltTransportClient(settings).addTransportAddress(address);
                clients.put(tableDescription.getClusterName(), client);
            }
        }
    }
    catch (RuntimeException | IOException e) {
        // Fix: previously a failure on a later host (e.g. UnknownHostException)
        // leaked every transport client already opened in this loop.
        clients.values().forEach(TransportClient::close);
        clients.clear();
        throw e;
    }

    this.columnMetadataCache = CacheBuilder.newBuilder()
            .expireAfterWrite(30, MINUTES)
            .refreshAfterWrite(15, MINUTES)
            .maximumSize(500)
            .build(asyncReloading(CacheLoader.from(this::loadColumns), executor));
}
CacheLoader<Object, Object> asyncReloader = CacheLoader.asyncReloading(baseLoader, executor);
/**
 * Builds a loading cache from this builder's configuration, backed either by
 * Caffeine or by Guava depending on {@code isCaffeine()}.
 *
 * <p>NOTE(review): on the Guava path the loader is always wrapped in
 * {@code asyncReloading}, regardless of {@code isAsync()} — presumably
 * intentional (Guava refreshes are made asynchronous unconditionally), but
 * worth confirming against the Caffeine branch, which does honor
 * {@code isAsync()}.
 */
public <K, V> LoadingCache<K, V> build(CacheLoader<K, V> loader)
{
    LoadingCache<K, V> cache;
    if (isCaffeine()) {
        // Caffeine: either an async cache exposed through its synchronous
        // view, or a plain synchronous cache.
        cache = isAsync() ? caffeine.buildAsync(loader).synchronous() : caffeine.build(loader);
    }
    else {
        // Guava: adapt the loader via SingleLoader, make refreshes
        // asynchronous on `executor`, then wrap the Guava cache so it
        // presents the common LoadingCache interface (with ticker/stats).
        cache = new GuavaLoadingCache<>(guava.build(
                com.google.common.cache.CacheLoader.asyncReloading(
                        new SingleLoader<>(loader), executor)),
                ticker, isRecordingStats());
    }
    // Remember the last-built cache on the builder — presumably read
    // elsewhere; TODO confirm against the rest of the class.
    this.cache = cache;
    return cache;
}
/**
 * Sets up the completion caches for table and function names.
 */
public TableNameCompleter(QueryRunner queryRunner)
{
    this.queryRunner = requireNonNull(queryRunner, "queryRunner session was null!");

    // Functions rarely change: async (re)load on demand, no timed refresh.
    functionCache = CacheBuilder.newBuilder()
            .build(asyncReloading(CacheLoader.from(this::listFunctions), executor));

    // Tables change as schemas evolve; background refresh keeps completion
    // serving the previous list while a reload is in flight.
    tableCache = CacheBuilder.newBuilder()
            .refreshAfterWrite(RELOAD_TIME_MINUTES, TimeUnit.MINUTES)
            .build(asyncReloading(CacheLoader.from(this::listTables), executor));
}
/**
 * Creates a completer with an auto-refreshing table-name cache and an
 * on-demand function-name cache, both reloaded asynchronously.
 *
 * @param queryRunner runner used to execute metadata queries; must not be null
 */
public TableNameCompleter(QueryRunner queryRunner)
{
    // Fix: message names the null argument; the old "queryRunner session was
    // null!" text wrongly referred to a session.
    this.queryRunner = requireNonNull(queryRunner, "queryRunner is null");

    // Refresh table names periodically; lookups keep returning the previous
    // value while the async reload runs.
    tableCache = CacheBuilder.newBuilder()
            .refreshAfterWrite(RELOAD_TIME_MINUTES, TimeUnit.MINUTES)
            .build(asyncReloading(CacheLoader.from(this::listTables), executor));

    // Function names are effectively static — no timed refresh needed.
    functionCache = CacheBuilder.newBuilder()
            .build(asyncReloading(CacheLoader.from(this::listFunctions), executor));
}
public TableNameCompleter(QueryRunner queryRunner) { this.queryRunner = requireNonNull(queryRunner, "queryRunner session was null!"); tableCache = CacheBuilder.newBuilder() .refreshAfterWrite(RELOAD_TIME_MINUTES, TimeUnit.MINUTES) .build(asyncReloading(CacheLoader.from(this::listTables), executor)); functionCache = CacheBuilder.newBuilder() .build(asyncReloading(CacheLoader.from(this::listFunctions), executor)); }
/**
 * Creates a completer with an auto-refreshing table-name cache and an
 * on-demand function-name cache.
 *
 * @param queryRunner runner used to execute metadata queries; must not be null
 */
public TableNameCompleter(QueryRunner queryRunner)
{
    this.queryRunner = requireNonNull(queryRunner, "queryRunner session was null!");

    // Idiom: CacheLoader.from(lambda) replaces the verbose anonymous
    // CacheLoader subclasses with identical behavior.
    tableCache = CacheBuilder.newBuilder()
            .refreshAfterWrite(RELOAD_TIME_MINUTES, TimeUnit.MINUTES)
            .build(asyncReloading(CacheLoader.from(
                    schemaName -> queryMetadata(format(
                            "SELECT table_name FROM information_schema.tables WHERE table_schema = '%s'",
                            schemaName))),
                    executor));

    // The function list is schema-independent: the loader deliberately
    // ignores its key, so every key maps to the same "SHOW FUNCTIONS" result.
    functionCache = CacheBuilder.newBuilder()
            .build(asyncReloading(CacheLoader.from(schemaName -> queryMetadata("SHOW FUNCTIONS")), executor));
}
public NetworkLocationCache(NetworkTopology networkTopology) { this.networkTopology = requireNonNull(networkTopology, "networkTopology is null"); this.cache = CacheBuilder.newBuilder() .expireAfterWrite(1, DAYS) .refreshAfterWrite(12, HOURS) .build(asyncReloading(CacheLoader.from(this::locate), executor)); this.negativeCache = CacheBuilder.newBuilder() .expireAfterWrite(NEGATIVE_CACHE_DURATION.toMillis(), MILLISECONDS) .build(); }
/**
 * Registry of compiled Grok patterns backed by two short-lived caches that
 * reload asynchronously on the daemon scheduler.
 *
 * @param serverEventBus     bus this registry subscribes to for change events
 * @param grokPatternService backing service supplying the pattern definitions
 * @param daemonExecutor     scheduler used for async cache reloads
 */
@Inject
public GrokPatternRegistry(EventBus serverEventBus,
                           GrokPatternService grokPatternService,
                           @Named("daemonScheduler") ScheduledExecutorService daemonExecutor)
{
    // Fix: fail fast on a missing dependency instead of NPE-ing later.
    // Fully qualified so no new static import is required.
    this.grokPatternService = java.util.Objects.requireNonNull(grokPatternService, "grokPatternService is null");

    grokCache = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.MINUTES) // prevent from hanging on to memory forever
            .build(asyncReloading(new GrokReloader(false), daemonExecutor));
    grokCacheNamedOnly = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.MINUTES) // prevent from hanging on to memory forever
            .build(asyncReloading(new GrokReloader(true), daemonExecutor));

    // trigger initial loading
    reload();

    // NOTE(review): registering `this` during construction is a `this`-escape;
    // safe only because every field is assigned above.
    serverEventBus.register(this);
}
/**
 * Creates the location caches: long-lived positive entries with asynchronous
 * refresh, plus a briefly-remembered negative cache.
 */
public NetworkLocationCache(NetworkTopology networkTopology)
{
    this.networkTopology = requireNonNull(networkTopology, "networkTopology is null");

    // Entries expire after a day but are re-fetched in the background twice
    // a day, so readers see at most ~12h-stale data without ever blocking.
    this.cache = CacheBuilder.newBuilder()
            .refreshAfterWrite(12, HOURS)
            .expireAfterWrite(1, DAYS)
            .build(asyncReloading(CacheLoader.from(this::locate), executor));

    // Lookup failures are cached for a short window to avoid retry storms.
    this.negativeCache = CacheBuilder.newBuilder()
            .expireAfterWrite(NEGATIVE_CACHE_DURATION.toMillis(), MILLISECONDS)
            .build();
}
/**
 * Builds the network-location caches: a day-long positive cache with
 * asynchronous 12-hour refresh, and a short-lived negative cache.
 *
 * @param networkTopology topology used to resolve host locations; must not be null
 */
public NetworkLocationCache(NetworkTopology networkTopology)
{
    this.networkTopology = requireNonNull(networkTopology, "networkTopology is null");

    // Idiom: CacheLoader.from(method ref) replaces the anonymous CacheLoader
    // subclass that merely delegated to locate(host) — identical behavior.
    this.cache = CacheBuilder.newBuilder()
            .expireAfterWrite(1, DAYS)
            .refreshAfterWrite(12, HOURS)
            .build(asyncReloading(CacheLoader.from(this::locate), executor));

    // Failed lookups are remembered briefly to throttle repeated attempts.
    this.negativeCache = CacheBuilder.newBuilder()
            .expireAfterWrite(NEGATIVE_CACHE_DURATION.toMillis(), MILLISECONDS)
            .build();
}
@Inject public ThriftMetadata( DriftClient<PrestoThriftService> client, ThriftHeaderProvider thriftHeaderProvider, TypeManager typeManager, @ForMetadataRefresh Executor metadataRefreshExecutor) { this.client = requireNonNull(client, "client is null"); this.thriftHeaderProvider = requireNonNull(thriftHeaderProvider, "thriftHeaderProvider is null"); this.typeManager = requireNonNull(typeManager, "typeManager is null"); this.tableCache = CacheBuilder.newBuilder() .expireAfterWrite(EXPIRE_AFTER_WRITE.toMillis(), MILLISECONDS) .refreshAfterWrite(REFRESH_AFTER_WRITE.toMillis(), MILLISECONDS) .build(asyncReloading(CacheLoader.from(this::getTableMetadataInternal), metadataRefreshExecutor)); }
.refreshAfterWrite(refreshInterval + 2, TimeUnit.SECONDS) .build( CacheLoader.asyncReloading( new CacheLoader<AuthKey, UserIdentity>() { @Override
.build(asyncReloading(CacheLoader.from(this::loadAllDatabases), executor)); .build(asyncReloading(CacheLoader.from(this::loadDatabase), executor)); .build(asyncReloading(CacheLoader.from(this::loadAllTables), executor)); .build(asyncReloading(new CacheLoader<HiveTableName, PartitionStatistics>() .build(asyncReloading(new CacheLoader<HivePartitionName, PartitionStatistics>() .build(asyncReloading(CacheLoader.from(this::loadTable), executor)); .build(asyncReloading(CacheLoader.from(this::loadAllViews), executor)); .build(asyncReloading(CacheLoader.from(this::loadPartitionNames), executor)); .build(asyncReloading(CacheLoader.from(this::loadPartitionNamesByParts), executor)); .build(asyncReloading(new CacheLoader<HivePartitionName, Optional<Partition>>() .build(asyncReloading(CacheLoader.from(key -> loadTablePrivileges(key.getDatabase(), key.getTable(), key.getPrincipal())), executor)); .build(asyncReloading(CacheLoader.from(() -> loadRoles()), executor)); .build(asyncReloading(CacheLoader.from(this::loadRoleGrants), executor));
CacheLoader.asyncReloading(baseLoader, executor);