com.google.common.cache.CacheBuilder Java Examples
The following examples show how to use
com.google.common.cache.CacheBuilder.
Each example is taken from an open-source project; the source file, project, and license are noted above it.
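Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: configure size and expiry on the builder, then call build() with or without a CacheLoader. The class name CacheBuilderSketch and the values used here are illustrative only, not taken from any of the projects below.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

public class CacheBuilderSketch {
    public static void main(String[] args) throws Exception {
        // A loading cache: entries are computed on demand by the CacheLoader,
        // capped at 100 entries and expired 10 minutes after the last write.
        LoadingCache<String, Integer> lengths = CacheBuilder.newBuilder()
                .maximumSize(100)
                .expireAfterWrite(10, TimeUnit.MINUTES)
                .build(new CacheLoader<String, Integer>() {
                    @Override
                    public Integer load(String key) {
                        return key.length(); // stand-in for an expensive computation
                    }
                });

        System.out.println(lengths.get("guava")); // computes and caches the value
        System.out.println(lengths.get("guava")); // served from the cache
    }
}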
Example #1
Source File: KeyProviderCache.java From hadoop with Apache License 2.0

public KeyProviderCache(long expiryMs) {
    cache = CacheBuilder.newBuilder()
        .expireAfterAccess(expiryMs, TimeUnit.MILLISECONDS)
        .removalListener(new RemovalListener<URI, KeyProvider>() {
            @Override
            public void onRemoval(
                RemovalNotification<URI, KeyProvider> notification) {
                try {
                    notification.getValue().close();
                } catch (Throwable e) {
                    LOG.error("Error closing KeyProvider with uri ["
                        + notification.getKey() + "]", e);
                }
            }
        })
        .build();
}
Example #2
Source File: SnapshotManager.java From kylin-on-parquet-v2 with Apache License 2.0

private SnapshotManager(KylinConfig config) {
    this.config = config;
    this.snapshotCache = CacheBuilder.newBuilder()
        .removalListener(new RemovalListener<String, SnapshotTable>() {
            @Override
            public void onRemoval(RemovalNotification<String, SnapshotTable> notification) {
                SnapshotManager.logger.info("Snapshot with resource path {} is removed due to {}",
                    notification.getKey(), notification.getCause());
            }
        })
        .maximumSize(config.getCachedSnapshotMaxEntrySize())
        .expireAfterWrite(1, TimeUnit.DAYS)
        .build(new CacheLoader<String, SnapshotTable>() {
            @Override
            public SnapshotTable load(String key) throws Exception {
                SnapshotTable snapshotTable = SnapshotManager.this.load(key, true);
                return snapshotTable;
            }
        });
}
Example #3
Source File: ProfileTaskCache.java From skywalking with Apache License 2.0

public ProfileTaskCache(ModuleManager moduleManager, CoreModuleConfig moduleConfig) {
    this.moduleManager = moduleManager;

    long initialSize = moduleConfig.getMaxSizeOfProfileTask() / 10L;
    int initialCapacitySize = (int) (initialSize > Integer.MAX_VALUE ? Integer.MAX_VALUE : initialSize);

    profileTaskDownstreamCache = CacheBuilder.newBuilder()
        .initialCapacity(initialCapacitySize)
        .maximumSize(moduleConfig.getMaxSizeOfProfileTask())
        // remove old profile task data
        .expireAfterWrite(Duration.ofMinutes(1))
        .build();

    profileTaskIdCache = CacheBuilder.newBuilder()
        .initialCapacity(initialCapacitySize)
        .maximumSize(moduleConfig.getMaxSizeOfProfileTask())
        .build();
}
Example #4
Source File: SchemaMetadataCache.java From registry with Apache License 2.0

public SchemaMetadataCache(Long size, Long expiryInSecs, final SchemaMetadataFetcher schemaMetadataFetcher) {
    schemaNameToIdMap = Maps.synchronizedBiMap(HashBiMap.create());
    loadingCache = CacheBuilder.newBuilder()
        .maximumSize(size)
        .expireAfterAccess(expiryInSecs, TimeUnit.SECONDS)
        .build(new CacheLoader<Key, SchemaMetadataInfo>() {
            @Override
            public SchemaMetadataInfo load(Key key) throws Exception {
                SchemaMetadataInfo schemaMetadataInfo;
                Key otherKey;
                if (key.getName() != null) {
                    schemaMetadataInfo = schemaMetadataFetcher.fetch(key.getName());
                    otherKey = Key.of(schemaMetadataInfo.getId());
                    schemaNameToIdMap.put(key.getName(), schemaMetadataInfo.getId());
                } else if (key.getId() != null) {
                    schemaMetadataInfo = schemaMetadataFetcher.fetch(key.getId());
                    otherKey = Key.of(schemaMetadataInfo.getSchemaMetadata().getName());
                    schemaNameToIdMap.put(schemaMetadataInfo.getSchemaMetadata().getName(), schemaMetadataInfo.getId());
                } else {
                    throw new RegistryException("Key should have name or id as non null");
                }
                loadingCache.put(otherKey, schemaMetadataInfo);
                return schemaMetadataInfo;
            }
        });
}
Example #5
Source File: AppleDependenciesCache.java From buck with Apache License 2.0

public AppleDependenciesCache(TargetGraph projectGraph) {
    this.depsCache = CacheBuilder.newBuilder()
        .build(
            CacheLoader.from(
                node -> {
                    ImmutableSortedSet.Builder<TargetNode<?>> defaultDepsBuilder =
                        ImmutableSortedSet.naturalOrder();
                    ImmutableSortedSet.Builder<TargetNode<?>> exportedDepsBuilder =
                        ImmutableSortedSet.naturalOrder();
                    AppleBuildRules.addDirectAndExportedDeps(
                        projectGraph, node, defaultDepsBuilder, exportedDepsBuilder, Optional.empty());
                    return new CacheItem(defaultDepsBuilder.build(), exportedDepsBuilder.build());
                }));
}
Example #6
Source File: ClusterAgentAutoScaler.java From titus-control-plane with Apache License 2.0

public ClusterAgentAutoScaler(TitusRuntime titusRuntime,
                              ClusterOperationsConfiguration configuration,
                              AgentManagementService agentManagementService,
                              V3JobOperations v3JobOperations,
                              SchedulingService<? extends TaskRequest> schedulingService,
                              Scheduler scheduler) {
    this.titusRuntime = titusRuntime;
    this.configuration = configuration;
    this.agentManagementService = agentManagementService;
    this.v3JobOperations = v3JobOperations;
    this.schedulingService = schedulingService;
    this.scheduler = scheduler;
    this.clock = titusRuntime.getClock();
    this.taskIdsForPreviousScaleUps = CacheBuilder.newBuilder()
        .expireAfterWrite(TASK_IDS_PREVIOUSLY_SCALED_TTL_MS, TimeUnit.MILLISECONDS)
        .build();
    this.tierTierAutoScalerExecutions = new HashMap<>();
}
Example #7
Source File: FlowableCookieFilter.java From flowable-engine with Apache License 2.0

protected void initUserCache() {
    FlowableCommonAppProperties.Cache cache = properties.getCacheLoginUsers();
    Long userMaxSize = cache.getMaxSize();
    Long userMaxAge = cache.getMaxAge();
    userCache = CacheBuilder.newBuilder()
        .maximumSize(userMaxSize)
        .expireAfterWrite(userMaxAge, TimeUnit.SECONDS)
        .recordStats()
        .build(new CacheLoader<String, FlowableAppUser>() {
            @Override
            public FlowableAppUser load(final String userId) throws Exception {
                RemoteUser user = remoteIdmService.getUser(userId);
                if (user == null) {
                    throw new FlowableException("user not found " + userId);
                }

                Collection<GrantedAuthority> grantedAuthorities = new ArrayList<>();
                for (String privilege : user.getPrivileges()) {
                    grantedAuthorities.add(new SimpleGrantedAuthority(privilege));
                }

                // put account into security context (for controllers to use)
                FlowableAppUser appUser = new FlowableAppUser(user, user.getId(), grantedAuthorities);
                return appUser;
            }
        });
}
Example #8
Source File: XCConfigurationList.java From buck with Apache License 2.0

public XCConfigurationList(AbstractPBXObjectFactory objectFactory) {
    buildConfigurations = new ArrayList<>();
    defaultConfigurationName = Optional.empty();
    defaultConfigurationIsVisible = false;

    buildConfigurationsByName = CacheBuilder.newBuilder()
        .build(
            new CacheLoader<String, XCBuildConfiguration>() {
                @Override
                public XCBuildConfiguration load(String key) {
                    XCBuildConfiguration configuration = objectFactory.createBuildConfiguration(key);
                    buildConfigurations.add(configuration);
                    return configuration;
                }
            });
}
Example #9
Source File: NetflowV9CodecAggregator.java From graylog-plugin-netflow with Apache License 2.0

@Inject
public NetflowV9CodecAggregator() {
    // TODO customize
    this.templateCache = CacheBuilder.newBuilder()
        .maximumSize(5000)
        .removalListener(notification -> LOG.debug("Removed {} from template cache for reason {}",
            notification.getKey(), notification.getCause()))
        .recordStats()
        .build();
    this.packetCache = CacheBuilder.newBuilder()
        .expireAfterWrite(1, TimeUnit.MINUTES)
        .maximumWeight(Size.megabytes(1).toBytes())
        .removalListener((RemovalListener<TemplateKey, Queue<PacketBytes>>) notification ->
            LOG.debug("Removed {} from packet cache for reason {}",
                notification.getKey(), notification.getCause()))
        .weigher((key, value) -> value.stream().map(PacketBytes::readableBytes).reduce(0, Integer::sum))
        .recordStats()
        .build();
}
Example #10
Source File: ApplicationIdCacheImpl.java From usergrid with Apache License 2.0

public ApplicationIdCacheImpl(final EntityManager managementEnityManager,
                              ManagerCache managerCache, ApplicationIdCacheFig fig) {
    this.managementEnityManager = managementEnityManager;
    this.managerCache = managerCache;
    appCache = CacheBuilder.newBuilder()
        .maximumSize(fig.getCacheSize())
        .expireAfterWrite(fig.getCacheTimeout(), TimeUnit.MILLISECONDS)
        .build(new CacheLoader<String, UUID>() {
            @Override
            public UUID load(final String key) throws Exception {
                UUID appId = fetchApplicationId(key);
                if (appId == null) {
                    throw new PersistenceException("Error getting applicationId");
                }
                return appId;
            }
        });
}
Example #11
Source File: SchemaCache.java From airpal with Apache License 2.0

public SchemaCache(final QueryRunner.QueryRunnerFactory queryRunnerFactory,
                   final ExecutorService executor) {
    this.queryRunnerFactory = checkNotNull(queryRunnerFactory, "queryRunnerFactory session was null!");
    this.executor = checkNotNull(executor, "executor was null!");

    ListeningExecutorService listeningExecutor = MoreExecutors.listeningDecorator(executor);

    BackgroundCacheLoader<String, Map<String, List<String>>> loader =
        new BackgroundCacheLoader<String, Map<String, List<String>>>(listeningExecutor) {
            @Override
            public Map<String, List<String>> load(String catalogName) {
                return queryMetadata(format(
                    "SELECT table_catalog, table_schema, table_name " +
                        "FROM information_schema.tables " +
                        "WHERE table_catalog = '%s'", catalogName));
            }
        };

    schemaTableCache = CacheBuilder.newBuilder()
        .refreshAfterWrite(RELOAD_TIME_MINUTES, TimeUnit.MINUTES)
        .build(loader);
}
Example #12
Source File: CachingTableProvider.java From samza with Apache License 2.0

private ReadWriteTable createDefaultCacheTable(String tableId, JavaTableConfig tableConfig) {
    long readTtlMs = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.READ_TTL_MS, "-1"));
    long writeTtlMs = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.WRITE_TTL_MS, "-1"));
    long cacheSize = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.CACHE_SIZE, "-1"));

    CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
    if (readTtlMs != -1) {
        cacheBuilder.expireAfterAccess(readTtlMs, TimeUnit.MILLISECONDS);
    }
    if (writeTtlMs != -1) {
        cacheBuilder.expireAfterWrite(writeTtlMs, TimeUnit.MILLISECONDS);
    }
    if (cacheSize != -1) {
        cacheBuilder.maximumSize(cacheSize);
    }

    logger.info(String.format("Creating default cache with: readTtl=%d, writeTtl=%d, maxSize=%d",
        readTtlMs, writeTtlMs, cacheSize));

    GuavaCacheTable cacheTable = new GuavaCacheTable(tableId + "-def-cache", cacheBuilder.build());
    cacheTable.init(this.context);

    return cacheTable;
}
Example #13
Source File: ResourceLocalizationService.java From big-c with Apache License 2.0

/**
 * For each of the requested resources for a container, determines the
 * appropriate {@link LocalResourcesTracker} and forwards a
 * {@link LocalResourceRequest} to that tracker.
 */
private void handleInitContainerResources(
    ContainerLocalizationRequestEvent rsrcReqs) {
    Container c = rsrcReqs.getContainer();
    // create a loading cache for the file statuses
    LoadingCache<Path, Future<FileStatus>> statCache =
        CacheBuilder.newBuilder().build(FSDownload.createStatusCacheLoader(getConfig()));
    LocalizerContext ctxt = new LocalizerContext(
        c.getUser(), c.getContainerId(), c.getCredentials(), statCache);
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
        rsrcReqs.getRequestedResources();
    for (Map.Entry<LocalResourceVisibility, Collection<LocalResourceRequest>> e :
        rsrcs.entrySet()) {
        LocalResourcesTracker tracker = getLocalResourcesTracker(e.getKey(), c.getUser(),
            c.getContainerId().getApplicationAttemptId().getApplicationId());
        for (LocalResourceRequest req : e.getValue()) {
            tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt));
        }
    }
}
Example #14
Source File: BasicToken.java From robe with GNU Lesser General Public License v3.0

/**
 * Configure method for Token generation configurations and encryptor configure
 *
 * @param configuration configuration for auth bundle
 */
public static void configure(TokenBasedAuthConfiguration configuration) {
    encryptor.setPoolSize(configuration.getPoolSize()); // This would be a good value for a 4-core system
    if (configuration.getServerPassword().equals("auto")) {
        encryptor.setPassword(UUID.randomUUID().toString());
    } else {
        encryptor.setPassword(configuration.getServerPassword());
    }
    encryptor.setAlgorithm(configuration.getAlgorithm());
    encryptor.initialize();
    BasicToken.defaultMaxAge = configuration.getMaxage();
    // Create cache for permissions.
    cache = CacheBuilder.newBuilder()
        .expireAfterAccess(defaultMaxAge, TimeUnit.SECONDS)
        .expireAfterWrite(defaultMaxAge, TimeUnit.SECONDS)
        .build();
}
Example #15
Source File: ElasticsearchMetadataModule.java From heroic with Apache License 2.0

@Provides
@ElasticsearchScope
public RateLimitedCache<Pair<String, HashCode>> writeCache(HeroicReporter reporter) {
    final Cache<Pair<String, HashCode>, Boolean> cache = CacheBuilder
        .newBuilder()
        .concurrencyLevel(writeCacheConcurrency)
        .maximumSize(writeCacheMaxSize)
        .expireAfterWrite(writeCacheDurationMinutes, TimeUnit.MINUTES)
        .build();

    reporter.registerCacheSize("elasticsearch-metadata-write-through", cache::size);

    if (writesPerSecond <= 0d) {
        return new DisabledRateLimitedCache<>(cache.asMap());
    }

    if (distributedCacheSrvRecord.length() > 0) {
        return new DistributedRateLimitedCache<>(
            cache.asMap(),
            RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, SECONDS),
            MemcachedConnection.create(distributedCacheSrvRecord),
            toIntExact(Duration.of(writeCacheDurationMinutes, MINUTES).convert(SECONDS)),
            reporter.newMemcachedReporter("metadata")
        );
    }

    return new DefaultRateLimitedCache<>(cache.asMap(),
        RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, TimeUnit.SECONDS));
}
Example #16
Source File: BukkitPlugin.java From ServerListPlus with GNU General Public License v3.0

@Override
public void reloadCaches(ServerListPlusCore core) {
    CoreConf conf = core.getConf(CoreConf.class);
    // Check if request cache configuration has been changed
    if (requestCacheConf == null || requestCache == null ||
        !requestCacheConf.equals(conf.Caches.Request)) {
        if (requestCache != null) {
            // Delete the request cache
            getLogger().log(DEBUG, "Deleting old request cache due to configuration changes.");
            requestCache.invalidateAll();
            requestCache.cleanUp();
            this.requestCache = null;
        }

        getLogger().log(DEBUG, "Creating new request cache...");

        try {
            this.requestCacheConf = conf.Caches.Request;
            this.requestCache = CacheBuilder.from(requestCacheConf).build(requestLoader);
        } catch (IllegalArgumentException e) {
            getLogger().log(ERROR, "Unable to create request cache using configuration settings.", e);
            this.requestCacheConf = core.getDefaultConf(CoreConf.class).Caches.Request;
            this.requestCache = CacheBuilder.from(requestCacheConf).build(requestLoader);
        }

        getLogger().log(DEBUG, "Request cache created.");
    }
}
Example #17
Source File: RedisSchema.java From calcite with Apache License 2.0

@Override
protected Map<String, Table> getTableMap() {
    JsonCustomTable[] jsonCustomTables = new JsonCustomTable[tables.size()];
    Set<String> tableNames = Arrays.stream(tables.toArray(jsonCustomTables))
        .map(e -> e.name).collect(Collectors.toSet());
    tableMap = Maps.asMap(
        ImmutableSet.copyOf(tableNames),
        CacheBuilder.newBuilder()
            .build(CacheLoader.from(this::table)));
    return tableMap;
}
Example #18
Source File: ManagedCache.java From mercury with Apache License 2.0

/**
 * Obtain a ManagedCache instance
 *
 * @param name of cache store
 * @param expiryMs in milliseconds
 * @param maxItems maximum number of cached objects
 * @return cache instance
 */
public synchronized static ManagedCache createCache(String name, long expiryMs, long maxItems) {
    ManagedCache managedCache = getInstance(name);
    if (managedCache != null) {
        return managedCache;
    }
    long expiryTimer = Math.max(expiryMs, MIN_EXPIRY);
    // create cache
    Cache<String, Object> cache = CacheBuilder.newBuilder()
        .maximumSize(maxItems)
        .expireAfterWrite(expiryTimer, TimeUnit.MILLISECONDS)
        .build();
    managedCache = new ManagedCache(cache, name, expiryTimer, maxItems);
    cacheCollection.put(name, managedCache);
    log.info("Created cache ({}), expiry {} ms, maxItems={}", name, expiryTimer, maxItems);
    return managedCache;
}
Example #19
Source File: TestApplication.java From registry with Apache License 2.0

private StorageManager getCacheBackedDao(TestConfiguration testConfiguration) {
    StorageProviderConfiguration storageProviderConfiguration = testConfiguration.getStorageProviderConfiguration();
    final StorageManager dao = getStorageManager(storageProviderConfiguration);
    final CacheBuilder cacheBuilder = getGuavaCacheBuilder();
    final Cache<StorableKey, Storable> cache = getCache(dao, cacheBuilder);
    final StorageWriter storageWriter = getStorageWriter(dao);

    return doGetCacheBackedDao(cache, storageWriter);
}
Example #20
Source File: GuavaCacheImpl.java From RxCache with Apache License 2.0

public GuavaCacheImpl(long maxSize) {
    super(maxSize);

    cache = CacheBuilder
        .newBuilder()
        .recordStats()
        .maximumSize(maxSize)
        .build(new CacheLoader<String, Object>() {
            @Override
            public String load(String key) throws Exception {
                return key;
            }
        });
}
Example #21
Source File: SshClientServiceImpl.java From cymbal with Apache License 2.0

public SshClientServiceImpl() {
    defaultConfig = new DefaultConfig();
    // provider for password-less (key-based) login verification
    Security.addProvider(new BouncyCastleProvider());
    // cache of SSHClient instances, to avoid the cost of opening an SSH connection for every command
    sshClientCache = CacheBuilder.newBuilder()
        .maximumSize(CACHE_CLIENT_MAX)
        .expireAfterAccess(CACHE_CLIENT_TTL, TimeUnit.SECONDS)
        .removalListener(new RemovalListener<SSHInfo, SSHClient>() {
            @Override
            public void onRemoval(RemovalNotification<SSHInfo, SSHClient> removalNotification) {
                try {
                    removalNotification.getValue().disconnect();
                } catch (IOException e) {
                    log.error("Close ssh connection error.", e);
                }
                log.debug("Ssh client to {} is expired.", removalNotification.getKey());
            }
        })
        .build(new CacheLoader<SSHInfo, SSHClient>() {
            @Override
            public SSHClient load(SSHInfo sshInfo) throws Exception {
                return getSshCLient(sshInfo);
            }
        });
}
Example #22
Source File: TenantDataProvider.java From rh-che with Eclipse Public License 2.0

@Inject
public TenantDataProvider(
    TenantDataCacheLoader tenantDataCacheLoader,
    @Named("che.infra.openshift.project") String cheNamespace,
    @Named("che.fabric8.standalone") boolean standalone) {
    this.cheNamespace = cheNamespace;
    this.standalone = standalone;
    this.tenantDataCache = CacheBuilder.newBuilder()
        .maximumSize(CONCURRENT_USERS)
        .expireAfterWrite(CACHE_TIMEOUT_MINUTES, TimeUnit.MINUTES)
        .build(tenantDataCacheLoader);
}
Example #23
Source File: CacheFilter.java From oneops with Apache License 2.0

/**
 * Initialize the cache filter.
 *
 * @param cacheEnabled set to true if md caching is enabled.
 * @param ttl cache ttl
 * @param cacheSize maximum cache size.
 * @param cmMgr {@link CmsCmManager}
 */
public CacheFilter(boolean cacheEnabled, long ttl, long cacheSize, CmsCmManager cmMgr) {
    this.cacheEnabled = cacheEnabled;
    this.cmManager = cmMgr;
    this.lastUpdatedTs = 0;

    // Initialize the var cache if it's enabled.
    if (cacheEnabled) {
        logger.info("Creating var cache with TTL: " + ttl + "sec and cacheSize: " + cacheSize);
        this.varCache = CacheBuilder.newBuilder()
            .maximumSize(cacheSize)
            .expireAfterWrite(ttl, SECONDS)
            .initialCapacity(1)
            .build(new CacheLoader<String, Long>() {
                @Override
                public Long load(String key) throws Exception {
                    CmsVar cacheStatus = cmManager.getCmSimpleVar(key);
                    if (cacheStatus != null) {
                        return parseLong(cacheStatus.getValue());
                    }
                    return 0L;
                }
            });
    } else {
        logger.warn("Filter/MdCache cache is disabled.");
    }
}
Example #24
Source File: PinotMetadata.java From presto with Apache License 2.0

@Inject
public PinotMetadata(
    PinotClient pinotClient,
    PinotConfig pinotConfig,
    @ForPinot Executor executor) {
    this.pinotConfig = requireNonNull(pinotConfig, "pinot config");
    long metadataCacheExpiryMillis = this.pinotConfig.getMetadataCacheExpiry().roundTo(TimeUnit.MILLISECONDS);
    this.allTablesCache = CacheBuilder.newBuilder()
        .refreshAfterWrite(metadataCacheExpiryMillis, TimeUnit.MILLISECONDS)
        .build(asyncReloading(CacheLoader.from(pinotClient::getAllTables), executor));
    this.pinotTableColumnCache = CacheBuilder.newBuilder()
        .refreshAfterWrite(metadataCacheExpiryMillis, TimeUnit.MILLISECONDS)
        .build(asyncReloading(new CacheLoader<String, List<PinotColumn>>() {
            @Override
            public List<PinotColumn> load(String tableName) throws Exception {
                Schema tablePinotSchema = pinotClient.getTableSchema(tableName);
                return getPinotColumnsForPinotSchema(tablePinotSchema);
            }
        }, executor));

    executor.execute(() -> this.allTablesCache.refresh(ALL_TABLES_CACHE_KEY));
}
Example #25
Source File: CachingHiveMetastore.java From presto with Apache License 2.0

private static CacheBuilder<Object, Object> newCacheBuilder(OptionalLong expiresAfterWriteMillis, OptionalLong refreshMillis, long maximumSize) {
    CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    if (expiresAfterWriteMillis.isPresent()) {
        cacheBuilder = cacheBuilder.expireAfterWrite(expiresAfterWriteMillis.getAsLong(), MILLISECONDS);
    }
    if (refreshMillis.isPresent() &&
        (expiresAfterWriteMillis.isEmpty() || expiresAfterWriteMillis.getAsLong() > refreshMillis.getAsLong())) {
        cacheBuilder = cacheBuilder.refreshAfterWrite(refreshMillis.getAsLong(), MILLISECONDS);
    }
    cacheBuilder = cacheBuilder.maximumSize(maximumSize);
    return cacheBuilder;
}
Example #26
Source File: ObjectSizer.java From titan1withtp3.1 with Apache License 2.0

@Override
public Object newInstance() {
    int size = 10000;
    Cache<String, Long> cache = CacheBuilder.newBuilder()
        .concurrencyLevel(2).initialCapacity(16 * 3)
        .maximumSize(10000).build();
//        for (int i=0;i<size;i++) {
//            cache.put(new Nothing(),new Nothing());
//        }
    return cache;
}
Example #27
Source File: ObjectManager.java From act-platform with ISC License

private LoadingCache<String, ObjectTypeEntity> createObjectTypeByNameCache() {
    return CacheBuilder.newBuilder()
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .build(new CacheLoader<String, ObjectTypeEntity>() {
            @Override
            public ObjectTypeEntity load(String key) throws Exception {
                return ObjectUtils.notNull(objectTypeDao.get(key),
                    new Exception(String.format("ObjectType with name = %s does not exist.", key)));
            }
        });
}
Example #28
Source File: EnvelopeSchemaConverter.java From incubator-gobblin with Apache License 2.0

/**
 * To remove certain fields from the Avro schema or records of a topic/table, set property
 * {topic/table name}.remove.fields={comma-separated, fully qualified field names} in workUnit.
 */
@Override
public EnvelopeSchemaConverter init(WorkUnitState workUnit) {
    if (workUnit.contains(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY)) {
        String removeFieldsPropName =
            workUnit.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY) + AvroProjectionConverter.REMOVE_FIELDS;
        if (workUnit.contains(removeFieldsPropName)) {
            this.fieldRemover = Optional.of(new AvroSchemaFieldRemover(workUnit.getProp(removeFieldsPropName)));
        } else {
            this.fieldRemover = Optional.absent();
        }
    }
    String registryFactoryField = workUnit.contains(KafkaSchemaRegistryFactory.KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS)
        ? workUnit.getProp(KafkaSchemaRegistryFactory.KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS)
        : DEFAULT_KAFKA_SCHEMA_REGISTRY_FACTORY_CLASS;
    try {
        KafkaSchemaRegistryFactory registryFactory =
            ((Class<? extends KafkaSchemaRegistryFactory>) Class.forName(registryFactoryField)).newInstance();
        this.registry = registryFactory.create(workUnit.getProperties());
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        return null;
    }
    this.decoderFactory = DecoderFactory.get();
    this.readers = CacheBuilder.newBuilder().build(new CacheLoader<Schema, GenericDatumReader<GenericRecord>>() {
        @Override
        public GenericDatumReader<GenericRecord> load(final Schema key) throws Exception {
            return new GenericDatumReader<>(key);
        }
    });
    return this;
}
Example #29
Source File: OutstandingCountFlowController.java From pubsub with Apache License 2.0

public OutstandingCountFlowController(double startingPerSecond) {
    this.ratePerSecond = Math.max(startingPerSecond, 1);
    this.expiryCache =
        CacheBuilder.newBuilder().expireAfterWrite(expiryLatencySeconds, TimeUnit.SECONDS).build();
    updateExecutor.scheduleAtFixedRate(
        this::updateRate,
        expiryLatencySeconds * 1000,
        rateUpdateDelayMilliseconds,
        TimeUnit.MILLISECONDS);
}
Example #30
Source File: SingleChannelPublisher.java From rxrabbit with MIT License

public SingleChannelPublisher(ChannelFactory channelFactory,
                              boolean publisherConfirms,
                              int maxRetries,
                              Scheduler observeOnScheduler,
                              PublishEventListener metricsReporter,
                              long confirmsTimeoutSec,
                              long closeTimeoutMillis,
                              long cacheCleanupTriggerSecs,
                              BackoffAlgorithm backoffAlgorithm) {
    this.channelFactory = channelFactory;
    this.publisherConfirms = publisherConfirms;
    this.maxRetries = maxRetries;
    this.observeOnScheduler = observeOnScheduler;
    this.closeTimeoutMillis = closeTimeoutMillis;
    this.metricsReporter = metricsReporter;
    this.backoffAlgorithm = backoffAlgorithm;

    this.publishWorker = Schedulers.io().createWorker();
    final long instanceNr = publisherInstanceNr.incrementAndGet();
    publishWorker.schedule(() -> Thread.currentThread().setName("rabbit-send-thread-" + instanceNr));
    this.ackWorker = Schedulers.io().createWorker();
    ackWorker.schedule(() -> Thread.currentThread().setName("rabbit-confirm-thread-" + instanceNr));
    this.cacheCleanupWorker = Schedulers.io().createWorker();
    cacheCleanupWorker.schedule(() -> Thread.currentThread().setName("cache-cleanup-" + instanceNr));

    this.tagToMessage = CacheBuilder.<Long, UnconfirmedMessage>newBuilder()
        .expireAfterAccess(confirmsTimeoutSec, TimeUnit.SECONDS)
        .removalListener(this::handleCacheRemove)
        .build();

    if (publisherConfirms) {
        cacheCleanupWorker.schedulePeriodically(tagToMessage::cleanUp,
            cacheCleanupTriggerSecs, cacheCleanupTriggerSecs, TimeUnit.SECONDS);
    }
}