com.google.common.cache.Weigher Java Examples
The following examples show how to use com.google.common.cache.Weigher.
The source file, originating project, and license are noted above each example.
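Before diving into the project examples, it helps to recall what the interface does: a Weigher assigns each cache entry an int weight when the entry is created, and CacheBuilder compares the running sum of those weights against the maximumWeight bound instead of counting entries. Below is a minimal, self-contained sketch (the class name, key/value types, and the 10 MB cap are illustrative, not taken from any of the projects):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.Weigher;

public class WeigherSketch {
    public static void main(String[] args) {
        // Bound the cache by the total number of value bytes rather than by entry count.
        // The weigher runs once per entry at insertion time; weights are not recomputed later.
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                .maximumWeight(10L * 1024 * 1024) // roughly 10 MB of value bytes
                .weigher((Weigher<String, byte[]>) (key, value) -> value.length)
                .build();

        cache.put("greeting", "hello".getBytes());
        System.out.println(cache.size());
    }
}

Note that maximumWeight and maximumSize are mutually exclusive on the same builder, which is why each example below picks exactly one of them.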
Example #1
Source File: RocksDBLookupTableCache.java From kylin-on-parquet-v2 with Apache License 2.0
private void init() {
    this.basePath = getCacheBasePath(config);
    this.maxCacheSizeInKB = (long) (config.getExtTableSnapshotLocalCacheMaxSizeGB() * 1024 * 1024);
    this.tablesCache = CacheBuilder.newBuilder().removalListener(new RemovalListener<String, CachedTableInfo>() {
        @Override
        public void onRemoval(RemovalNotification<String, CachedTableInfo> notification) {
            logger.warn(notification.getValue() + " is removed " + "because of " + notification.getCause());
            notification.getValue().cleanStorage();
        }
    }).maximumWeight(maxCacheSizeInKB).weigher(new Weigher<String, CachedTableInfo>() {
        @Override
        public int weigh(String key, CachedTableInfo value) {
            return value.getSizeInKB();
        }
    }).build();
    restoreCacheState();
    cacheStateChecker = new CacheStateChecker();
    initExecutors();
}
Example #2
Source File: OrbitTiledImage2.java From orbit-image-analysis with GNU General Public License v3.0
private static void initCache() {
    logger.info("(re-)creating tile cache");
    long mem = ScaleoutMode.SCALEOUTMODE.get() ? 512L * 512L * 3 * 4 * 200 : Runtime.getRuntime().maxMemory() / 2;
    // long mem = (512L*512L*3*4*200);
    tileCache = CacheBuilder.
            newBuilder().
            //recordStats().
            expireAfterWrite(7, TimeUnit.MINUTES).
            maximumWeight(mem).
            weigher(new Weigher<PointAndName, Raster>() {
                @Override
                public int weigh(PointAndName key, Raster raster) {
                    return raster.getWidth() * raster.getHeight() * 3 * 4;
                }
            }).
            build();
}
Example #3
Source File: MetaCacheManager.java From aliyun-tablestore-java-sdk with Apache License 2.0
public MetaCacheManager(String tableName, long intervalDumpMeta, long maxCacheSize, TableStoreWriter writer) {
    this.tableName = tableName;
    this.intervalDumpMeta = intervalDumpMeta;
    this.cache = CacheBuilder.newBuilder()
            .expireAfterWrite(intervalDumpMeta, TimeUnit.SECONDS)
            .weigher(
                    new Weigher<TimestreamIdentifier, Long>() {
                        public int weigh(TimestreamIdentifier k, Long v) {
                            return k.getDataSize() + 8;
                        }
                    }
            )
            .maximumWeight(maxCacheSize)
            .build();
    this.writer = writer;
}
Example #4
Source File: CachingTextExtractor.java From james-project with Apache License 2.0
public CachingTextExtractor(TextExtractor underlying, Duration cacheEvictionPeriod, Long cacheWeightInBytes,
                            MetricFactory metricFactory, GaugeRegistry gaugeRegistry) {
    this.underlying = underlying;
    this.weightMetric = metricFactory.generate("textExtractor.cache.weight");

    Weigher<String, ParsedContent> weigher =
        (key, parsedContent) -> computeWeight(parsedContent);
    RemovalListener<String, ParsedContent> removalListener =
        notification -> Optional.ofNullable(notification.getValue())
            .map(this::computeWeight)
            .ifPresent(weightMetric::remove);

    this.cache = CacheBuilder.newBuilder()
        .expireAfterAccess(cacheEvictionPeriod.toMillis(), TimeUnit.MILLISECONDS)
        .maximumWeight(cacheWeightInBytes)
        .weigher(weigher)
        .recordStats()
        .removalListener(removalListener)
        .build();
    recordStats(gaugeRegistry);
}
Example #5
Source File: CachingDirectoryLister.java From presto with Apache License 2.0
public CachingDirectoryLister(Duration expireAfterWrite, long maxSize, List<String> tables) {
    this.cache = CacheBuilder.newBuilder()
            .maximumWeight(maxSize)
            .weigher((Weigher<Path, List<LocatedFileStatus>>) (key, value) -> value.size())
            .expireAfterWrite(expireAfterWrite.toMillis(), TimeUnit.MILLISECONDS)
            .recordStats()
            .build();
    this.tablePrefixes = tables.stream()
            .map(CachingDirectoryLister::parseTableName)
            .collect(toImmutableList());
}
Example #6
Source File: BaseCache.java From my_curd with Apache License 2.0
/**
 * Weight-bounded cache: the total weight of the cached entries must not exceed maxWeight.
 *
 * @param maxWeight the maximum total weight
 * @param weigher   the weigher implementation that computes the weight of each element
 */
public BaseCache(long maxWeight, Weigher<K, V> weigher) {
    cache = CacheBuilder.newBuilder()
            .maximumWeight(maxWeight)
            .weigher(weigher)
            .build(new CacheLoader<K, V>() {
                @Override
                public V load(K k) {
                    return loadData(k);
                }
            });
}
Example #7
Source File: ConfigFileController.java From apollo with Apache License 2.0
public ConfigFileController(
        final ConfigController configController,
        final NamespaceUtil namespaceUtil,
        final WatchKeysUtil watchKeysUtil,
        final GrayReleaseRulesHolder grayReleaseRulesHolder) {
    localCache = CacheBuilder.newBuilder()
            .expireAfterWrite(EXPIRE_AFTER_WRITE, TimeUnit.MINUTES)
            .weigher((Weigher<String, String>) (key, value) -> value == null ? 0 : value.length())
            .maximumWeight(MAX_CACHE_SIZE)
            .removalListener(notification -> {
                String cacheKey = notification.getKey();
                logger.debug("removing cache key: {}", cacheKey);
                if (!cacheKey2WatchedKeys.containsKey(cacheKey)) {
                    return;
                }
                //create a new list to avoid ConcurrentModificationException
                List<String> watchedKeys = new ArrayList<>(cacheKey2WatchedKeys.get(cacheKey));
                for (String watchedKey : watchedKeys) {
                    watchedKeys2CacheKey.remove(watchedKey, cacheKey);
                }
                cacheKey2WatchedKeys.removeAll(cacheKey);
                logger.debug("removed cache key: {}", cacheKey);
            })
            .build();
    propertiesResponseHeaders = new HttpHeaders();
    propertiesResponseHeaders.add("Content-Type", "text/plain;charset=UTF-8");
    jsonResponseHeaders = new HttpHeaders();
    jsonResponseHeaders.add("Content-Type", "application/json;charset=UTF-8");
    NOT_FOUND_RESPONSE = new ResponseEntity<>(HttpStatus.NOT_FOUND);
    this.configController = configController;
    this.namespaceUtil = namespaceUtil;
    this.watchKeysUtil = watchKeysUtil;
    this.grayReleaseRulesHolder = grayReleaseRulesHolder;
}
Example #8
Source File: StandardJanusGraphTx.java From grakn with GNU Affero General Public License v3.0
public StandardJanusGraphTx(StandardJanusGraph graph, TransactionConfiguration config) {
    this.graph = graph;
    this.timestampProvider = graph.getConfiguration().getTimestampProvider();
    this.config = config;
    this.idManager = graph.getIDManager();
    this.attributeHandler = graph.getDataSerializer();
    this.edgeSerializer = graph.getEdgeSerializer();
    this.indexSerializer = graph.getIndexSerializer();

    this.temporaryIds = buildTemporaryIDsPool();
    this.isOpen = true;

    this.externalVertexRetriever = new VertexConstructor(config.hasVerifyExternalVertexExistence()); // used to retrieve vertices when vertex ID is provided as a parameter by the user, e.g. getVertex("1234")
    this.internalVertexRetriever = new VertexConstructor(config.hasVerifyInternalVertexExistence()); // used to retrieve vertices, but only invoked by internal methods, e.g. vertexVariable.query().direction(Direction.OUT).labels("link").vertices()
    this.existingVertexRetriever = new VertexConstructor(false); // used to retrieve vertices when we are 100% sure that the vertex exists

    int concurrencyLevel = (config.isSingleThreaded()) ? 1 : 4;
    this.addedRelations = (config.isSingleThreaded()) ? new SimpleBufferAddedRelations() : new ConcurrentBufferAddedRelations();
    this.newTypeCache = (config.isSingleThreaded()) ? new HashMap<>() : new ConcurrentHashMap<>();
    this.newVertexIndexEntries = (config.isSingleThreaded()) ? new SimpleIndexCache() : new ConcurrentIndexCache();

    long effectiveVertexCacheSize = Math.max(MIN_VERTEX_CACHE_SIZE, config.getVertexCacheSize()); // this is because of a weird bug with cache, see line above where declared MIN_VERTEX_CACHE_SIZE
    this.vertexCache = new VertexCache(effectiveVertexCacheSize, concurrencyLevel, config.getDirtyVertexSize());

    this.indexCache = CacheBuilder.newBuilder()
            .weigher((Weigher<JointIndexQuery.Subquery, List<Object>>) (q, r) -> 2 + r.size())
            .concurrencyLevel(concurrencyLevel)
            .maximumWeight(config.getIndexCacheWeight())
            .build();

    this.deletedRelations = EMPTY_DELETED_RELATIONS;

    //The following 2 variables need to be reworked completely, but in order to do that
    // correctly, the whole hierarchy Transaction-QueryBuilder-QueryProcessor needs to be reworked
    elementProcessor = elementProcessorImpl;
    edgeProcessor = edgeProcessorImpl;

    // Ideally we should try to remove the dependency IndexSerialiser to Tx (which is why we can only open BackendTransaction here),
    // and find a proper structure so that this Tx and BackendTransaction don't have this awkward coupling.
    this.backendTransaction = graph.openBackendTransaction(this); // awkward!
}
Example #9
Source File: MapCache.java From dsl-devkit with Eclipse Public License 1.0
MapCache(final String name, final CacheConfiguration config) {
    this.name = name;
    CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    if (config.isStatisticsEnabled()) {
        cacheBuilder.recordStats();
    }
    if (config.isSoftValuesEnabled()) {
        cacheBuilder.softValues();
    }
    if (config.getInitialCapacity() >= 0) {
        cacheBuilder.initialCapacity(config.getInitialCapacity());
    }
    if (config.getMaximumSize() >= 0) {
        if (config.isArraySizeEnabled()) {
            cacheBuilder.maximumWeight(config.getMaximumSize());
            cacheBuilder.weigher(new Weigher<K, V>() {
                @Override
                public int weigh(final K key, final V value) {
                    if (value instanceof byte[]) {
                        return ((byte[]) value).length;
                    }
                    throw new IllegalStateException("Using array size is only supported for byte arrays"); //$NON-NLS-1$
                }
            });
        } else {
            cacheBuilder.maximumSize(config.getMaximumSize());
        }
    }
    backend = cacheBuilder.build();
}
Example #10
Source File: ExpirationKCVSCache.java From titan1withtp3.1 with Apache License 2.0
public ExpirationKCVSCache(final KeyColumnValueStore store, String metricsName, final long cacheTimeMS,
                           final long invalidationGracePeriodMS, final long maximumByteSize) {
    super(store, metricsName);
    Preconditions.checkArgument(cacheTimeMS > 0, "Cache expiration must be positive: %s", cacheTimeMS);
    Preconditions.checkArgument(System.currentTimeMillis() + 1000l * 3600 * 24 * 365 * 100 + cacheTimeMS > 0,
            "Cache expiration time too large, overflow may occur: %s", cacheTimeMS);
    this.cacheTimeMS = cacheTimeMS;
    int concurrencyLevel = Runtime.getRuntime().availableProcessors();
    Preconditions.checkArgument(invalidationGracePeriodMS >= 0, "Invalid expiration grace period: %s", invalidationGracePeriodMS);
    this.invalidationGracePeriodMS = invalidationGracePeriodMS;
    CacheBuilder<KeySliceQuery, EntryList> cachebuilder = CacheBuilder.newBuilder()
            .maximumWeight(maximumByteSize)
            .concurrencyLevel(concurrencyLevel)
            .initialCapacity(1000)
            .expireAfterWrite(cacheTimeMS, TimeUnit.MILLISECONDS)
            .weigher(new Weigher<KeySliceQuery, EntryList>() {
                @Override
                public int weigh(KeySliceQuery keySliceQuery, EntryList entries) {
                    return GUAVA_CACHE_ENTRY_SIZE + KEY_QUERY_SIZE + entries.getByteSize();
                }
            });
    cache = cachebuilder.build();
    expiredKeys = new ConcurrentHashMap<StaticBuffer, Long>(50, 0.75f, concurrencyLevel);
    penaltyCountdown = new CountDownLatch(PENALTY_THRESHOLD);
    cleanupThread = new CleanupThread();
    cleanupThread.start();
}
Example #11
Source File: GlobalCache.java From phoenix with Apache License 2.0
public Cache<ImmutableBytesPtr, PTable> getMetaDataCache() {
    // Lazy initialize QueryServices so that we only attempt to create an HBase Configuration
    // object upon the first attempt to connect to any cluster. Otherwise, an attempt will be
    // made at driver initialization time which is too early for some systems.
    Cache<ImmutableBytesPtr, PTable> result = metaDataCache;
    if (result == null) {
        synchronized (this) {
            result = metaDataCache;
            if (result == null) {
                long maxTTL = Math.min(config.getLong(
                        QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB,
                        QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS),
                        config.getLong(
                                QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB,
                                QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS));
                long maxSize = config.getLong(QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB,
                        QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE);
                metaDataCache = result = CacheBuilder.newBuilder()
                        .maximumWeight(maxSize)
                        .expireAfterAccess(maxTTL, TimeUnit.MILLISECONDS)
                        .weigher(new Weigher<ImmutableBytesPtr, PTable>() {
                            @Override
                            public int weigh(ImmutableBytesPtr key, PTable table) {
                                return SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + key.getLength() + table.getEstimatedSize();
                            }
                        })
                        .build();
            }
        }
    }
    return result;
}
Example #12
Source File: GlobalCache.java From phoenix with Apache License 2.0
public Cache<ImmutableBytesPtr, PMetaDataEntity> getMetaDataCache() {
    // Lazy initialize QueryServices so that we only attempt to create an HBase Configuration
    // object upon the first attempt to connect to any cluster. Otherwise, an attempt will be
    // made at driver initialization time which is too early for some systems.
    Cache<ImmutableBytesPtr, PMetaDataEntity> result = metaDataCache;
    if (result == null) {
        synchronized (this) {
            result = metaDataCache;
            if (result == null) {
                long maxTTL = config.getLong(
                        QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB,
                        QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS);
                long maxSize = config.getLong(QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB,
                        QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE);
                metaDataCache = result = CacheBuilder.newBuilder()
                        .maximumWeight(maxSize)
                        .expireAfterAccess(maxTTL, TimeUnit.MILLISECONDS)
                        .weigher(new Weigher<ImmutableBytesPtr, PMetaDataEntity>() {
                            @Override
                            public int weigh(ImmutableBytesPtr key, PMetaDataEntity table) {
                                return SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + key.getLength() + table.getEstimatedSize();
                            }
                        })
                        .build();
            }
        }
    }
    return result;
}
Example #13
Source File: GlobalEngine.java From tajo with Apache License 2.0
private QueryContext createQueryContext(Session session) {
    QueryContext newQueryContext = new QueryContext(context.getConf(), session);

    // Set default space uri and its root uri
    newQueryContext.setDefaultSpaceUri(TablespaceManager.getDefault().getUri());
    newQueryContext.setDefaultSpaceRootUri(TablespaceManager.getDefault().getRootUri());

    if (TajoConstants.IS_TEST_MODE) {
        newQueryContext.putAll(CommonTestingUtil.getSessionVarsForTest());
    }

    // Set queryCache in session
    int queryCacheSize = context.getConf().getIntVar(TajoConf.ConfVars.QUERY_SESSION_QUERY_CACHE_SIZE);
    if (queryCacheSize > 0 && session.getQueryCache() == null) {
        Weigher<String, Expr> weighByLength = new Weigher<String, Expr>() {
            public int weigh(String key, Expr expr) {
                return key.length();
            }
        };
        LoadingCache<String, Expr> cache = CacheBuilder.newBuilder()
                .maximumWeight(queryCacheSize * 1024)
                .weigher(weighByLength)
                .expireAfterAccess(1, TimeUnit.HOURS)
                .build(new CacheLoader<String, Expr>() {
                    public Expr load(String sql) throws SQLSyntaxError {
                        return analyzer.parse(sql);
                    }
                });
        session.setQueryCache(cache);
    }

    return newQueryContext;
}
Example #14
Source File: GuidePostsCacheImpl.java From phoenix with Apache License 2.0
public GuidePostsCacheImpl(PhoenixStatsCacheLoader cacheLoader, Configuration config) {
    Preconditions.checkNotNull(cacheLoader);

    // Number of millis to expire cache values after write
    final long statsUpdateFrequency = config.getLong(
            QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB,
            QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS);

    // Maximum total weight (size in bytes) of stats entries
    final long maxTableStatsCacheSize = config.getLong(
            QueryServices.STATS_MAX_CACHE_SIZE,
            QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE);

    cache = CacheBuilder.newBuilder()
            // Refresh entries a given amount of time after they were written
            .refreshAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS)
            // Maximum total weight (size in bytes) of stats entries
            .maximumWeight(maxTableStatsCacheSize)
            // Defer actual size to the PTableStats.getEstimatedSize()
            .weigher(new Weigher<GuidePostsKey, GuidePostsInfo>() {
                @Override
                public int weigh(GuidePostsKey key, GuidePostsInfo info) {
                    return info.getEstimatedSize();
                }
            })
            // Log removals at TRACE for debugging
            .removalListener(new PhoenixStatsCacheRemovalListener())
            // Automatically load the cache when entries need to be refreshed
            .build(cacheLoader);
}
Example #15
Source File: ThirdEyeUtils.java From incubator-pinot with Apache License 2.0
public static LoadingCache<RelationalQuery, ThirdEyeResultSetGroup> buildResponseCache(
        CacheLoader cacheLoader) throws Exception {
    Preconditions.checkNotNull(cacheLoader, "A cache loader is required.");

    // Initializes listener that prints expired entries in debugging mode.
    RemovalListener<RelationalQuery, ThirdEyeResultSet> listener;
    if (LOG.isDebugEnabled()) {
        listener = new RemovalListener<RelationalQuery, ThirdEyeResultSet>() {
            @Override
            public void onRemoval(RemovalNotification<RelationalQuery, ThirdEyeResultSet> notification) {
                LOG.debug("Expired {}", notification.getKey().getQuery());
            }
        };
    } else {
        listener = new RemovalListener<RelationalQuery, ThirdEyeResultSet>() {
            @Override
            public void onRemoval(RemovalNotification<RelationalQuery, ThirdEyeResultSet> notification) {
            }
        };
    }

    // ResultSetGroup Cache. The size of this cache is limited by the total number of buckets in all ResultSetGroup.
    // We estimate that 1 bucket (including overhead) consumes 1KB and this cache is allowed to use up to 50% of max
    // heap space.
    long maxBucketNumber = getApproximateMaxBucketNumber(DEFAULT_HEAP_PERCENTAGE_FOR_RESULTSETGROUP_CACHE);
    LOG.debug("Max bucket number for {}'s cache is set to {}", cacheLoader.toString(), maxBucketNumber);

    return CacheBuilder.newBuilder()
            .removalListener(listener)
            .expireAfterWrite(15, TimeUnit.MINUTES)
            .maximumWeight(maxBucketNumber)
            .weigher(new Weigher<RelationalQuery, ThirdEyeResultSetGroup>() {
                @Override
                public int weigh(RelationalQuery relationalQuery, ThirdEyeResultSetGroup resultSetGroup) {
                    int resultSetCount = resultSetGroup.size();
                    int weight = 0;
                    for (int idx = 0; idx < resultSetCount; ++idx) {
                        ThirdEyeResultSet resultSet = resultSetGroup.get(idx);
                        weight += ((resultSet.getColumnCount() + resultSet.getGroupKeyLength()) * resultSet.getRowCount());
                    }
                    return weight;
                }
            })
            .build(cacheLoader);
}
Example #16
Source File: PhoenixStatsCacheLoaderTest.java From phoenix with Apache License 2.0
@Test
public void testStatsBeingAutomaticallyRefreshed() {
    ExecutorService executor = Executors.newFixedThreadPool(4);

    CountDownLatch firstTimeRefreshedSignal = new CountDownLatch(1);
    CountDownLatch secondTimeRefreshedSignal = new CountDownLatch(2);

    Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();

    LoadingCache<GuidePostsKey, GuidePostsInfo> cache = CacheBuilder.newBuilder()
            // Refresh entries a given amount of time after they were written
            .refreshAfterWrite(100, TimeUnit.MILLISECONDS)
            // Maximum total weight (size in bytes) of stats entries
            .maximumWeight(QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE)
            // Defer actual size to the PTableStats.getEstimatedSize()
            .weigher(new Weigher<GuidePostsKey, GuidePostsInfo>() {
                @Override
                public int weigh(GuidePostsKey key, GuidePostsInfo info) {
                    return info.getEstimatedSize();
                }
            })
            // Log removals at TRACE for debugging
            .removalListener(new GuidePostsCacheImpl.PhoenixStatsCacheRemovalListener())
            // Automatically load the cache when entries are missing
            .build(new PhoenixStatsCacheLoader(new TestStatsLoaderImpl(
                    firstTimeRefreshedSignal, secondTimeRefreshedSignal), config));

    try {
        GuidePostsKey guidePostsKey = new GuidePostsKey(new byte[4], new byte[4]);
        GuidePostsInfo guidePostsInfo = getStats(cache, guidePostsKey);
        assertTrue(guidePostsInfo.getMaxLength() == 1);

        // Note: With Guava cache, automatic refreshes are performed when the first stale request for an entry occurs.
        // After we sleep here for any time which is larger than the refresh cycle, the refresh of cache entry will be
        // triggered for its first time by the call of getStats(). This is deterministic behavior, and it won't cause
        // randomized test failures.
        sleep(150);
        guidePostsInfo = getStats(cache, guidePostsKey);
        // Refresh has been triggered for its first time, but still could get the old value
        assertTrue(guidePostsInfo.getMaxLength() >= 1);
        firstTimeRefreshedSignal.await();

        sleep(150);
        guidePostsInfo = getStats(cache, guidePostsKey);
        // Now the second time refresh has been triggered by the above getStats() call, the first time Refresh has completed
        // and the cache entry has been updated for sure.
        assertTrue(guidePostsInfo.getMaxLength() >= 2);
        secondTimeRefreshedSignal.await();
    } catch (InterruptedException e) {
        assertFalse(true);
    }
}
Example #17
Source File: ElasticsearchDataModel.java From elasticsearch-taste with Apache License 2.0
public void setMaxCacheWeight(final long weight) {
    final Weigher<DmKey, DmValue> weigher = (key, value) -> 24 + value.getSize();
    cache = CacheBuilder.newBuilder().maximumWeight(weight)
            .weigher(weigher).build();
}
Example #18
Source File: GuavaCacheFromContext.java From caffeine with Apache License 2.0
GuavaWeigher(com.github.benmanes.caffeine.cache.Weigher<K, V> weigher) {
    this.weigher = weigher;
}
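Example #18 shows only the constructor of an adapter that lets a Caffeine weigher be used where a Guava Weigher is expected. For orientation, a minimal sketch of such an adapter is shown below (the class name is illustrative and this is not the caffeine project's actual implementation; it relies only on the fact that both interfaces declare an int weigh(key, value) method):

import com.google.common.cache.Weigher;

// Delegates Guava weight requests to a wrapped Caffeine weigher.
final class GuavaWeigherSketch<K, V> implements Weigher<K, V> {
    private final com.github.benmanes.caffeine.cache.Weigher<K, V> delegate;

    GuavaWeigherSketch(com.github.benmanes.caffeine.cache.Weigher<K, V> delegate) {
        this.delegate = delegate;
    }

    @Override
    public int weigh(K key, V value) {
        // Both Weigher interfaces return an int weight for a key/value pair.
        return delegate.weigh(key, value);
    }
}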
Example #19
Source File: StandardTitanTx.java From titan1withtp3.1 with Apache License 2.0
public StandardTitanTx(StandardTitanGraph graph, TransactionConfiguration config) {
    Preconditions.checkNotNull(graph);
    Preconditions.checkArgument(graph.isOpen());
    Preconditions.checkNotNull(config);
    this.graph = graph;
    this.times = graph.getConfiguration().getTimestampProvider();
    this.config = config;
    this.idManager = graph.getIDManager();
    this.idInspector = idManager; // this.idInspector = idManager.getIdInspector();
    this.attributeHandler = graph.getDataSerializer();
    this.edgeSerializer = graph.getEdgeSerializer();
    this.indexSerializer = graph.getIndexSerializer();

    temporaryIds = new IDPool() {

        private final AtomicLong counter = new AtomicLong(1);

        @Override
        public long nextID() {
            return counter.getAndIncrement();
        }

        @Override
        public void close() {
            //Do nothing
        }
    };

    int concurrencyLevel;
    if (config.isSingleThreaded()) {
        addedRelations = new SimpleBufferAddedRelations();
        concurrencyLevel = 1;
        newTypeCache = new HashMap<String, Long>();
        newVertexIndexEntries = new SimpleIndexCache();
    } else {
        addedRelations = new ConcurrentBufferAddedRelations();
        concurrencyLevel = 1; //TODO: should we increase this?
        newTypeCache = new NonBlockingHashMap<String, Long>();
        newVertexIndexEntries = new ConcurrentIndexCache();
    }

    boolean preloadedData = config.hasPreloadedData();
    externalVertexRetriever = new VertexConstructor(config.hasVerifyExternalVertexExistence(), preloadedData);
    internalVertexRetriever = new VertexConstructor(config.hasVerifyInternalVertexExistence(), preloadedData);
    existingVertexRetriever = new VertexConstructor(false, preloadedData);

    long effectiveVertexCacheSize = config.getVertexCacheSize();
    if (!config.isReadOnly()) {
        effectiveVertexCacheSize = Math.max(MIN_VERTEX_CACHE_SIZE, effectiveVertexCacheSize);
        log.debug("Guava vertex cache size: requested={} effective={} (min={})",
                config.getVertexCacheSize(), effectiveVertexCacheSize, MIN_VERTEX_CACHE_SIZE);
    }
    vertexCache = new GuavaVertexCache(effectiveVertexCacheSize, concurrencyLevel, config.getDirtyVertexSize());

    indexCache = CacheBuilder.newBuilder().weigher(new Weigher<JointIndexQuery.Subquery, List<Object>>() {
        @Override
        public int weigh(JointIndexQuery.Subquery q, List<Object> r) {
            return 2 + r.size();
        }
    }).concurrencyLevel(concurrencyLevel).maximumWeight(config.getIndexCacheWeight()).build();

    uniqueLocks = UNINITIALIZED_LOCKS;
    deletedRelations = EMPTY_DELETED_RELATIONS;

    this.isOpen = true;
    if (null != config.getGroupName()) {
        MetricManager.INSTANCE.getCounter(config.getGroupName(), "tx", "begin").inc();
        elementProcessor = new MetricsQueryExecutor<GraphCentricQuery, TitanElement, JointIndexQuery>(config.getGroupName(), "graph", elementProcessorImpl);
        edgeProcessor = new MetricsQueryExecutor<VertexCentricQuery, TitanRelation, SliceQuery>(config.getGroupName(), "vertex", edgeProcessorImpl);
    } else {
        elementProcessor = elementProcessorImpl;
        edgeProcessor = edgeProcessorImpl;
    }
}
Example #20
Source File: MessageCacheBuilder.java From hermes with Apache License 2.0
private LoadingCache<Pair<String, Integer>, PageCache<T>> buildCache() {
    return CacheBuilder.newBuilder()//
            .concurrencyLevel(m_builder.m_concurrencyLevel)//
            .maximumWeight(m_builder.m_maximumMessageCapacity)//
            .removalListener(new RemovalListener<Pair<String, Integer>, PageCache<T>>() {

                @Override
                public void onRemoval(RemovalNotification<Pair<String, Integer>, PageCache<T>> notification) {
                    if (m_shrinking.compareAndSet(false, true)) {
                        m_shrinkExecutor.submit(new Runnable() {

                            @Override
                            public void run() {
                                Transaction tx = Cat.newTransaction("Message.Broker.Cache.Shrink", m_name);
                                try {
                                    int shrinkCount = m_shrinkStrategy.shrink(m_tpPageCaches.asMap().values());
                                    tx.addData("count", shrinkCount);
                                    tx.setStatus(Transaction.SUCCESS);
                                } catch (Exception e) {
                                    Cat.logError(e);
                                    tx.setStatus(e);
                                } finally {
                                    m_shrinking.set(false);
                                    tx.complete();
                                }
                            }
                        });
                    }
                }
            })//
            .weigher(new Weigher<Pair<String, Integer>, PageCache<T>>() {

                @Override
                public int weigh(Pair<String, Integer> key, PageCache<T> pageCache) {
                    return pageCache.pageCount() * pageCache.pageSize();
                }
            })//
            .build(new CacheLoader<Pair<String, Integer>, PageCache<T>>() {

                @Override
                public PageCache<T> load(Pair<String, Integer> tp) throws Exception {
                    return buildPageCache(tp);
                }
            });
}