Java Code Examples for org.apache.bookkeeper.stats.StatsLogger#getCounter()
The following examples show how to use org.apache.bookkeeper.stats.StatsLogger#getCounter().
Each example is taken from an open source project; the source file, project, and license are noted above each snippet.
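Before the project-specific examples, here is a minimal, hypothetical sketch of the pattern most of them follow: a component accepts a StatsLogger, optionally narrows it with scope(), obtains its Counter instances once in the constructor, and increments them at runtime. The class name RequestTracker and the stat names are illustrative only; StatsLogger, Counter, and NullStatsLogger are the BookKeeper stats API types used throughout the examples below.

import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;

// Hypothetical component: the names below are illustrative, not taken from any of the projects listed.
public class RequestTracker {

    private final Counter requestCounter;

    public RequestTracker(StatsLogger statsLogger) {
        // scope() returns a child StatsLogger; getCounter() returns a named Counter under that scope.
        StatsLogger scoped = statsLogger.scope("request_tracker");
        this.requestCounter = scoped.getCounter("requests");
    }

    public void onRequest() {
        requestCounter.inc(); // Counter also offers dec(), add(long) and get().
    }

    public static void main(String[] args) {
        // NullStatsLogger.INSTANCE is a no-op StatsLogger, convenient for tests.
        RequestTracker tracker = new RequestTracker(NullStatsLogger.INSTANCE);
        tracker.onRequest();
    }
}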
Example 1
Source File: BookKeeperDataStorageManager.java From herddb with Apache License 2.0
public BookKeeperDataStorageManager(
        String nodeId, Path tmpDirectory, int swapThreshold, ZookeeperMetadataStorageManager zk,
        BookkeeperCommitLogManager bk, StatsLogger logger
) {
    this.nodeId = nodeId;
    this.tmpDirectory = tmpDirectory;
    this.swapThreshold = swapThreshold;
    StatsLogger scope = logger.scope("bkdatastore");
    this.dataPageReads = scope.getOpStatsLogger("data_pagereads");
    this.dataPageWrites = scope.getOpStatsLogger("data_pagewrites");
    this.indexPageReads = scope.getOpStatsLogger("index_pagereads");
    this.indexPageWrites = scope.getOpStatsLogger("index_pagewrites");
    this.zkReads = scope.getCounter("zkReads");
    this.zkWrites = scope.getCounter("zkWrites");
    this.zkGetChildren = scope.getCounter("zkGetChildren");
    this.zk = zk;
    this.bk = bk;
    this.rootZkNode = zk.getBasePath() + "/data";
    this.baseZkNode = rootZkNode + "/" + nodeId;
}
Example 2
Source File: SimplePermitLimiter.java From distributedlog with Apache License 2.0
public SimplePermitLimiter(boolean darkmode, int permitsMax, StatsLogger statsLogger,
                           boolean singleton, Feature disableWriteLimitFeature) {
    this.permits = new AtomicInteger(0);
    this.permitsMax = permitsMax;
    this.darkmode = darkmode;
    this.disableWriteLimitFeature = disableWriteLimitFeature;

    // stats
    if (singleton) {
        statsLogger.registerGauge("num_permits", new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return permits.get();
            }
        });
    }
    acquireFailureCounter = statsLogger.getCounter("acquireFailure");
    permitsMetric = statsLogger.getOpStatsLogger("permits");
}
Example 3
Source File: SimplePermitLimiter.java From distributedlog with Apache License 2.0
public SimplePermitLimiter(boolean darkmode, int permitsMax, StatsLogger statsLogger,
                           boolean singleton, Feature disableWriteLimitFeature) {
    this.permits = new AtomicInteger(0);
    this.permitsMax = permitsMax;
    this.darkmode = darkmode;
    this.disableWriteLimitFeature = disableWriteLimitFeature;

    // stats
    if (singleton) {
        this.statsLogger = statsLogger;
        this.permitsGauge = new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return permits.get();
            }
        };
        this.permitsGaugeLabel = "permits";
        statsLogger.registerGauge(permitsGaugeLabel, permitsGauge);
    }
    acquireFailureCounter = statsLogger.getCounter("acquireFailure");
    permitsMetric = statsLogger.getOpStatsLogger("permits");
}
Example 4
Source File: BKSyncLogReader.java From distributedlog with Apache License 2.0
BKSyncLogReader(DistributedLogConfiguration conf,
                BKDistributedLogManager bkdlm,
                DLSN startDLSN,
                Optional<Long> startTransactionId,
                StatsLogger statsLogger) throws IOException {
    this.bkdlm = bkdlm;
    this.readHandler = bkdlm.createReadHandler(
            Optional.<String>absent(),
            this,
            true);
    this.maxReadAheadWaitTime = conf.getReadAheadWaitTime();
    this.idleErrorThresholdMillis = conf.getReaderIdleErrorThresholdMillis();
    this.shouldCheckIdleReader = idleErrorThresholdMillis > 0 && idleErrorThresholdMillis < Integer.MAX_VALUE;
    this.startTransactionId = startTransactionId;

    // start readahead
    startReadAhead(startDLSN);
    if (!startTransactionId.isPresent()) {
        positioned = true;
    }

    // Stats
    StatsLogger syncReaderStatsLogger = statsLogger.scope("sync_reader");
    idleReaderError = syncReaderStatsLogger.getCounter("idle_reader_error");
}
Example 5
Source File: StreamAcquireLimiter.java From distributedlog with Apache License 2.0
public StreamAcquireLimiter(StreamManager streamManager,
                            MovingAverageRate serviceRps,
                            double serviceRpsLimit,
                            StatsLogger statsLogger) {
    this.streamManager = streamManager;
    this.serviceRps = serviceRps;
    this.serviceRpsLimit = serviceRpsLimit;
    this.overlimitCounter = statsLogger.getCounter("overlimit");
}
Example 6
Source File: BKAsyncLogReader.java From distributedlog with Apache License 2.0
BKAsyncLogReader(BKDistributedLogManager bkdlm,
                 OrderedScheduler scheduler,
                 DLSN startDLSN,
                 Optional<String> subscriberId,
                 boolean returnEndOfStreamRecord,
                 StatsLogger statsLogger) {
    this.streamName = bkdlm.getStreamName();
    this.bkDistributedLogManager = bkdlm;
    this.scheduler = scheduler;
    this.readHandler = bkDistributedLogManager.createReadHandler(subscriberId, this, true);
    LOG.debug("Starting async reader at {}", startDLSN);
    this.startDLSN = startDLSN;
    this.scheduleDelayStopwatch = Stopwatch.createUnstarted();
    this.readNextDelayStopwatch = Stopwatch.createStarted();
    this.positionGapDetectionEnabled = bkdlm.getConf().getPositionGapDetectionEnabled();
    this.idleErrorThresholdMillis = bkdlm.getConf().getReaderIdleErrorThresholdMillis();
    this.returnEndOfStreamRecord = returnEndOfStreamRecord;

    // Stats
    StatsLogger asyncReaderStatsLogger = statsLogger.scope("async_reader");
    futureSetLatency = asyncReaderStatsLogger.getOpStatsLogger("future_set");
    scheduleLatency = asyncReaderStatsLogger.getOpStatsLogger("schedule");
    backgroundReaderRunTime = asyncReaderStatsLogger.getOpStatsLogger("background_read");
    readNextExecTime = asyncReaderStatsLogger.getOpStatsLogger("read_next_exec");
    timeBetweenReadNexts = asyncReaderStatsLogger.getOpStatsLogger("time_between_read_next");
    delayUntilPromiseSatisfied = asyncReaderStatsLogger.getOpStatsLogger("delay_until_promise_satisfied");
    idleReaderError = asyncReaderStatsLogger.getCounter("idle_reader_error");
    idleReaderCheckCount = asyncReaderStatsLogger.getCounter("idle_reader_check_total");
    idleReaderCheckIdleReadRequestCount = asyncReaderStatsLogger.getCounter("idle_reader_check_idle_read_requests");
    idleReaderCheckIdleReadAheadCount = asyncReaderStatsLogger.getCounter("idle_reader_check_idle_readahead");

    // Lock the stream if requested. The lock will be released when the reader is closed.
    this.lockStream = false;
    this.idleReaderTimeoutTask = scheduleIdleReaderTaskIfNecessary();
    this.lastProcessTime = Stopwatch.createStarted();
}
Example 7
Source File: BKLogSegmentEntryReader.java From distributedlog with Apache License 2.0
BKLogSegmentEntryReader(LogSegmentMetadata metadata,
                        LedgerHandle lh,
                        long startEntryId,
                        BookKeeper bk,
                        OrderedScheduler scheduler,
                        DistributedLogConfiguration conf,
                        StatsLogger statsLogger,
                        AsyncFailureInjector failureInjector) {
    this.metadata = metadata;
    this.lssn = metadata.getLogSegmentSequenceNumber();
    this.startSequenceId = metadata.getStartSequenceId();
    this.envelopeEntries = metadata.getEnvelopeEntries();
    this.deserializeRecordSet = conf.getDeserializeRecordSetOnReads();
    this.lh = lh;
    this.nextEntryId = Math.max(startEntryId, 0);
    this.bk = bk;
    this.conf = conf;
    this.numPrefetchEntries = conf.getNumPrefetchEntriesPerLogSegment();
    this.maxPrefetchEntries = conf.getMaxPrefetchEntriesPerLogSegment();
    this.scheduler = scheduler;
    this.openLedgerHandles = Lists.newArrayList();
    this.openLedgerHandles.add(lh);
    this.outstandingLongPoll = null;
    // create the readahead queue
    this.readAheadEntries = new LinkedBlockingQueue<CacheEntry>();
    // create the read request queue
    this.readQueue = new LinkedList<PendingReadRequest>();
    // read backoff settings
    this.readAheadWaitTime = conf.getReadAheadWaitTime();
    this.maxReadBackoffTime = 4 * conf.getReadAheadWaitTime();
    // other read settings
    this.skipBrokenEntries = conf.getReadAheadSkipBrokenEntries();
    // Failure Injection
    this.failureInjector = failureInjector;
    // Stats
    this.skippedBrokenEntriesCounter = statsLogger.getCounter("skipped_broken_entries");
}
Example 8
Source File: ComposableRequestLimiter.java From distributedlog with Apache License 2.0
public ComposableRequestLimiter(
        RateLimiter limiter,
        OverlimitFunction<RequestT> overlimitFunction,
        CostFunction<RequestT> costFunction,
        StatsLogger statsLogger) {
    checkNotNull(limiter);
    checkNotNull(overlimitFunction);
    checkNotNull(costFunction);
    this.limiter = limiter;
    this.overlimitFunction = overlimitFunction;
    this.costFunction = costFunction;
    this.overlimitCounter = statsLogger.getCounter("overlimit");
}
Example 9
Source File: LocalWorker.java From openmessaging-benchmark with Apache License 2.0
public LocalWorker(StatsLogger statsLogger) {
    this.statsLogger = statsLogger;

    StatsLogger producerStatsLogger = statsLogger.scope("producer");
    this.messagesSentCounter = producerStatsLogger.getCounter("messages_sent");
    this.bytesSentCounter = producerStatsLogger.getCounter("bytes_sent");
    this.publishLatencyStats = producerStatsLogger.getOpStatsLogger("produce_latency");

    StatsLogger consumerStatsLogger = statsLogger.scope("consumer");
    this.messagesReceivedCounter = consumerStatsLogger.getCounter("messages_recv");
    this.bytesReceivedCounter = consumerStatsLogger.getCounter("bytes_recv");
    this.endToEndLatencyStats = consumerStatsLogger.getOpStatsLogger("e2e_latency");
}
Example 10
Source File: MonitoredFuturePool.java From distributedlog with Apache License 2.0
/**
 * Create a future pool with stats exposed.
 *
 * @param futurePool underlying future pool to execute futures
 * @param statsLogger stats logger to receive exposed stats
 * @param traceTaskExecution flag to enable/disable exposing stats about task execution
 * @param traceTaskExecutionWarnTimeUs flag to enable/disable logging slow tasks
 *     whose execution time is above this value
 */
public MonitoredFuturePool(FuturePool futurePool,
                           StatsLogger statsLogger,
                           boolean traceTaskExecution,
                           long traceTaskExecutionWarnTimeUs) {
    this.futurePool = futurePool;
    this.traceTaskExecution = traceTaskExecution;
    this.traceTaskExecutionWarnTimeUs = traceTaskExecutionWarnTimeUs;
    this.statsLogger = statsLogger;
    this.taskPendingTime = statsLogger.getOpStatsLogger("task_pending_time");
    this.taskExecutionTime = statsLogger.getOpStatsLogger("task_execution_time");
    this.taskEnqueueTime = statsLogger.getOpStatsLogger("task_enqueue_time");
    this.taskPendingCounter = statsLogger.getCounter("tasks_pending");
}
Example 11
Source File: ComposableRequestLimiter.java From distributedlog with Apache License 2.0
public ComposableRequestLimiter(
        RateLimiter limiter,
        OverlimitFunction<Request> overlimitFunction,
        CostFunction<Request> costFunction,
        StatsLogger statsLogger) {
    Preconditions.checkNotNull(limiter);
    Preconditions.checkNotNull(overlimitFunction);
    Preconditions.checkNotNull(costFunction);
    this.limiter = limiter;
    this.overlimitFunction = overlimitFunction;
    this.costFunction = costFunction;
    this.overlimitCounter = statsLogger.getCounter("overlimit");
}
Example 12
Source File: StreamImpl.java From distributedlog with Apache License 2.0
StreamImpl(final String name,
           final Partition partition,
           String clientId,
           StreamManager streamManager,
           StreamOpStats streamOpStats,
           ServerConfiguration serverConfig,
           DistributedLogConfiguration dlConfig,
           DynamicDistributedLogConfiguration streamConf,
           FeatureProvider featureProvider,
           StreamConfigProvider streamConfigProvider,
           Namespace dlNamespace,
           OrderedScheduler scheduler,
           FatalErrorHandler fatalErrorHandler,
           HashedWheelTimer requestTimer,
           Timer futureTimer) {
    this.clientId = clientId;
    this.dlConfig = dlConfig;
    this.streamManager = streamManager;
    this.name = name;
    this.partition = partition;
    this.status = StreamStatus.UNINITIALIZED;
    this.lastException = new IOException("Fail to write record to stream " + name);
    this.streamConfigProvider = streamConfigProvider;
    this.dlNamespace = dlNamespace;
    this.featureRateLimitDisabled = featureProvider.getFeature(
            ServerFeatureKeys.SERVICE_RATE_LIMIT_DISABLED.name().toLowerCase());
    this.scheduler = scheduler;
    this.serviceTimeoutMs = serverConfig.getServiceTimeoutMs();
    this.streamProbationTimeoutMs = serverConfig.getStreamProbationTimeoutMs();
    this.writerCloseTimeoutMs = serverConfig.getWriterCloseTimeoutMs();
    this.failFastOnStreamNotReady = dlConfig.getFailFastOnStreamNotReady();
    this.fatalErrorHandler = fatalErrorHandler;
    this.dynConf = streamConf;
    StatsLogger limiterStatsLogger = BroadCastStatsLogger.two(
            streamOpStats.baseScope("stream_limiter"),
            streamOpStats.streamRequestScope(partition, "limiter"));
    this.limiter = new StreamRequestLimiter(name, dynConf, limiterStatsLogger, featureRateLimitDisabled);
    this.requestTimer = requestTimer;
    this.futureTimer = futureTimer;

    // Stats
    this.streamLogger = streamOpStats.streamRequestStatsLogger(partition);
    this.limiterStatLogger = streamOpStats.baseScope("request_limiter");
    this.streamExceptionStatLogger = streamLogger.scope("exceptions");
    this.serviceTimeout = streamOpStats.baseCounter("serviceTimeout");
    StatsLogger streamsStatsLogger = streamOpStats.baseScope("streams");
    this.streamAcquireStat = streamsStatsLogger.getOpStatsLogger("acquire");
    this.pendingOpsCounter = streamOpStats.baseCounter("pending_ops");
    this.unexpectedExceptions = streamOpStats.baseCounter("unexpected_exceptions");
    this.exceptionStatLogger = streamOpStats.requestScope("exceptions");
    this.writerCloseStatLogger = streamsStatsLogger.getOpStatsLogger("writer_close");
    this.writerCloseTimeoutCounter = streamsStatsLogger.getCounter("writer_close_timeouts");
    // Gauges
    this.streamStatusGauge = new Gauge<Number>() {
        @Override
        public Number getDefaultValue() {
            return StreamStatus.UNINITIALIZED.getCode();
        }

        @Override
        public Number getSample() {
            return status.getCode();
        }
    };
}
Example 13
Source File: TableManager.java From herddb with Apache License 2.0
TableManager(
        Table table, CommitLog log, MemoryManager memoryManager,
        DataStorageManager dataStorageManager, TableSpaceManager tableSpaceManager,
        String tableSpaceUUID, long createdInTransaction
) throws DataStorageManagerException {
    this.stats = new TableManagerStatsImpl();
    this.log = log;
    this.table = table;
    this.tableSpaceManager = tableSpaceManager;
    this.dataStorageManager = dataStorageManager;
    this.createdInTransaction = createdInTransaction;
    this.tableSpaceUUID = tableSpaceUUID;
    this.tableContext = buildTableContext();
    this.maxLogicalPageSize = memoryManager.getMaxLogicalPageSize();
    this.keyToPage = dataStorageManager.createKeyToPageMap(tableSpaceUUID, table.uuid, memoryManager);
    this.pageReplacementPolicy = memoryManager.getDataPageReplacementPolicy();
    this.pages = new ConcurrentHashMap<>();
    this.newPages = new ConcurrentHashMap<>();

    this.dirtyThreshold = tableSpaceManager.getDbmanager().getServerConfiguration().getDouble(
            ServerConfiguration.PROPERTY_DIRTY_PAGE_THRESHOLD,
            ServerConfiguration.PROPERTY_DIRTY_PAGE_THRESHOLD_DEFAULT);
    this.fillThreshold = tableSpaceManager.getDbmanager().getServerConfiguration().getDouble(
            ServerConfiguration.PROPERTY_FILL_PAGE_THRESHOLD,
            ServerConfiguration.PROPERTY_FILL_PAGE_THRESHOLD_DEFAULT);

    long checkpointTargetTime = tableSpaceManager.getDbmanager().getServerConfiguration().getLong(
            ServerConfiguration.PROPERTY_CHECKPOINT_DURATION,
            ServerConfiguration.PROPERTY_CHECKPOINT_DURATION_DEFAULT);
    this.checkpointTargetTime = checkpointTargetTime < 0 ? Long.MAX_VALUE : checkpointTargetTime;
    long cleanupTargetTime = tableSpaceManager.getDbmanager().getServerConfiguration().getLong(
            ServerConfiguration.PROPERTY_CLEANUP_DURATION,
            ServerConfiguration.PROPERTY_CLEANUP_DURATION_DEFAULT);
    this.cleanupTargetTime = cleanupTargetTime < 0 ? Long.MAX_VALUE : cleanupTargetTime;
    long compactionTargetTime = tableSpaceManager.getDbmanager().getServerConfiguration().getLong(
            ServerConfiguration.PROPERTY_COMPACTION_DURATION,
            ServerConfiguration.PROPERTY_COMPACTION_DURATION_DEFAULT);
    this.compactionTargetTime = compactionTargetTime < 0 ? Long.MAX_VALUE : compactionTargetTime;

    StatsLogger tableMetrics = tableSpaceManager.tablespaceStasLogger.scope("table_" + table.name);
    this.checkpointProcessedDirtyRecords = tableMetrics.getCounter("checkpoint_processed_dirty_records");

    int[] pkTypes = new int[table.primaryKey.length];
    for (int i = 0; i < table.primaryKey.length; i++) {
        Column col = table.getColumn(table.primaryKey[i]);
        pkTypes[i] = col.type;
    }
    this.keyToPageSortedAscending = keyToPage.isSortedAscending(pkTypes);

    boolean nolocks = tableSpaceManager.getDbmanager().getServerConfiguration().getBoolean(
            ServerConfiguration.PROPERTY_TABLEMANAGER_DISABLE_ROWLEVELLOCKS,
            ServerConfiguration.PROPERTY_TABLEMANAGER_DISABLE_ROWLEVELLOCKS_DEFAULT
    );
    if (nolocks) {
        locksManager = new NullLockManager();
    } else {
        int writeLockTimeout = tableSpaceManager.getDbmanager().getServerConfiguration().getInt(
                ServerConfiguration.PROPERTY_WRITELOCK_TIMEOUT,
                ServerConfiguration.PROPERTY_WRITELOCK_TIMEOUT_DEFAULT
        );
        int readLockTimeout = tableSpaceManager.getDbmanager().getServerConfiguration().getInt(
                ServerConfiguration.PROPERTY_READLOCK_TIMEOUT,
                ServerConfiguration.PROPERTY_READLOCK_TIMEOUT_DEFAULT
        );
        LocalLockManager newLocksManager = new LocalLockManager();
        newLocksManager.setWriteLockTimeout(writeLockTimeout);
        newLocksManager.setReadLockTimeout(readLockTimeout);
        locksManager = newLocksManager;
    }
}
Example 14
Source File: ZKSessionLock.java From distributedlog with Apache License 2.0
/**
 * Creates a distributed lock using the given {@code zkClient} to coordinate locking.
 *
 * @param zkClient The ZooKeeper client to use.
 * @param lockPath The path used to manage the lock under.
 * @param clientId client id use for lock.
 * @param lockStateExecutor executor to execute all lock state changes.
 * @param lockOpTimeout timeout of lock operations
 * @param statsLogger stats logger
 */
public ZKSessionLock(ZooKeeperClient zkClient,
                     String lockPath,
                     String clientId,
                     OrderedScheduler lockStateExecutor,
                     long lockOpTimeout,
                     StatsLogger statsLogger,
                     DistributedLockContext lockContext)
        throws IOException {
    this.zkClient = zkClient;
    try {
        this.zk = zkClient.get();
    } catch (ZooKeeperClient.ZooKeeperConnectionException zce) {
        throw new ZKException("Failed to get zookeeper client for lock " + lockPath,
                KeeperException.Code.CONNECTIONLOSS);
    } catch (InterruptedException e) {
        throw new DLInterruptedException("Interrupted on getting zookeeper client for lock " + lockPath, e);
    }
    this.lockPath = lockPath;
    this.lockId = Pair.of(clientId, this.zk.getSessionId());
    this.lockContext = lockContext;
    this.lockStateExecutor = lockStateExecutor;
    this.lockState = new StateManagement();
    this.lockOpTimeout = lockOpTimeout;

    this.tryStats = statsLogger.getOpStatsLogger("tryAcquire");
    this.tryTimeouts = statsLogger.getCounter("tryTimeouts");
    this.unlockStats = statsLogger.getOpStatsLogger("unlock");

    // Attach interrupt handler to acquire future so clients can abort the future.
    this.acquireFuture = FutureUtils.createFuture();
    this.acquireFuture.whenComplete((value, cause) -> {
        if (null != cause) {
            // This will set the lock state to closed, and begin to cleanup the zk lock node.
            // We have to be careful not to block here since doing so blocks the ordered lock
            // state executor which can cause deadlocks depending on how futures are chained.
            ZKSessionLock.this.asyncUnlock(cause);
            // Note re. logging and exceptions: errors are already logged by unlockAsync.
        }
    });
}
Example 15
Source File: FileCommitLog.java From herddb with Apache License 2.0
public FileCommitLog(
        Path logDirectory, String tableSpaceName,
        long maxLogFileSize, ExecutorService fsyncThreadPool, StatsLogger statslogger,
        Consumer<FileCommitLog> onClose, int maxUnsynchedBatchSize,
        int maxUnsynchedBatchBytes, int maxSyncTime, boolean requireSync, boolean enableO_DIRECT
) {
    this.maxUnsyncedBatchSize = maxUnsynchedBatchSize;
    this.maxUnsyncedBatchBytes = maxUnsynchedBatchBytes;
    this.maxSyncTime = TimeUnit.MILLISECONDS.toNanos(maxSyncTime);
    this.requireSync = requireSync;
    this.enableO_DIRECT = enableO_DIRECT && OpenFileUtils.isO_DIRECT_Supported();
    this.onClose = onClose;
    this.maxLogFileSize = maxLogFileSize;
    this.tableSpaceName = tableSpaceName;
    this.logDirectory = logDirectory.toAbsolutePath();
    this.spool = new Thread(new SpoolTask(), "commitlog-" + tableSpaceName);
    this.spool.setDaemon(true);
    this.statsFsyncTime = statslogger.getOpStatsLogger("fsync");
    this.statsEntryLatency = statslogger.getOpStatsLogger("entryLatency");
    this.statsEntrySyncLatency = statslogger.getOpStatsLogger("entrySyncLatency");
    this.syncSize = statslogger.getOpStatsLogger("syncBatchSize");
    this.syncBytes = statslogger.getOpStatsLogger("syncBatchBytes");
    this.deferredSyncs = statslogger.getCounter("deferredSyncs");
    this.newfiles = statslogger.getCounter("newfiles");
    statslogger.registerGauge("queuesize", new Gauge<Integer>() {
        @Override
        public Integer getDefaultValue() {
            return 0;
        }

        @Override
        public Integer getSample() {
            return queueSize.get();
        }
    });
    statslogger.registerGauge("pendingentries", new Gauge<Integer>() {
        @Override
        public Integer getDefaultValue() {
            return 0;
        }

        @Override
        public Integer getSample() {
            return pendingEntries.get();
        }
    });
    this.fsyncThreadPool = fsyncThreadPool;
    LOGGER.log(Level.FINE, "tablespace {2}, logdirectory: {0}, maxLogFileSize {1} bytes",
            new Object[]{logDirectory, maxLogFileSize, tableSpaceName});
}
Example 16
Source File: StatsFilter.java From distributedlog with Apache License 2.0
public StatsFilter(StatsLogger stats) {
    this.stats = stats;
    this.outstandingAsync = stats.getCounter("outstandingAsync");
    this.serviceExec = stats.getOpStatsLogger("serviceExec");
}
Example 17
Source File: ReadAheadWorker.java From distributedlog with Apache License 2.0
public ReadAheadWorker(DistributedLogConfiguration conf,
                       DynamicDistributedLogConfiguration dynConf,
                       ZKLogMetadataForReader logMetadata,
                       BKLogHandler ledgerManager,
                       ZooKeeperClient zkc,
                       OrderedScheduler scheduler,
                       LedgerHandleCache handleCache,
                       LedgerReadPosition startPosition,
                       ReadAheadCache readAheadCache,
                       boolean isHandleForReading,
                       ReadAheadExceptionsLogger readAheadExceptionsLogger,
                       StatsLogger handlerStatsLogger,
                       StatsLogger readAheadPerStreamStatsLogger,
                       AlertStatsLogger alertStatsLogger,
                       AsyncFailureInjector failureInjector,
                       AsyncNotification notification) {
    // Log information
    this.fullyQualifiedName = logMetadata.getFullyQualifiedName();
    this.conf = conf;
    this.dynConf = dynConf;
    this.logMetadata = logMetadata;
    this.bkLedgerManager = ledgerManager;
    this.isHandleForReading = isHandleForReading;
    this.notification = notification;
    // Resources
    this.zkc = zkc;
    this.scheduler = scheduler;
    this.handleCache = handleCache;
    this.readAheadCache = readAheadCache;
    // Readahead status
    this.startReadPosition = new LedgerReadPosition(startPosition);
    this.nextReadAheadPosition = new LedgerReadPosition(startPosition);
    // LogSegments
    this.getLedgersWatcher = this.zkc.getWatcherManager()
            .registerChildWatcher(logMetadata.getLogSegmentsPath(), this);
    // Failure Detection
    this.failureInjector = failureInjector;
    // Tracing
    this.metadataLatencyWarnThresholdMillis = conf.getMetadataLatencyWarnThresholdMillis();
    this.noLedgerExceptionOnReadLACThreshold =
            conf.getReadAheadNoSuchLedgerExceptionOnReadLACErrorThresholdMillis() / conf.getReadAheadWaitTime();
    this.tracker = new ReadAheadTracker(logMetadata.getLogName(), readAheadCache,
            ReadAheadPhase.SCHEDULE_READAHEAD, readAheadPerStreamStatsLogger);
    this.resumeStopWatch = Stopwatch.createUnstarted();
    // Misc
    this.readAheadSkipBrokenEntries = conf.getReadAheadSkipBrokenEntries();
    // Stats
    this.alertStatsLogger = alertStatsLogger;
    this.readAheadPerStreamStatsLogger = readAheadPerStreamStatsLogger;
    StatsLogger readAheadStatsLogger = handlerStatsLogger.scope("readahead_worker");
    readAheadWorkerWaits = readAheadStatsLogger.getCounter("wait");
    readAheadEntryPiggyBackHits = readAheadStatsLogger.getCounter("entry_piggy_back_hits");
    readAheadEntryPiggyBackMisses = readAheadStatsLogger.getCounter("entry_piggy_back_misses");
    readAheadReadEntriesStat = readAheadStatsLogger.getOpStatsLogger("read_entries");
    readAheadReadLACAndEntryCounter = readAheadStatsLogger.getCounter("read_lac_and_entry_counter");
    readAheadCacheFullCounter = readAheadStatsLogger.getCounter("cache_full");
    readAheadSkippedBrokenEntries = readAheadStatsLogger.getCounter("skipped_broken_entries");
    readAheadCacheResumeStat = readAheadStatsLogger.getOpStatsLogger("resume");
    readAheadLacLagStats = readAheadStatsLogger.getOpStatsLogger("read_lac_lag");
    longPollInterruptionStat = readAheadStatsLogger.getOpStatsLogger("long_poll_interruption");
    notificationExecutionStat = readAheadStatsLogger.getOpStatsLogger("notification_execution");
    metadataReinitializationStat = readAheadStatsLogger.getOpStatsLogger("metadata_reinitialization");
    idleReaderWarn = readAheadStatsLogger.getCounter("idle_reader_warn");
    this.readAheadExceptionsLogger = readAheadExceptionsLogger;
}
Example 18
Source File: BKAsyncLogReaderDLSN.java From distributedlog with Apache License 2.0
BKAsyncLogReaderDLSN(BKDistributedLogManager bkdlm,
                     ScheduledExecutorService executorService,
                     OrderedScheduler lockStateExecutor,
                     DLSN startDLSN,
                     Optional<String> subscriberId,
                     boolean returnEndOfStreamRecord,
                     boolean deserializeRecordSet,
                     StatsLogger statsLogger) {
    this.bkDistributedLogManager = bkdlm;
    this.executorService = executorService;
    this.bkLedgerManager = bkDistributedLogManager.createReadHandler(subscriberId,
            lockStateExecutor, this, deserializeRecordSet, true);
    sessionExpireWatcher = this.bkLedgerManager.registerExpirationHandler(this);
    LOG.debug("Starting async reader at {}", startDLSN);
    this.startDLSN = startDLSN;
    this.scheduleDelayStopwatch = Stopwatch.createUnstarted();
    this.readNextDelayStopwatch = Stopwatch.createStarted();
    this.positionGapDetectionEnabled = bkdlm.getConf().getPositionGapDetectionEnabled();
    this.idleErrorThresholdMillis = bkdlm.getConf().getReaderIdleErrorThresholdMillis();
    this.returnEndOfStreamRecord = returnEndOfStreamRecord;

    // Failure Injection
    this.failureInjector = AsyncRandomFailureInjector.newBuilder()
            .injectDelays(bkdlm.getConf().getEIInjectReadAheadDelay(),
                    bkdlm.getConf().getEIInjectReadAheadDelayPercent(),
                    bkdlm.getConf().getEIInjectMaxReadAheadDelayMs())
            .injectErrors(false, 10)
            .injectStops(bkdlm.getConf().getEIInjectReadAheadStall(), 10)
            .injectCorruption(bkdlm.getConf().getEIInjectReadAheadBrokenEntries())
            .build();

    // Stats
    StatsLogger asyncReaderStatsLogger = statsLogger.scope("async_reader");
    futureSetLatency = asyncReaderStatsLogger.getOpStatsLogger("future_set");
    scheduleLatency = asyncReaderStatsLogger.getOpStatsLogger("schedule");
    backgroundReaderRunTime = asyncReaderStatsLogger.getOpStatsLogger("background_read");
    readNextExecTime = asyncReaderStatsLogger.getOpStatsLogger("read_next_exec");
    timeBetweenReadNexts = asyncReaderStatsLogger.getOpStatsLogger("time_between_read_next");
    delayUntilPromiseSatisfied = asyncReaderStatsLogger.getOpStatsLogger("delay_until_promise_satisfied");
    idleReaderError = asyncReaderStatsLogger.getCounter("idle_reader_error");
    idleReaderCheckCount = asyncReaderStatsLogger.getCounter("idle_reader_check_total");
    idleReaderCheckIdleReadRequestCount = asyncReaderStatsLogger.getCounter("idle_reader_check_idle_read_requests");
    idleReaderCheckIdleReadAheadCount = asyncReaderStatsLogger.getCounter("idle_reader_check_idle_readahead");

    // Lock the stream if requested. The lock will be released when the reader is closed.
    this.lockStream = false;
    this.idleReaderTimeoutTask = scheduleIdleReaderTaskIfNecessary();
}
Example 19
Source File: ZKSessionLock.java From distributedlog with Apache License 2.0
/**
 * Creates a distributed lock using the given {@code zkClient} to coordinate locking.
 *
 * @param zkClient The ZooKeeper client to use.
 * @param lockPath The path used to manage the lock under.
 * @param clientId client id use for lock.
 * @param lockStateExecutor executor to execute all lock state changes.
 * @param lockOpTimeout timeout of lock operations
 * @param statsLogger stats logger
 */
public ZKSessionLock(ZooKeeperClient zkClient,
                     String lockPath,
                     String clientId,
                     OrderedScheduler lockStateExecutor,
                     long lockOpTimeout,
                     StatsLogger statsLogger,
                     DistributedLockContext lockContext)
        throws IOException {
    this.zkClient = zkClient;
    try {
        this.zk = zkClient.get();
    } catch (ZooKeeperClient.ZooKeeperConnectionException zce) {
        throw new ZKException("Failed to get zookeeper client for lock " + lockPath,
                KeeperException.Code.CONNECTIONLOSS);
    } catch (InterruptedException e) {
        throw new DLInterruptedException("Interrupted on getting zookeeper client for lock " + lockPath, e);
    }
    this.lockPath = lockPath;
    this.lockId = Pair.of(clientId, this.zk.getSessionId());
    this.lockContext = lockContext;
    this.lockStateExecutor = lockStateExecutor;
    this.lockState = new StateManagement();
    this.lockOpTimeout = lockOpTimeout;

    this.tryStats = statsLogger.getOpStatsLogger("tryAcquire");
    this.tryTimeouts = statsLogger.getCounter("tryTimeouts");
    this.unlockStats = statsLogger.getOpStatsLogger("unlock");

    // Attach interrupt handler to acquire future so clients can abort the future.
    this.acquireFuture = new Promise<Boolean>(new com.twitter.util.Function<Throwable, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Throwable t) {
            // This will set the lock state to closed, and begin to cleanup the zk lock node.
            // We have to be careful not to block here since doing so blocks the ordered lock
            // state executor which can cause deadlocks depending on how futures are chained.
            ZKSessionLock.this.asyncUnlock(t);
            // Note re. logging and exceptions: errors are already logged by unlockAsync.
            return BoxedUnit.UNIT;
        }
    });
}