Java Code Examples for java.util.concurrent.Executors#newScheduledThreadPool()
The following examples show how to use java.util.concurrent.Executors#newScheduledThreadPool().
Each example is taken from an open-source project; the original project, source file, and license are noted in the header above the code.
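For orientation before the project examples, here is a minimal, self-contained sketch of the API: a one-shot delayed task plus a fixed-rate periodic task on a small scheduled pool. The class name ScheduledPoolSketch and the printed messages are illustrative only and are not taken from any of the projects below.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // Pool of up to 2 threads; threads are created on demand as tasks are scheduled.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

        // One-shot task after a 1-second delay.
        scheduler.schedule(() -> System.out.println("one-shot task"), 1, TimeUnit.SECONDS);

        // Periodic task: each run is scheduled 500 ms after the *start* of the previous run.
        scheduler.scheduleAtFixedRate(() -> System.out.println("periodic tick"), 0, 500, TimeUnit.MILLISECONDS);

        Thread.sleep(2000);

        // shutdown() cancels further periodic runs (by default) but lets already-queued
        // one-shot delayed tasks finish; shutdownNow() would cancel pending tasks immediately.
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}

Several of the examples below use scheduleWithFixedDelay instead, which measures the delay from the end of the previous run rather than from its start.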
Example 1
Source File: DefaultAudioPlayerManager.java From lavaplayer with Apache License 2.0 | 6 votes |
/**
 * Create a new instance
 */
public DefaultAudioPlayerManager() {
  sourceManagers = new ArrayList<>();

  // Executors
  trackPlaybackExecutorService = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 10, TimeUnit.SECONDS,
      new SynchronousQueue<>(), new DaemonThreadFactory("playback"));
  trackInfoExecutorService = ExecutorTools.createEagerlyScalingExecutor(1, DEFAULT_LOADER_POOL_SIZE,
      TimeUnit.SECONDS.toMillis(30), LOADER_QUEUE_CAPACITY, new DaemonThreadFactory("info-loader"));
  scheduledExecutorService = Executors.newScheduledThreadPool(1, new DaemonThreadFactory("manager"));
  orderedInfoExecutor = new OrderedExecutor(trackInfoExecutorService);

  // Configuration
  trackStuckThreshold = TimeUnit.MILLISECONDS.toNanos(10000);
  configuration = new AudioConfiguration();
  cleanupThreshold = new AtomicLong(DEFAULT_CLEANUP_THRESHOLD);
  frameBufferDuration = DEFAULT_FRAME_BUFFER_DURATION;
  useSeekGhosting = true;

  // Additional services
  remoteNodeManager = new RemoteNodeManager(this);
  garbageCollectionMonitor = new GarbageCollectionMonitor(scheduledExecutorService);
  lifecycleManager = new AudioPlayerLifecycleManager(scheduledExecutorService, cleanupThreshold);
  lifecycleManager.initialise();
}
Example 2
Source File: SwitcherView.java From SwitcherView with GNU General Public License v3.0 | 6 votes |
private void startTimer() {
  if (mScheduledExecutorService == null) {
    // Scheduled task
    mScheduledExecutorService = Executors.newScheduledThreadPool(2);
    // Recurring task: the start time of each run is calculated from the start time of the previous run
    mScheduledExecutorService.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        handler.post(new Runnable() {
          @Override
          public void run() {
            updateTextSwitcher(flag);
          }
        });
      }
    }, 0, timePeriod, TimeUnit.MILLISECONDS);
  }
}
Example 3
Source File: MongoSink.java From pulsar with Apache License 2.0 | 6 votes |
@Override
public void open(Map<String, Object> config, SinkContext sinkContext) throws Exception {
    log.info("Open MongoDB Sink");

    mongoConfig = MongoConfig.load(config);
    mongoConfig.validate(true, true);

    if (clientProvider != null) {
        mongoClient = clientProvider.get();
    } else {
        mongoClient = MongoClients.create(mongoConfig.getMongoUri());
    }

    final MongoDatabase db = mongoClient.getDatabase(mongoConfig.getDatabase());
    collection = db.getCollection(mongoConfig.getCollection());

    incomingList = Lists.newArrayList();
    flushExecutor = Executors.newScheduledThreadPool(1);
    flushExecutor.scheduleAtFixedRate(() -> flush(),
            mongoConfig.getBatchTimeMs(), mongoConfig.getBatchTimeMs(), TimeUnit.MILLISECONDS);
}
Example 4
Source File: LuceneEventIndex.java From localization_nifi with Apache License 2.0 | 6 votes |
@Override
public void initialize(final EventStore eventStore) {
    this.eventStore = eventStore;
    directoryManager.initialize();

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Provenance Repository Maintenance"));
    maintenanceExecutor.scheduleWithFixedDelay(() -> performMaintenance(), 1, 1, TimeUnit.MINUTES);
    maintenanceExecutor.scheduleWithFixedDelay(new RemoveExpiredQueryResults(), 30, 30, TimeUnit.SECONDS);

    cachedQueries.add(new LatestEventsQuery());
    cachedQueries.add(new LatestEventsPerProcessorQuery());

    final Optional<Integer> warmCacheMinutesOption = config.getWarmCacheFrequencyMinutes();
    if (warmCacheMinutesOption.isPresent() && warmCacheMinutesOption.get() > 0) {
        for (final File storageDir : config.getStorageDirectories().values()) {
            final int minutes = warmCacheMinutesOption.get();
            cacheWarmerExecutor.scheduleWithFixedDelay(new LuceneCacheWarmer(storageDir, indexManager), 1, minutes, TimeUnit.MINUTES);
        }
    }
}
Example 5
Source File: LoggingUpdaterServiceComponent.java From carbon-commons with Apache License 2.0 | 5 votes |
@Activate
public void activate(ComponentContext componentContext) {
    try {
        DataHolder.getInstance().setModifiedTime(LoggingUpdaterUtil.readModifiedTime());
        LogConfigUpdater logConfigUpdater = new LogConfigUpdater(DataHolder.getInstance().getConfigurationAdmin());
        ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1);
        DataHolder.getInstance().setScheduledExecutorService(scheduledExecutorService);
        scheduledExecutorService.scheduleAtFixedRate(logConfigUpdater, 5000L, 5000L, TimeUnit.MILLISECONDS);
    } catch (LoggingUpdaterException e) {
        log.error("Error while Activating LoggingUpdater component", e);
    }
}
Example 6
Source File: LuceneEventIndex.java From nifi with Apache License 2.0 | 5 votes |
private void triggerReindexOfDefunctIndices() {
    final ExecutorService rebuildIndexExecutor = Executors.newScheduledThreadPool(2,
        new NamedThreadFactory("Rebuild Defunct Provenance Indices", true));

    final List<File> allIndexDirectories = directoryManager.getAllIndexDirectories(true, true);
    allIndexDirectories.sort(DirectoryUtils.OLDEST_INDEX_FIRST);
    final List<File> defunctIndices = detectDefunctIndices(allIndexDirectories);

    final AtomicInteger rebuildCount = new AtomicInteger(0);
    final int totalCount = defunctIndices.size();

    for (final File defunctIndex : defunctIndices) {
        try {
            if (isLucene4IndexPresent(defunctIndex)) {
                logger.info("Encountered Lucene 8 index {} and also the corresponding Lucene 4 index; will only trigger rebuilding of one directory.", defunctIndex);
                rebuildCount.incrementAndGet();
                continue;
            }

            logger.info("Determined that Lucene Index Directory {} is defunct. Will destroy and rebuild index", defunctIndex);

            final Tuple<Long, Long> timeRange = getTimeRange(defunctIndex, allIndexDirectories);
            rebuildIndexExecutor.submit(new MigrateDefunctIndex(defunctIndex, indexManager, directoryManager,
                timeRange.getKey(), timeRange.getValue(), eventStore, eventReporter, eventConverter, rebuildCount, totalCount));
        } catch (final Exception e) {
            logger.error("Detected defunct index {} but failed to rebuild index", defunctIndex, e);
        }
    }

    rebuildIndexExecutor.shutdown();

    if (!allIndexDirectories.isEmpty()) {
        final File newestIndexDirectory = allIndexDirectories.get(allIndexDirectories.size() - 1);
        if (defunctIndices.contains(newestIndexDirectory)) {
            newestIndexDefunct = true;
        }
    }
}
Example 7
Source File: AbstractAwsIotClient.java From aws-iot-device-sdk-java with Apache License 2.0 | 5 votes |
public void connect(long timeout, boolean blocking) throws AWSIotException, AWSIotTimeoutException {
    synchronized (this) {
        if (executionService == null) {
            executionService = Executors.newScheduledThreadPool(numOfClientThreads);
        }
    }

    AwsIotCompletion completion = new AwsIotCompletion(timeout, !blocking);
    connection.connect(completion);
    completion.get(this);
}
Example 8
Source File: TestNoDoubleAssign.java From helix with Apache License 2.0 | 5 votes |
/**
 * Fetch the JobContext for all jobs in ZK and check that no two tasks are running on the same
 * Participant.
 */
private void pollForDoubleAssign() {
  _executorServicePoll = Executors.newScheduledThreadPool(THREAD_COUNT);
  _executorServicePoll.scheduleAtFixedRate(() -> {
    if (!_existsDoubleAssign.get()) {
      // Get JobContexts and test that they are assigned to disparate Participants
      for (String job : _jobNames) {
        JobContext jobContext = _driver.getJobContext(job);
        if (jobContext == null) {
          continue;
        }
        Set<String> instanceCache = new HashSet<>();
        for (int partition : jobContext.getPartitionSet()) {
          if (jobContext.getPartitionState(partition) == TaskPartitionState.RUNNING) {
            String assignedParticipant = jobContext.getAssignedParticipant(partition);
            if (assignedParticipant != null) {
              if (instanceCache.contains(assignedParticipant)) {
                // Two tasks running on the same instance at the same time
                _existsDoubleAssign.set(true);
                return;
              }
              instanceCache.add(assignedParticipant);
            }
          }
        }
      }
    }
  }, 0L, POLL_DELAY, TimeUnit.MILLISECONDS);
}
Example 9
Source File: StorageCacheManagerImpl.java From cloudstack with Apache License 2.0 | 5 votes |
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
    cacheReplacementEnabled = Boolean.parseBoolean(configDao.getValue(Config.StorageCacheReplacementEnabled.key()));
    cacheReplaceMentInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementInterval.key()), 86400);
    workers = NumbersUtil.parseInt(configDao.getValue(Config.ExpungeWorkers.key()), 10);
    executors = Executors.newScheduledThreadPool(workers, new NamedThreadFactory("StorageCacheManager-cache-replacement"));
    return true;
}
Example 10
Source File: S3ScanWriterTest.java From emodb with Apache License 2.0 | 5 votes |
@Test
public void testWriteWithCancel() throws Exception {
    URI baseUri = URI.create("s3://test-bucket/scan");
    ScheduledExecutorService uploadService = Executors.newScheduledThreadPool(2);

    try {
        PutObjectResult putObjectResult = new PutObjectResult();
        putObjectResult.setETag("dummy-etag");

        AmazonS3 amazonS3 = mock(AmazonS3.class);
        when(amazonS3.putObject(argThat(putsIntoBucket("test-bucket"))))
                .thenReturn(putObjectResult);

        AmazonS3Provider amazonS3Provider = mock(AmazonS3Provider.class);
        when(amazonS3Provider.getS3ClientForBucket("test-bucket")).thenReturn(amazonS3);

        S3ScanWriter scanWriter = new S3ScanWriter(1, baseUri, Optional.of(2), new MetricRegistry(),
                amazonS3Provider, uploadService, new ObjectMapper());
        ScanDestinationWriter scanDestinationWriters[] = new ScanDestinationWriter[2];

        for (int i = 0; i < 2; i++) {
            scanDestinationWriters[i] = scanWriter.writeShardRows("table" + i, "p0", 0, i);
            scanDestinationWriters[i].writeDocument(ImmutableMap.of("type", "review", "rating", i));
        }

        // Simulate canceling shardWriter[0] in response to a failure.
        scanDestinationWriters[0].closeAndCancel();
        // Close shardWriter[1] normally
        scanDestinationWriters[1].closeAndTransferAsync(Optional.of(1));

        verifyAllTransfersComplete(scanWriter, uploadService);
    } finally {
        uploadService.shutdownNow();
    }
}
Example 11
Source File: FileStreamKeeper.java From DataLink with Apache License 2.0 | 5 votes |
public static void start() {
    executorService = Executors.newScheduledThreadPool(1, new NamedThreadFactory("File-Stream-Holder"));
    executorService.scheduleAtFixedRate(
            FileStreamKeeper::check,
            CHECK_RATE,
            CHECK_RATE,
            TimeUnit.MILLISECONDS
    );
    LOGGER.info("File Stream Keeper is started.");
}
Example 12
Source File: ScheduledThreadPoolDemo.java From javacore with Creative Commons Attribution Share Alike 4.0 International | 5 votes |
private static void scheduleAtFixedRate() {
    ScheduledExecutorService executorService = Executors.newScheduledThreadPool(5);
    for (int i = 0; i < 100; i++) {
        executorService.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                System.out.println(Thread.currentThread().getName() + " executed");
            }
        }, 1, 1, TimeUnit.SECONDS);
    }
    executorService.shutdown();
}
Example 13
Source File: PeriodicNotificationCoordinatorExecutor.java From rya with Apache License 2.0 | 5 votes |
@Override
public void start() {
    if (!running) {
        producerThreadPool = Executors.newScheduledThreadPool(numThreads);
        running = true;
    }
}
Example 14
Source File: ElasticScheduler.java From reactor-core with Apache License 2.0 | 5 votes |
ElasticScheduler(ThreadFactory factory, int ttlSeconds) {
    if (ttlSeconds < 0) {
        throw new IllegalArgumentException("ttlSeconds must be positive, was: " + ttlSeconds);
    }
    this.ttlSeconds = ttlSeconds;
    this.factory = factory;
    this.cache = new ConcurrentLinkedDeque<>();
    this.all = new ConcurrentLinkedQueue<>();
    this.evictor = Executors.newScheduledThreadPool(1, EVICTOR_FACTORY);
    this.evictor.scheduleAtFixedRate(this::eviction, ttlSeconds, ttlSeconds, TimeUnit.SECONDS);
}
Example 15
Source File: LuceneEventIndex.java From nifi with Apache License 2.0 | 5 votes |
@Override
public void initialize(final EventStore eventStore) {
    this.eventStore = eventStore;
    directoryManager.initialize();

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Provenance Repository Maintenance"));
    maintenanceExecutor.scheduleWithFixedDelay(this::performMaintenance, 1, 1, TimeUnit.MINUTES);
    maintenanceExecutor.scheduleWithFixedDelay(this::purgeObsoleteQueries, 30, 30, TimeUnit.SECONDS);

    cachedQueries.add(new LatestEventsQuery());
    cachedQueries.add(new LatestEventsPerProcessorQuery());

    triggerReindexOfDefunctIndices();
    triggerCacheWarming();
}
Example 16
Source File: ScheduledTaskBeanLocatorTest.java From bugsnag-java with MIT License | 5 votes |
@Test
public void findExecutorByName() {
    ScheduledExecutorService expected = Executors.newScheduledThreadPool(4);
    Throwable exc = new NoUniqueBeanDefinitionException(ScheduledExecutorService.class);
    when(context.getBean(ScheduledExecutorService.class)).thenThrow(exc);
    when(context.getBean("taskScheduler", ScheduledExecutorService.class))
            .thenReturn(expected);
    assertEquals(expected, beanLocator.resolveScheduledExecutorService());
}
Example 17
Source File: Schedule.java From litchi with Apache License 2.0 | 4 votes |
public Schedule(int threadSize, String name) {
    executorService = Executors.newScheduledThreadPool(threadSize, new NamedThreadFactory(name));
}
Example 18
Source File: DefaultAsyncContextProviderTest.java From servicetalk with Apache License 2.0 | 4 votes |
@BeforeClass
public static void beforeClass() {
    AsyncContext.autoEnable();
    executor = Executors.newScheduledThreadPool(4);
}
Example 19
Source File: NiFi.java From localization_nifi with Apache License 2.0 | 4 votes |
/**
 * Determine if the machine we're running on has timing issues.
 */
private void detectTimingIssues() {
    final int minRequiredOccurrences = 25;
    final int maxOccurrencesOutOfRange = 15;
    final AtomicLong lastTriggerMillis = new AtomicLong(System.currentTimeMillis());

    final ScheduledExecutorService service = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = defaultFactory.newThread(r);
            t.setDaemon(true);
            t.setName("Detect Timing Issues");
            return t;
        }
    });

    final AtomicInteger occurrencesOutOfRange = new AtomicInteger(0);
    final AtomicInteger occurrences = new AtomicInteger(0);
    final Runnable command = new Runnable() {
        @Override
        public void run() {
            final long curMillis = System.currentTimeMillis();
            final long difference = curMillis - lastTriggerMillis.get();
            final long millisOff = Math.abs(difference - 2000L);
            occurrences.incrementAndGet();
            if (millisOff > 500L) {
                occurrencesOutOfRange.incrementAndGet();
            }
            lastTriggerMillis.set(curMillis);
        }
    };
    final ScheduledFuture<?> future = service.scheduleWithFixedDelay(command, 2000L, 2000L, TimeUnit.MILLISECONDS);

    final TimerTask timerTask = new TimerTask() {
        @Override
        public void run() {
            future.cancel(true);
            service.shutdownNow();

            if (occurrences.get() < minRequiredOccurrences || occurrencesOutOfRange.get() > maxOccurrencesOutOfRange) {
                LOGGER.warn("NiFi has detected that this box is not responding within the expected timing interval, which may cause "
                        + "Processors to be scheduled erratically. Please see the NiFi documentation for more information.");
            }
        }
    };
    final Timer timer = new Timer(true);
    timer.schedule(timerTask, 60000L);
}
Example 20
Source File: SchedulersTest.java From reactor-core with Apache License 2.0 | 4 votes |
@Test
public void scanSupportBuffered() throws InterruptedException {
    Executor plain = Runnable::run;
    ExecutorService plainService = Executors.newSingleThreadExecutor();

    ExecutorService threadPool = Executors.newFixedThreadPool(3);
    ScheduledExecutorService scheduledThreadPool = Executors.newScheduledThreadPool(4);

    DelegateServiceScheduler.UnsupportedScheduledExecutorService unsupportedScheduledExecutorService =
            new DelegateServiceScheduler.UnsupportedScheduledExecutorService(threadPool);

    try {
        assertThat(Schedulers.scanExecutor(plain, Scannable.Attr.BUFFERED))
                .as("plain").isEqualTo(null);
        assertThat(Schedulers.scanExecutor(plainService, Scannable.Attr.BUFFERED))
                .as("plainService").isEqualTo(null);

        scheduledThreadPool.schedule(() -> {}, 500, TimeUnit.MILLISECONDS);
        scheduledThreadPool.schedule(() -> {}, 500, TimeUnit.MILLISECONDS);
        Thread.sleep(50); //give some leeway for the pool to have consistent accounting

        assertThat(Schedulers.scanExecutor(scheduledThreadPool, Scannable.Attr.BUFFERED))
                .as("scheduledThreadPool").isEqualTo(2);

        threadPool.submit(() -> {
            try {
                Thread.sleep(200);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        });

        assertThat(Schedulers.scanExecutor(threadPool, Scannable.Attr.BUFFERED))
                .as("threadPool").isEqualTo(1);
        assertThat(Schedulers.scanExecutor(unsupportedScheduledExecutorService, Scannable.Attr.BUFFERED))
                .as("unwrapped").isEqualTo(1);

        Thread.sleep(400);
        assertThat(Schedulers.scanExecutor(unsupportedScheduledExecutorService, Scannable.Attr.BUFFERED))
                .as("unwrapped after task").isEqualTo(0);
    } finally {
        plainService.shutdownNow();
        unsupportedScheduledExecutorService.shutdownNow();
        threadPool.shutdownNow();
        scheduledThreadPool.shutdownNow();
    }
}