Java Code Examples for com.google.common.base.Stopwatch#elapsedMillis()
The following examples show how to use com.google.common.base.Stopwatch#elapsedMillis(). You can go to the original project or source file by following the link above each example.
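Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: start a Stopwatch, do some work, and read the elapsed milliseconds. The class name and the sleep standing in for "work" are illustrative only. It uses the older Guava API that these projects target (public Stopwatch constructor and elapsedMillis()); in current Guava releases the equivalent calls are Stopwatch.createStarted() and elapsed(TimeUnit.MILLISECONDS).

    import java.util.concurrent.TimeUnit;

    import com.google.common.base.Stopwatch;

    public class StopwatchElapsedMillisDemo {
        public static void main(String[] args) throws InterruptedException {
            Stopwatch stopwatch = new Stopwatch(); // older Guava API; newer: Stopwatch.createStarted()
            stopwatch.start();

            TimeUnit.MILLISECONDS.sleep(250);      // placeholder for the work being timed

            stopwatch.stop();
            long elapsed = stopwatch.elapsedMillis(); // newer Guava: stopwatch.elapsed(TimeUnit.MILLISECONDS)
            System.out.println("work took " + elapsed + " ms");
        }
    }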
Example 1
Source File: DoggedCubeBuilder2.java From kylin-on-parquet-v2 with Apache License 2.0 | 6 votes |
@Override
public void finish(CuboidResult result) {
    Stopwatch stopwatch = new Stopwatch().start();
    int nRetries = 0;
    while (!outputQueue.offer(result)) {
        nRetries++;
        long sleepTime = stopwatch.elapsedMillis();
        if (sleepTime > 3600000L) {
            stopwatch.stop();
            throw new RuntimeException(
                    "OutputQueue Full. Cannot offer to the output queue after waiting for one hour!!! Current queue size: "
                            + outputQueue.size());
        }
        logger.warn("OutputQueue Full. Queue size: " + outputQueue.size() + ". Total sleep time : " + sleepTime
                + ", and retry count : " + nRetries);
        try {
            Thread.sleep(5000L);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
    stopwatch.stop();
}
Example 2
Source File: StreamingCubeDataSearcherPerfTest.java From kylin-on-parquet-v2 with Apache License 2.0 | 6 votes |
private void search(int time) throws IOException {
    System.out.println("start " + time + " search");
    Stopwatch sw = new Stopwatch();
    sw.start();
    Set<TblColRef> dimensions = testHelper.simulateDimensions("STREAMING_V2_TABLE.MINUTE_START");
    Set<TblColRef> groups = testHelper.simulateDimensions();
    Set<FunctionDesc> metrics = Sets.newHashSet(testHelper.simulateCountMetric());
    long startTime = DateFormat.stringToMillis("2018-07-30 07:00:00");
    long endTime = DateFormat.stringToMillis("2018-07-30 08:00:00");
    TupleFilter filter = testHelper.buildTimeRangeFilter("STREAMING_V2_TABLE.MINUTE_START",
            String.valueOf(startTime), String.valueOf(endTime));
    StreamingSearchContext searchRequest = new StreamingSearchContext(parsedStreamingCubeInfo.cubeDesc, dimensions,
            groups, metrics, filter, null);
    IStreamingSearchResult searchResult = searcher.doSearch(searchRequest, 0L, true);
    for (Record record : searchResult) {
        System.out.println(record);
    }
    sw.stop();
    long takeTime = sw.elapsedMillis();
    System.out.println(time + " search finished, took:" + takeTime);
}
Example 3
Source File: StreamingCubeDataSearcherPerfTest.java From kylin-on-parquet-v2 with Apache License 2.0 | 6 votes |
private void iiSearch(int time) throws IOException {
    System.out.println("start " + time + " invertIndex search");
    Stopwatch sw = new Stopwatch();
    sw.start();
    Set<TblColRef> dimensions = testHelper.simulateDimensions("STREAMING_V2_TABLE.MINUTE_START",
            "STREAMING_V2_TABLE.ITM");
    Set<TblColRef> groups = testHelper.simulateDimensions();
    Set<FunctionDesc> metrics = Sets.newHashSet(testHelper.simulateCountMetric());
    long startTime = DateFormat.stringToMillis("2018-07-30 07:00:00");
    long endTime = DateFormat.stringToMillis("2018-07-30 09:00:00");
    TupleFilter timeFilter = testHelper.buildTimeRangeFilter("STREAMING_V2_TABLE.MINUTE_START",
            String.valueOf(startTime), String.valueOf(endTime));
    TupleFilter itemFilter = testHelper.buildEQFilter("STREAMING_V2_TABLE.ITM", "ITM0000000000");
    TupleFilter filter = testHelper.buildAndFilter(timeFilter, itemFilter);
    StreamingSearchContext searchRequest = new StreamingSearchContext(parsedStreamingCubeInfo.cubeDesc, dimensions,
            groups, metrics, filter, null);
    IStreamingSearchResult searchResult = searcher.doSearch(searchRequest, 0L, true);
    for (Record record : searchResult) {
        System.out.println(record);
    }
    sw.stop();
    long takeTime = sw.elapsedMillis();
    System.out.println(time + " search finished, took:" + takeTime);
}
Example 4
Source File: StreamingCubeDataSearcherPerfTest.java From kylin-on-parquet-v2 with Apache License 2.0 | 6 votes |
private void scan(int time) throws IOException {
    System.out.println("start " + time + " scan");
    Stopwatch sw = new Stopwatch();
    sw.start();
    Set<TblColRef> dimensions = testHelper.simulateDimensions("STREAMING_V2_TABLE.SITE");
    Set<TblColRef> groups = testHelper.simulateDimensions();
    Set<FunctionDesc> metrics = Sets.newHashSet(testHelper.simulateCountMetric());
    long startTime = DateFormat.stringToMillis("2018-07-30 07:00:00");
    long endTime = DateFormat.stringToMillis("2018-07-30 08:00:00");
    TupleFilter filter = testHelper.buildTimeRangeFilter("STREAMING_V2_TABLE.MINUTE_START",
            String.valueOf(startTime), String.valueOf(endTime));
    StreamingSearchContext searchRequest = new StreamingSearchContext(parsedStreamingCubeInfo.cubeDesc, dimensions,
            groups, metrics, null, null);
    IStreamingSearchResult searchResult = searcher.doSearch(searchRequest, 0L, true);
    long scanRowCnt = 0;
    for (Record record : searchResult) {
        scanRowCnt++;
    }
    sw.stop();
    long takeTime = sw.elapsedMillis();
    System.out.println(time + " search finished, scan row cnt:" + scanRowCnt + ", took:" + takeTime
            + ",numRowsPerSec:" + scanRowCnt * 1000 / takeTime);
}
Example 5
Source File: FragmentCuboidReaderPerfTest.java From kylin-on-parquet-v2 with Apache License 2.0 | 5 votes |
private void readRow(int time, List<TblColRef> dimensions, MeasureDesc[] metrics) throws IOException {
    System.out.println("start " + time + " read, " + dimensions.size() + " dimensions," + metrics.length
            + " measures");
    TblColRef[] dimArray = dimensions.toArray(new TblColRef[dimensions.size()]);
    Random rand = new Random();
    int randReadNum = 100;
    int[][] readRows = new int[fragments.length][randReadNum];
    List<FragmentCuboidReader> fragmentCuboidReaders = Lists.newArrayList();
    for (int i = 0; i < fragments.length; i++) {
        FragmentMetaInfo fragmentMetaInfo = fragments[i].getMetaInfo();
        FragmentData fragmentData = new FragmentData(fragmentMetaInfo, fragments[i].getDataFile());
        Map<TblColRef, Dictionary<String>> dictionaryMap = fragmentData
                .getDimensionDictionaries(parsedStreamingCubeInfo.dimensionsUseDictEncoding);
        DimensionEncoding[] dimensionEncodings = ParsedStreamingCubeInfo.getDimensionEncodings(
                parsedStreamingCubeInfo.cubeDesc, dimArray, dictionaryMap);
        FragmentCuboidReader fragmentCuboidReader = new FragmentCuboidReader(parsedStreamingCubeInfo.cubeDesc,
                fragmentData, fragmentMetaInfo.getBasicCuboidMetaInfo(), dimArray, metrics, dimensionEncodings);
        fragmentCuboidReaders.add(fragmentCuboidReader);
        for (int j = 0; j < randReadNum; j++) {
            readRows[i][j] = rand.nextInt((int) fragmentMetaInfo.getNumberOfRows());
        }
    }
    Stopwatch sw = new Stopwatch();
    sw.start();
    int rowNum = 0;
    for (int i = 0; i < fragments.length; i++) {
        for (int j = 0; j < readRows[i].length; j++) {
            fragmentCuboidReaders.get(i).read(readRows[i][j]);
            rowNum++;
        }
    }
    sw.stop();
    long takeTime = sw.elapsedMillis();
    System.out.println(time + " scan finished, total rows:" + rowNum);
    System.out.println(time + " scan took:" + takeTime + ",rowsPerSec:" + rowNum * 1000 / takeTime);
}
Example 6
Source File: TransactionPruningServiceTest.java From phoenix-tephra with Apache License 2.0 | 5 votes |
public static void waitForRuns(int runs, int timeout, TimeUnit unit) throws Exception {
    long timeoutMillis = unit.toMillis(timeout);
    Stopwatch stopWatch = new Stopwatch();
    stopWatch.start();
    while (pruneRuns < runs && stopWatch.elapsedMillis() < timeoutMillis) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
}
Example 7
Source File: ResourceReportTestRun.java From twill with Apache License 2.0 | 5 votes |
private ResourceReport getResourceReport(TwillController controller, long timeoutMillis) {
    ResourceReport report = controller.getResourceReport();
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    while (report == null && stopwatch.elapsedMillis() < timeoutMillis) {
        Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
        report = controller.getResourceReport();
    }
    Assert.assertNotNull(report);
    return report;
}
Example 8
Source File: JvmPauseMonitor.java From tajo with Apache License 2.0 | 5 votes |
@Override
public void run() {
    Stopwatch sw = new Stopwatch();
    Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
    while (shouldRun) {
        sw.reset().start();
        try {
            Thread.sleep(SLEEP_INTERVAL_MS);
        } catch (InterruptedException ie) {
            return;
        }
        long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
        Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();

        if (extraSleepTime > warnThresholdMs) {
            ++numGcWarnThresholdExceeded;
            LOG.warn(formatMessage(extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
        } else if (extraSleepTime > infoThresholdMs) {
            ++numGcInfoThresholdExceeded;
            LOG.info(formatMessage(extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
        }
        totalGcExtraSleepTime += extraSleepTime;
        gcTimesBeforeSleep = gcTimesAfterSleep;
    }
}
Example 9
Source File: AggregationCacheMemSizeTest.java From kylin-on-parquet-v2 with Apache License 2.0 | 4 votes |
private void testSetting(Settings settings, int inputCount) {
    SortedMap<byte[], Object> map = new TreeMap<>(new Comparator<byte[]>() {
        @Override
        public int compare(byte[] o1, byte[] o2) {
            return Bytes.compareTo(o1, o2);
        }
    });

    final int reportInterval = inputCount / 10;
    final Stopwatch stopwatch = new Stopwatch();
    long estimateMillis = 0;
    long actualMillis = 0;

    System.out.println("Settings: " + settings);
    System.out.printf(Locale.ROOT, "%15s %15s %15s %15s %15s\n", "Size", "Estimate(bytes)", "Actual(bytes)",
            "Estimate(ms)", "Actual(ms)");

    for (int i = 0; i < inputCount; i++) {
        byte[] key = new byte[10];
        random.nextBytes(key);
        MeasureAggregator[] values = createAggrs(settings);
        map.put(key, values);

        if ((i + 1) % reportInterval == 0) {
            stopwatch.start();
            long estimateBytes = GTAggregateScanner.estimateSizeOfAggrCache(key, values, map.size());
            estimateMillis += stopwatch.elapsedMillis();
            stopwatch.reset();

            stopwatch.start();
            long actualBytes = meter.measureDeep(map);
            actualMillis += stopwatch.elapsedMillis();
            stopwatch.reset();

            System.out.printf(Locale.ROOT, "%,15d %,15d %,15d %,15d %,15d\n", map.size(), estimateBytes,
                    actualBytes, estimateMillis, actualMillis);
        }
    }

    System.out.println("---------------------------------------\n");

    map = null;
    System.gc();
}
Example 10
Source File: ConnectionQueryServicesImpl.java From phoenix with Apache License 2.0 | 4 votes |
private void checkAndRetry(RetriableOperation op) throws InterruptedException, TimeoutException {
    int maxRetries = ConnectionQueryServicesImpl.this.props.getInt(
            QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK,
            QueryServicesOptions.DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK);
    long sleepInterval = ConnectionQueryServicesImpl.this.props.getLong(QueryServices.DELAY_FOR_SCHEMA_UPDATE_CHECK,
            QueryServicesOptions.DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK);
    boolean success = false;
    int numTries = 1;
    Stopwatch watch = new Stopwatch();
    watch.start();
    do {
        try {
            success = op.checkForCompletion();
        } catch (Exception ex) {
            // If we encounter any exception on the first or last try, propagate the exception and fail.
            // Else, we swallow the exception and retry till we reach maxRetries.
            if (numTries == 1 || numTries == maxRetries) {
                watch.stop();
                TimeoutException toThrow = new TimeoutException("Operation " + op.getOperatioName()
                        + " didn't complete because of exception. Time elapsed: " + watch.elapsedMillis());
                toThrow.initCause(ex);
                throw toThrow;
            }
        }
        numTries++;
        Thread.sleep(sleepInterval);
    } while (numTries < maxRetries && !success);

    watch.stop();
    if (!success) {
        throw new TimeoutException("Operation " + op.getOperatioName() + " didn't complete within "
                + watch.elapsedMillis() + " ms "
                + (numTries > 1 ? ("after trying " + numTries + (numTries > 1 ? "times." : "time.")) : ""));
    } else {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation " + op.getOperatioName() + " completed within " + watch.elapsedMillis()
                    + "ms " + (numTries > 1 ? ("after trying " + numTries + (numTries > 1 ? "times." : "time.")) : ""));
        }
    }
}