Java Code Examples for org.apache.commons.lang3.mutable.MutableLong#longValue()
The following examples show how to use
org.apache.commons.lang3.mutable.MutableLong#longValue().
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
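For orientation, here is a minimal, self-contained sketch (not from any of the projects below) showing the MutableLong calls the examples rely on; longValue() reads the current value back out as a primitive long:

import org.apache.commons.lang3.mutable.MutableLong;

public class MutableLongDemo {
  public static void main(String[] args) {
    MutableLong counter = new MutableLong(0); // mutable wrapper around a primitive long
    counter.increment();                      // 1
    counter.add(41);                          // 42
    long snapshot = counter.longValue();      // read the primitive value
    System.out.println(snapshot);             // prints 42
    counter.setValue(7);                      // overwrite the held value
    System.out.println(counter.longValue());  // prints 7
  }
}

Unlike java.lang.Long, a MutableLong can be updated in place, which is why the examples below use it as a counter or holder that lambdas and loops can modify.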
Example 1
Source File: SerialReplicationChecker.java From hbase with Apache License 2.0
public boolean canPush(Entry entry, Cell firstCellInEdit) throws IOException {
  String encodedNameAsString = Bytes.toString(entry.getKey().getEncodedRegionName());
  long seqId = entry.getKey().getSequenceId();
  Long canReplicateUnderSeqId = canPushUnder.getIfPresent(encodedNameAsString);
  if (canReplicateUnderSeqId != null) {
    if (seqId < canReplicateUnderSeqId.longValue()) {
      LOG.trace("{} is before the end barrier {}, pass", entry, canReplicateUnderSeqId);
      return true;
    }
    LOG.debug("{} is beyond the previous end barrier {}, remove from cache", entry,
      canReplicateUnderSeqId);
    // we are already beyond the last safe point, remove
    canPushUnder.invalidate(encodedNameAsString);
  }
  // This is for the case where the region is currently opened on us, if the sequence id is
  // continuous then we are safe to replicate. If there is a breakpoint, then maybe the region
  // has been moved to another RS and then back, so we need to check the barrier.
  MutableLong previousPushedSeqId = pushed.getUnchecked(encodedNameAsString);
  if (seqId == previousPushedSeqId.longValue() + 1) {
    LOG.trace("The sequence id for {} is continuous, pass", entry);
    previousPushedSeqId.increment();
    return true;
  }
  return canPush(entry, CellUtil.cloneRow(firstCellInEdit));
}
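Example 1 keeps one MutableLong per region in a Guava cache and treats it as the last pushed sequence id: if the incoming id is exactly previous + 1, the sequence is continuous and the counter is bumped in place. A stripped-down sketch of that per-key counter pattern, with hypothetical names and a hypothetical initial value:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.commons.lang3.mutable.MutableLong;

public class PerKeyCounterSketch {
  public static void main(String[] args) {
    LoadingCache<String, MutableLong> pushed = CacheBuilder.newBuilder()
      .build(new CacheLoader<String, MutableLong>() {
        @Override
        public MutableLong load(String key) {
          return new MutableLong(-1); // hypothetical "nothing pushed yet" marker
        }
      });

    long seqId = 0; // hypothetical incoming sequence id
    MutableLong previous = pushed.getUnchecked("region-1");
    if (seqId == previous.longValue() + 1) {
      previous.increment(); // continuous: advance the cached counter in place
    }
    System.out.println(previous.longValue()); // prints 0
  }
}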
Example 2
Source File: AbstractFSWAL.java From hbase with Apache License 2.0
protected final long stampSequenceIdAndPublishToRingBuffer(RegionInfo hri, WALKeyImpl key,
    WALEdit edits, boolean inMemstore, RingBuffer<RingBufferTruck> ringBuffer)
    throws IOException {
  if (this.closed) {
    throw new IOException(
      "Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
  }
  MutableLong txidHolder = new MutableLong();
  MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
    txidHolder.setValue(ringBuffer.next());
  });
  long txid = txidHolder.longValue();
  ServerCall<?> rpcCall = RpcServer.getCurrentCall().filter(c -> c instanceof ServerCall)
    .filter(c -> c.getCellScanner() != null).map(c -> (ServerCall) c).orElse(null);
  try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
    FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore, rpcCall);
    entry.stampRegionSequenceId(we);
    ringBuffer.get(txid).load(entry);
  } finally {
    ringBuffer.publish(txid);
  }
  return txid;
}
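Example 2 uses MutableLong as a write-back holder: local variables captured by a Java lambda must be effectively final, so the callback passed to key.getMvcc().begin() cannot assign to a plain long, but it can mutate a MutableLong it captures. A minimal sketch of the pattern (names are illustrative, not from AbstractFSWAL):

import org.apache.commons.lang3.mutable.MutableLong;

public class LambdaHolderSketch {
  public static void main(String[] args) {
    MutableLong txidHolder = new MutableLong();
    // a lambda cannot assign to a captured long local, but it can mutate the holder
    Runnable stamp = () -> txidHolder.setValue(System.nanoTime());
    stamp.run();
    long txid = txidHolder.longValue(); // read the value back out after the callback ran
    System.out.println(txid);
  }
}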
Example 3
Source File: FileDataInterface.java From count-db with MIT License
@Override
public long freeMemory() {
  MutableLong totalBytesReleased = new MutableLong(0);
  ifNotClosed(() -> {
    for (FileBucket bucket : fileBuckets) {
      bucket.lockRead();
      for (FileInfo fileInfo : bucket.getFiles()) {
        long bytesReleased = fileInfo.discardFileContents();
        updateSizeOfCachedFileContents(-bytesReleased);
        totalBytesReleased.add(bytesReleased);
      }
      bucket.unlockRead();
    }
  });
  return totalBytesReleased.longValue();
}
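Example 3 shows the same idea used as an accumulator: the total is summed inside a lambda passed to ifNotClosed, then read out once with longValue(). A compact sketch of accumulating inside a lambda, with made-up data:

import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.mutable.MutableLong;

public class AccumulatorSketch {
  public static void main(String[] args) {
    List<Long> sizes = Arrays.asList(10L, 20L, 12L); // hypothetical byte counts
    MutableLong total = new MutableLong(0);
    sizes.forEach(total::add);              // accumulate inside the lambda
    System.out.println(total.longValue());  // prints 42
  }
}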
Example 4
Source File: WriteHeavyIncrementObserver.java From hbase with Apache License 2.0
private long getUniqueTimestamp(byte[] row) {
  int slot = Bytes.hashCode(row) & mask;
  MutableLong lastTimestamp = lastTimestamps[slot];
  long now = System.currentTimeMillis();
  synchronized (lastTimestamp) {
    long pt = lastTimestamp.longValue() >> 10;
    if (now > pt) {
      lastTimestamp.setValue(now << 10);
    } else {
      lastTimestamp.increment();
    }
    return lastTimestamp.longValue();
  }
}
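Example 4 packs two fields into the held long: the upper bits carry the wall-clock milliseconds and the low 10 bits a per-millisecond counter, so up to 1024 distinct timestamps can be issued within the same millisecond. The synchronized block is needed because MutableLong itself is not thread-safe. A small sketch of the packing arithmetic:

public class PackedTimestampSketch {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long packed = now << 10;           // high bits: wall-clock ms; low 10 bits: counter
    packed++;                          // second timestamp issued in the same millisecond
    long millis = packed >> 10;        // recover the wall-clock time
    long counter = packed & 0x3FF;     // recover the per-ms counter (here 1)
    System.out.println(millis == now); // true
    System.out.println(counter);       // 1
  }
}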
Example 5
Source File: BigramTestsMain.java From count-db with MIT License
private void testSeparateWritingReading(DataType dataType, DataInterfaceFactory factory,
    DatabaseCachingType cachingType, int numberOfThreads, long numberOfItems)
    throws FileNotFoundException, InterruptedException {
  final BaseDataInterface dataInterface = createDataInterface(dataType, cachingType, factory);
  dataInterface.dropAllData();
  final DataInputStream inputStream =
      new DataInputStream(new BufferedInputStream(new FileInputStream(bigramFile)));
  //write data
  MutableLong numberOfItemsWritten = new MutableLong(0);
  CountDownLatch writeLatch = new CountDownLatch(numberOfThreads);
  long startOfWrite = System.nanoTime();
  for (int i = 0; i < numberOfThreads; i++) {
    new BigramTestsThread(dataType, numberOfItemsWritten, numberOfItems, inputStream,
        dataInterface, writeLatch, false).start();
  }
  writeLatch.await();
  dataInterface.flush();
  long endOfWrite = System.nanoTime();
  double writesPerSecond = numberOfItemsWritten.longValue() * 1e9 / (endOfWrite - startOfWrite);
  dataInterface.optimizeForReading();
  MutableLong numberOfItemsRead = new MutableLong(0);
  CountDownLatch readLatch = new CountDownLatch(numberOfThreads);
  long startOfRead = System.nanoTime();
  for (int i = 0; i < numberOfThreads; i++) {
    new BigramTestsThread(dataType, numberOfItemsRead, numberOfItems, inputStream,
        dataInterface, readLatch, true).start();
  }
  readLatch.await();
  dataInterface.flush();
  long endOfRead = System.nanoTime();
  double readsPerSecond = numberOfItemsRead.longValue() * 1e9 / (endOfRead - startOfRead);
  dataInterface.close();
  Log.i(factory.getClass().getSimpleName() + " threads " + numberOfThreads + " items " + numberOfItems
      + " write " + NumUtils.fmt(writesPerSecond) + " read " + NumUtils.fmt(readsPerSecond));
}
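Examples 5 and 6 use longValue() to turn the shared counters into a throughput figure: since System.nanoTime() measures in nanoseconds and there are 1e9 nanoseconds per second, operations * 1e9 / elapsedNanos gives operations per second. A self-contained sketch of that calculation:

import org.apache.commons.lang3.mutable.MutableLong;

public class ThroughputSketch {
  public static void main(String[] args) {
    MutableLong itemsWritten = new MutableLong(0);
    long start = System.nanoTime();
    for (int i = 0; i < 1_000_000; i++) {
      itemsWritten.increment(); // stand-in for real work
    }
    long elapsed = System.nanoTime() - start;
    // ops * 1e9 ns/s / elapsed ns = ops per second
    double opsPerSecond = itemsWritten.longValue() * 1e9 / elapsed;
    System.out.println(opsPerSecond);
  }
}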
Example 6
Source File: UniformDataTestsMain.java From count-db with MIT License
private void testBatchWritingAndReading(DataInterfaceFactory factory, DatabaseCachingType cachingType,
    int numberOfThreads, final long numberOfItems)
    throws FileNotFoundException, InterruptedException {
  final BaseDataInterface dataInterface = createDataInterface(cachingType, factory);
  dataInterface.dropAllData();
  MutableLong numberOfItemsWritten = new MutableLong(0);
  long startOfWrite = System.nanoTime();
  CountDownLatch countDownLatch = new CountDownLatch(numberOfThreads);
  for (int i = 0; i < numberOfThreads; i++) {
    new UniformDataTestsThread(numberOfItemsWritten, numberOfItems, dataInterface,
        countDownLatch, true).start();
  }
  countDownLatch.await();
  dataInterface.flush();
  long endOfWrite = System.nanoTime();
  double writesPerSecond = numberOfItemsWritten.longValue() * 1e9 / (endOfWrite - startOfWrite);
  countDownLatch = new CountDownLatch(numberOfThreads);
  long startOfRead = System.nanoTime();
  dataInterface.optimizeForReading();
  MutableLong numberOfItemsRead = new MutableLong(0);
  for (int i = 0; i < numberOfThreads; i++) {
    new UniformDataTestsThread(numberOfItemsRead, numberOfItems, dataInterface,
        countDownLatch, false).start();
  }
  countDownLatch.await();
  long endOfRead = System.nanoTime();
  double readsPerSecond = numberOfItemsRead.longValue() * 1e9 / (endOfRead - startOfRead);
  Log.i(factory.getClass().getSimpleName() + " threads " + numberOfThreads + " items " + numberOfItems
      + " write " + NumUtils.fmt(writesPerSecond) + " read " + NumUtils.fmt(readsPerSecond));
  dataInterface.close();
}
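Note that MutableLong is not synchronized, so sharing one instance across worker threads (as Examples 5 and 6 do) is only safe if the threads coordinate their updates themselves. When no external locking is available, java.util.concurrent offers thread-safe alternatives; a minimal sketch using LongAdder:

import java.util.concurrent.atomic.LongAdder;

public class SafeCounterSketch {
  public static void main(String[] args) {
    LongAdder counter = new LongAdder();
    counter.increment();               // safe to call from many threads concurrently
    System.out.println(counter.sum()); // prints 1
  }
}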
Example 7
Source File: ParquetGroupScanStatistics.java From Bats with Apache License 2.0
public void collect(List<T> metadataList) {
  resetHolders();
  boolean first = true;
  for (T metadata : metadataList) {
    long localRowCount = (long) TableStatisticsKind.ROW_COUNT.getValue(metadata);
    for (Map.Entry<SchemaPath, ColumnStatistics> columnsStatistics : metadata.getColumnsStatistics().entrySet()) {
      SchemaPath schemaPath = columnsStatistics.getKey();
      ColumnStatistics statistics = columnsStatistics.getValue();
      MutableLong emptyCount = new MutableLong();
      MutableLong previousCount = columnValueCounts.putIfAbsent(schemaPath, emptyCount);
      if (previousCount == null) {
        previousCount = emptyCount;
      }
      Long nullsNum = (Long) statistics.getStatistic(ColumnStatisticsKind.NULLS_COUNT);
      if (previousCount.longValue() != GroupScan.NO_COLUMN_STATS && nullsNum != null
          && nullsNum != GroupScan.NO_COLUMN_STATS) {
        previousCount.add(localRowCount - nullsNum);
      } else {
        previousCount.setValue(GroupScan.NO_COLUMN_STATS);
      }
      ColumnMetadata columnMetadata = SchemaPathUtils.getColumnMetadata(schemaPath, metadata.getSchema());
      TypeProtos.MajorType majorType = columnMetadata != null ? columnMetadata.majorType() : null;
      boolean partitionColumn = checkForPartitionColumn(statistics, first, localRowCount, majorType, schemaPath);
      if (partitionColumn) {
        Object value = partitionValueMap.get(metadata.getLocation(), schemaPath);
        Object currentValue = statistics.getStatistic(ColumnStatisticsKind.MAX_VALUE);
        if (value != null && value != BaseParquetMetadataProvider.NULL_VALUE) {
          if (value != currentValue) {
            partitionColTypeMap.remove(schemaPath);
          }
        } else {
          // the value of a column with a primitive type cannot be null,
          // so check that the values really are null before putting NULL_VALUE into the map
          if (localRowCount == (long) statistics.getStatistic(ColumnStatisticsKind.NULLS_COUNT)) {
            partitionValueMap.put(metadata.getLocation(), schemaPath, BaseParquetMetadataProvider.NULL_VALUE);
          } else {
            partitionValueMap.put(metadata.getLocation(), schemaPath, currentValue);
          }
        }
      } else {
        partitionColTypeMap.remove(schemaPath);
      }
    }
    this.rowCount += localRowCount;
    first = false;
  }
}
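Example 7's putIfAbsent dance is a common way to maintain per-key MutableLong counters without a double lookup: optimistically offer a fresh counter, and fall back to it only when a counter was already mapped. In isolation (a sketch with hypothetical names):

import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang3.mutable.MutableLong;

public class PutIfAbsentSketch {
  public static void main(String[] args) {
    ConcurrentHashMap<String, MutableLong> counts = new ConcurrentHashMap<>();
    String key = "col_a"; // hypothetical column name
    MutableLong fresh = new MutableLong();
    MutableLong previous = counts.putIfAbsent(key, fresh);
    if (previous == null) {
      previous = fresh; // our fresh counter won; use it
    }
    previous.add(5);     // update in place, no second map lookup
    System.out.println(counts.get(key).longValue()); // prints 5
  }
}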
Example 8
Source File: SumLong.java From attic-apex-malhar with Apache License 2.0
@Override
public Long getOutput(MutableLong accumulatedValue) {
  return accumulatedValue.longValue();
}
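Here longValue() extracts the primitive, which the compiler then autoboxes back to the declared Long return type; the effect is the same as returning accumulatedValue.getValue(), but the primitive read is explicit.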