net.openhft.chronicle.map.ExternalMapQueryContext Java Examples
The following examples show how to use
net.openhft.chronicle.map.ExternalMapQueryContext.
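The typical lifecycle is: obtain a context with map.queryContext(key), optionally acquire a read, update, or write lock, inspect entry() (which is null when the key is absent), and close the context to release the lock. Below is a minimal, self-contained sketch of that read path; the map setup and the key/value strings ("someKey", "someValue") are hypothetical, chosen only for illustration, but the query pattern mirrors the examples that follow.

import net.openhft.chronicle.map.ChronicleMap;
import net.openhft.chronicle.map.ExternalMapQueryContext;
import net.openhft.chronicle.map.MapEntry;

public class QueryContextReadSketch {
    public static void main(String[] args) {
        // Hypothetical in-memory map, for illustration only.
        try (ChronicleMap<String, String> map = ChronicleMap
                .of(String.class, String.class)
                .averageKey("someKey")      // size hints are required for variable-size keys/values
                .averageValue("someValue")
                .entries(100)
                .create()) {
            map.put("someKey", "someValue");

            // The query pattern the examples below build on:
            try (ExternalMapQueryContext<String, String, ?> ctx = map.queryContext("someKey")) {
                ctx.readLock().lock();                         // take the segment read lock
                MapEntry<String, String> entry = ctx.entry();  // null if the key is absent
                if (entry != null) {
                    System.out.println(entry.value().get());   // deserialize the value
                }
            } // closing the context releases the lock
        }
    }
}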
Example #1
Source File: ChronicleMapUnitTest.java, from the tutorials project (MIT License)
@Test
public void givenMultipleKeyQuery_whenProcessed_shouldChangeTheValue() {
    try (ExternalMapQueryContext<Integer, Set<Integer>, ?> firstContext =
                 multiMap.queryContext(1)) {
        try (ExternalMapQueryContext<Integer, Set<Integer>, ?> secondContext =
                     multiMap.queryContext(2)) {
            firstContext.updateLock().lock();
            secondContext.updateLock().lock();

            MapEntry<Integer, Set<Integer>> firstEntry = firstContext.entry();
            Set<Integer> firstSet = firstEntry.value().get();
            firstSet.remove(2);

            MapEntry<Integer, Set<Integer>> secondEntry = secondContext.entry();
            Set<Integer> secondSet = secondEntry.value().get();
            secondSet.add(4);

            firstEntry.doReplaceValue(firstContext.wrapValueAsData(firstSet));
            secondEntry.doReplaceValue(secondContext.wrapValueAsData(secondSet));
        }
    } finally {
        assertThat(multiMap.get(1).size(), is(equalTo(1)));
        assertThat(multiMap.get(2).size(), is(equalTo(2)));
    }
}
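This multi-key pattern holds update locks on two contexts at once, one per queried key. Since each context locks the segment its key hashes to, taking several segment locks simultaneously carries the usual deadlock risk if two threads lock the same pair in opposite orders; a consistent acquisition order across threads (for example, by segment index) avoids that. Note also that the assertions sit in a finally block, so they run after both contexts have been closed and their locks released.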
Example #2
Source File: JMHChronicleMapTokyoCabinetReadBenchmark.java, from the xodus project (Apache License 2.0)
@Benchmark
@BenchmarkMode(Mode.SingleShotTime)
@Warmup(iterations = WARMUP_ITERATIONS)
@Measurement(iterations = MEASUREMENT_ITERATIONS)
@Fork(FORKS)
public void randomRead(final Blackhole bh) {
    executeInTransaction(map -> {
        for (final String key : randomKeys) {
            try (ExternalMapQueryContext<String, String, ?> c = map.queryContext(key)) {
                bh.consume(c.entry().value().get());
            }
        }
    });
}
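Unlike Example #1, no lock is taken explicitly before c.entry(); for read-only access the context acquires the segment read lock on demand, and the try-with-resources block releases it by closing the context after each lookup.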
Example #3
Source File: ChecksumEntryTest.java, from the Chronicle-Map project (Apache License 2.0)
@Test
public void testChecksumEntriesWithValueInterface() throws IOException {
    File file = Builder.getPersistenceFile();
    try (ChronicleMap<Integer, LongValue> map = ChronicleMap
            .of(Integer.class, LongValue.class)
            .entries(1)
            // Entry checksums make sense only for persisted Chronicle Maps, and are ON by
            // default for such maps
            .createPersistedTo(file)) {

        LongValue value = Values.newHeapInstance(LongValue.class);
        value.setValue(42);
        map.put(1, value);

        try (ExternalMapQueryContext<Integer, LongValue, ?> c = map.queryContext(1)) {
            // Update lock required for calling ChecksumEntry.checkSum()
            c.updateLock().lock();
            MapEntry<Integer, LongValue> entry = c.entry();
            Assert.assertNotNull(entry);
            ChecksumEntry checksumEntry = (ChecksumEntry) entry;
            Assert.assertTrue(checksumEntry.checkSum());

            // To access the off-heap bytes, call value().getUsing() with a Native value
            // provided. A simple get() returns a Heap value by default
            LongValue nativeValue =
                    entry.value().getUsing(Values.newNativeReference(LongValue.class));
            // This value-bytes update bypasses Chronicle Map internals, so the checksum is
            // not updated automatically
            nativeValue.setValue(43);
            Assert.assertFalse(checksumEntry.checkSum());

            // Restore the correct checksum
            checksumEntry.updateChecksum();
            Assert.assertTrue(checksumEntry.checkSum());
        }
    }
}
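The test hinges on the difference between heap and native values: value().get() returns an on-heap copy, so mutating that copy cannot touch the stored entry, whereas value().getUsing(...) with a native reference points directly at the off-heap bytes. Writing through the native reference therefore changes the stored bytes without going through Chronicle Map's write path, which is why the checksum goes stale until updateChecksum() is called.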
Example #4
Source File: TierRecovery.java, from the Chronicle-Map project (Apache License 2.0)
public void removeDuplicatesInSegment(
        ChronicleHashCorruption.Listener corruptionListener,
        ChronicleHashCorruptionImpl corruption) {
    long startHlPos = 0L;
    VanillaChronicleMap<?, ?, ?> m = mh.m();
    CompactOffHeapLinearHashTable hashLookup = m.hashLookup;
    long currentTierBaseAddr = s.tierBaseAddr;
    while (!hashLookup.empty(hashLookup.readEntry(currentTierBaseAddr, startHlPos))) {
        startHlPos = hashLookup.step(startHlPos);
    }
    long hlPos = startHlPos;
    int steps = 0;
    long entries = 0;
    tierIteration:
    do {
        hlPos = hashLookup.step(hlPos);
        steps++;
        long entry = hashLookup.readEntry(currentTierBaseAddr, hlPos);
        if (!hashLookup.empty(entry)) {
            e.readExistingEntry(hashLookup.value(entry));
            Data key = e.key();
            try (ExternalMapQueryContext<?, ?, ?> c = m.queryContext(key)) {
                MapEntry<?, ?> entry2 = c.entry();
                Data<?> key2 = ((MapEntry) c).key();
                long keyAddress = key.bytes().addressForRead(key.offset());
                long key2Address = key2.bytes().addressForRead(key2.offset());
                if (key2Address != keyAddress) {
                    report(corruptionListener, corruption, s.segmentIndex, () ->
                            format("entries with duplicate key {} in segment {}: " +
                                            "with values {} and {}, removing the latter",
                                    key, c.segmentIndex(),
                                    entry2 != null ? ((MapEntry) c).value() : "<deleted>",
                                    !e.entryDeleted() ? e.value() : "<deleted>")
                    );
                    if (hashLookup.remove(currentTierBaseAddr, hlPos) != hlPos) {
                        hlPos = hashLookup.stepBack(hlPos);
                        steps--;
                    }
                    continue tierIteration;
                }
            }
            entries++;
        }
        // The `steps == 0` condition, together with the `steps` updates in the loop, fixes
        // the bug where a shift deletion on the first entry of the tier makes hlPos equal
        // to startHlPos before the whole loop has run, after visiting only a single entry
    } while (hlPos != startHlPos || steps == 0);
    recoverTierEntriesCounter(entries, corruptionListener, corruption);
    recoverLowestPossibleFreeChunkTiered(corruptionListener, corruption);
}
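Unlike the earlier examples, this is Chronicle Map's own recovery code rather than user-level API usage. While repairing a persisted map it walks a segment tier's off-heap hash lookup and, for each stored key, opens a query context to find the entry the map currently resolves that key to; if the resolved key's bytes sit at a different address than the key of the entry being visited, the visited entry is a stale duplicate and its hash-lookup slot is removed, with the stepBack and steps adjustments keeping the shift-deleting iteration consistent.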