Java Code Examples for org.rocksdb.RocksDB#newIterator()
The following examples show how to use org.rocksdb.RocksDB#newIterator(). They are drawn from open source projects; each example lists its source file, project, and license.
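Before the project examples, here is a minimal sketch of the pattern they all build on: newIterator() returns a RocksIterator that starts out unpositioned, so you must seek before reading, and both the iterator and the database hold native resources that should be closed. The class name and database path below are placeholders for this sketch, not taken from any of the examples.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public class NewIteratorSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // "/tmp/newiterator-sketch" is a placeholder path for this sketch.
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/newiterator-sketch");
             RocksIterator it = db.newIterator()) {
            // A freshly created iterator is not positioned; seek first, then walk forward.
            for (it.seekToFirst(); it.isValid(); it.next()) {
                System.out.println(new String(it.key()) + " -> " + new String(it.value()));
            }
        }
    }
}

newIterator() also has overloads that take a ColumnFamilyHandle and/or a ReadOptions, which the examples below exercise.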
Example 1
Source File: Index.java From outbackcdx with Apache License 2.0
public Records(RocksDB db, ColumnFamilyHandle columnFamilyHandle, byte[] startKey,
               RecordConstructor<T> constructor, Predicate<T> scope, boolean reverse, long cap) {
    final RocksIterator it = db.newIterator(columnFamilyHandle);
    it.seek(startKey);
    if (reverse) {
        if (it.isValid()) {
            it.prev();
        } else {
            it.seekToLast();
        }
    }
    this.constructor = constructor;
    this.scope = scope;
    this.it = it;
    this.reverse = reverse;
    this.cap = cap;
}
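The reverse branch above positions the iterator by seeking forward and then stepping back, falling back to seekToLast() when the seek runs off the end of the keyspace. As a side note, recent RocksJava releases also expose RocksIterator#seekForPrev, which positions at the last key less than or equal to the target. The sketch below shows that variant under the assumption that seekForPrev is available; note it includes startKey itself when present, whereas the constructor above starts strictly before it. The db, columnFamilyHandle, and startKey names are those of the constructor parameters, and process(...) is a hypothetical callback.

// Sketch only: assumes a RocksJava release that provides seekForPrev.
try (RocksIterator it = db.newIterator(columnFamilyHandle)) {
    // Position at the last key <= startKey (includes startKey itself if it exists).
    it.seekForPrev(startKey);
    while (it.isValid()) {
        process(it.key(), it.value()); // process(...) is a hypothetical callback
        it.prev();
    }
}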
Example 2
Source File: RocksDBStore.java From dremio-oss with Apache License 2.0
public FindByRangeIterator(RocksDB db, ColumnFamilyHandle handle, FindByRange<byte[]> range, MetaManager blob) {
    this.iter = db.newIterator(handle);
    this.end = range == null ? null : range.getEnd();
    this.endInclusive = range == null ? false : range.isEndInclusive();
    this.blob = blob;
    durations = ITERATOR_METRICS ? new DescriptiveStatistics() : null;
    valueSizes = ITERATOR_METRICS ? new DescriptiveStatistics() : null;
    stopwatch = ITERATOR_METRICS ? Stopwatch.createStarted() : null;

    // position at beginning of cursor.
    if (range != null && range.getStart() != null) {
        iter.seek(range.getStart());
        if (iter.isValid() && !range.isStartInclusive() && Arrays.equals(iter.key(), range.getStart())) {
            seekNext();
        }
    } else {
        iter.seekToFirst();
    }

    if (ITERATOR_METRICS) {
        cursorSetup = stopwatch.elapsed(TimeUnit.MICROSECONDS);
    }

    populateNext();
}
Example 3
Source File: RocksDBIndexTable.java From geowave with Apache License 2.0
public CloseableIterator<GeoWaveRow> iterator() {
    final RocksDB readDb = getReadDb();
    if (readDb == null) {
        return new CloseableIterator.Empty<>();
    }
    final ReadOptions options = new ReadOptions().setFillCache(false);
    final RocksIterator it = readDb.newIterator(options);
    it.seekToFirst();
    return new RocksDBRowIterator(
        options, it, adapterId, partition, requiresTimestamp, visibilityEnabled);
}
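Passing a ReadOptions with setFillCache(false) to newIterator keeps this full scan from flooding the block cache with cold blocks; here the options and iterator are handed off to RocksDBRowIterator, which is presumably responsible for closing both. When the caller owns the whole scan, the same pattern can be written with try-with-resources, as in this sketch (handleRow(...) is a hypothetical callback):

try (ReadOptions readOptions = new ReadOptions().setFillCache(false);
     RocksIterator it = db.newIterator(readOptions)) {
    for (it.seekToFirst(); it.isValid(); it.next()) {
        handleRow(it.key(), it.value()); // handleRow(...) is a hypothetical callback
    }
}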
Example 4
Source File: RocksDBManager.java From WebCollector with GNU General Public License v3.0
public void list() throws Exception {
    String crawldbPath = FilenameUtils.concat(crawlPath, "crawldb");
    RocksDB crawldbDatabase = RocksDBUtils.open(crawldbPath);
    RocksIterator crawldbIterator = crawldbDatabase.newIterator();

    for (crawldbIterator.seekToFirst(); crawldbIterator.isValid(); crawldbIterator.next()) {
        CrawlDatum datum = RocksDBUtils.createCrawlDatum(crawldbIterator.key(), crawldbIterator.value());
        System.out.println(CrawlDatumFormater.datumToString(datum));
    }

    crawldbDatabase.close();
}
Example 5
Source File: RocksFullSnapshotStrategy.java From flink with Apache License 2.0
@SuppressWarnings("unchecked") private static RocksIteratorWrapper getRocksIterator( RocksDB db, ColumnFamilyHandle columnFamilyHandle, StateSnapshotTransformer<byte[]> stateSnapshotTransformer, ReadOptions readOptions) { RocksIterator rocksIterator = db.newIterator(columnFamilyHandle, readOptions); return stateSnapshotTransformer == null ? new RocksIteratorWrapper(rocksIterator) : new RocksTransformingIteratorWrapper(rocksIterator, stateSnapshotTransformer); }
Example 6
Source File: RocksDBIterator.java From ache with Apache License 2.0
public RocksDBIterator(RocksDB db) {
    this.db = db;
    this.cursor = db.newIterator();
    this.cursor.seekToFirst();
    this.isOpen = true;
    readNextKV(true);
}
Example 7
Source File: AccessControl.java From outbackcdx with Apache License 2.0
private static Map<Long, AccessRule> loadRules(RocksDB db, ColumnFamilyHandle ruleCf) {
    Map<Long, AccessRule> map = new TreeMap<>();
    try (RocksIterator it = db.newIterator(ruleCf)) {
        it.seekToFirst();
        while (it.isValid()) {
            AccessRule rule = GSON.fromJson(new String(it.value(), UTF_8), AccessRule.class);
            map.put(rule.id, rule);
            it.next();
        }
    }
    return map;
}
Example 8
Source File: RocksDBManager.java From WebCollector with GNU General Public License v3.0
@Override
public void merge() throws Exception {
    LOG.info("start merge");
    RocksDB crawldbDatabase = RocksDBUtils.openCrawldbDatabase(crawlPath);

    /* merge the fetch database */
    LOG.info("merge fetch database");
    RocksDB fetchDatabase = RocksDBUtils.openFetchDatabase(crawlPath);
    RocksIterator fetchIterator = fetchDatabase.newIterator();
    for (fetchIterator.seekToFirst(); fetchIterator.isValid(); fetchIterator.next()) {
        crawldbDatabase.put(fetchIterator.key(), fetchIterator.value());
    }
    fetchDatabase.close();

    /* merge the link database */
    LOG.info("merge link database");
    RocksDB linkDatabase = RocksDBUtils.openLinkDatabase(crawlPath);
    RocksIterator linkIterator = linkDatabase.newIterator();
    for (linkIterator.seekToFirst(); linkIterator.isValid(); linkIterator.next()) {
        if (crawldbDatabase.get(linkIterator.key()) == null) {
            crawldbDatabase.put(linkIterator.key(), linkIterator.value());
        }
    }
    linkDatabase.close();

    LOG.info("end merge");
    crawldbDatabase.close();

    // env.removeDatabase(null, "fetch");
    RocksDBUtils.destroyFetchDatabase(crawlPath);
    LOG.debug("remove fetch database");

    // env.removeDatabase(null, "link");
    RocksDBUtils.destroyLinkDatabase(crawlPath);
    LOG.debug("remove link database");
}
Example 9
Source File: BackupEngineTest.java From DDMQ with Apache License 2.0
@Test
public void backupDb2() throws RocksDBException {
    // String originPath = dbFolder.getRoot().getAbsolutePath();
    // String backupPath = backupFolder.getRoot().getAbsolutePath();
    String originPath = "/tmp/rocksdb";
    String originPath2 = "/tmp/rocksdb2";
    String backupPath = "/tmp/rocksdb_backup";
    System.out.println("originPath=" + originPath);
    System.out.println("backupPath=" + backupPath);

    // Open empty database.
    try (final Options opt = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(opt, originPath)) {
        // Fill database with some test values
        prepareDatabase(db);

        try (RocksIterator it = db.newIterator()) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
                System.out.println(originPath + ":" + new String(it.key()) + ":" + new String(it.value()));
            }
        }

        // Create two backups
        try (final BackupableDBOptions bopt = new BackupableDBOptions(backupPath);
             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
            be.createNewBackup(db, true);
            be.createNewBackup(db, true);

            // restore the backup
            final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
            // restore db from first backup
            be.restoreDbFromBackup(backupInfo.get(0).backupId(), originPath2, originPath2, new RestoreOptions(true));

            // Open database again.
            RocksDB db2 = RocksDB.open(opt, originPath2);
            try (RocksIterator it = db2.newIterator()) {
                for (it.seekToFirst(); it.isValid(); it.next()) {
                    System.out.println(originPath2 + ":" + new String(it.key()) + ":" + new String(it.value()));
                }
            }
            db2.close();
        }
    }
}
Example 10
Source File: RocksFullSnapshotStrategy.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked") private static RocksIteratorWrapper getRocksIterator( RocksDB db, ColumnFamilyHandle columnFamilyHandle, StateSnapshotTransformer<byte[]> stateSnapshotTransformer, ReadOptions readOptions) { RocksIterator rocksIterator = db.newIterator(columnFamilyHandle, readOptions); return stateSnapshotTransformer == null ? new RocksIteratorWrapper(rocksIterator) : new RocksTransformingIteratorWrapper(rocksIterator, stateSnapshotTransformer); }
Example 11
Source File: BackupDB.java From DDMQ with Apache License 2.0
public static RestoreState restore() throws RocksDBException {
    if (restoring) {
        LOGGER.info("is restoring, return");
        return RestoreState.BEING_RESTORE;
    }

    LOGGER.info("start restore");
    restoring = true;
    RocksDB restoreDB = null;
    try (final BackupableDBOptions bopt = new BackupableDBOptions(DB_PATH_BACKUP);
         final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {
        // restore db from first backup
        /**
         * @param keepLogFiles If true, restore won't overwrite the existing log files
         *   in wal_dir. It will also move all log files from archive directory to
         *   wal_dir. Use this option in combination with
         *   BackupableDBOptions::backup_log_files = false for persisting in-memory
         *   databases.
         *   Default: false
         */
        boolean keepLogFiles = false;
        be.restoreDbFromLatestBackup(DB_PATH_RESTORE, DB_PATH_RESTORE, new RestoreOptions(keepLogFiles));

        // open database again.
        restoreDB = RocksDB.open(OptionsConfig.DB_OPTIONS, DB_PATH_RESTORE, CFManager.CF_DESCRIPTORS, CFManager.CF_HANDLES);

        int i = 0;
        try (RocksIterator it = restoreDB.newIterator()) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
                LOGGER.info("i:{}, key:{}, value:{}", i++, new String(it.key()), new String(it.value()));
                if (i == 10) {
                    break;
                }
            }
        }
        return RestoreState.SUCCESS;
    } catch (RocksDBException e) {
        LOGGER.error("error while restore, path:{}, err:{}", DB_PATH_RESTORE, e.getMessage(), e);
        return RestoreState.FAIL;
    } finally {
        if (restoreDB != null) {
            restoreDB.close();
        }
        restoring = false;
        LOGGER.info("end restore");
    }
}
Example 12
Source File: PubchemTTLMerger.java From act with GNU General Public License v3.0
protected void merge(Pair<RocksDB, Map<COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles)
    throws RocksDBException, IOException, ClassNotFoundException {
    LOGGER.info("Beginning merge on Pubchem CID");
    RocksDB db = dbAndHandles.getLeft();
    ColumnFamilyHandle pubchemIdCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.CID_TO_HASHES);
    ColumnFamilyHandle meshCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.HASH_TO_MESH);
    ColumnFamilyHandle synonymCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.HASH_TO_SYNONYMS);
    ColumnFamilyHandle synonymTypeCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.HASH_TO_SYNONYM_TYPE);
    ColumnFamilyHandle mergeResultsCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.CID_TO_SYNONYMS);

    RocksIterator cidIterator = db.newIterator(pubchemIdCFH);

    // With help from https://github.com/facebook/rocksdb/wiki/Basic-Operations
    int processed = 0;
    for (cidIterator.seekToFirst(); cidIterator.isValid(); cidIterator.next()) {
        byte[] key = cidIterator.key();
        byte[] val = cidIterator.value();
        String pubchemId = new String(key, UTF8);
        List<String> hashes;
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(val))) {
            // We know all our values so far have been lists of strings, so this should be completely safe.
            hashes = (List<String>) ois.readObject();
        }

        PubchemSynonyms pubchemSynonyms = new PubchemSynonyms(pubchemId);

        /* The hash keys are based on synonym value, which we can manually compute with:
         *   $ echo -n 'dimethyltin(iv)' | md5
         * This means that MeSH ids are linked to synonyms rather than pubchem ids. We need to look up each cid-linked
         * hash in both the MeSH and synonym collections, as the key may legitimately exist in both (and serve to link
         * cid to synonym and cid to MeSH). */
        for (String hash : hashes) {
            /* Note: these ids are not proper MeSH topic ids, but are internal MeSH ids found in the RDF and TTL
             * representations of the MeSH corpus. You can find them in the MeSH .nt or .xml files, but they won't turn up
             * anything on the MeSH website. */
            List<String> meshIds = getValueAsObject(db, meshCFH, hash);
            if (meshIds != null) {
                pubchemSynonyms.addMeSHIds(meshIds);
            }

            List<String> synonyms = getValueAsObject(db, synonymCFH, hash);
            // There are, surprisingly, some dangling hashes in the DB! Handle them gracefully.
            if (synonyms == null) {
                LOGGER.warn("Dangling synonym hash reference, adding empty list in place of value: cid = %s, hash = %s",
                    pubchemId, hash);
                synonyms = Collections.emptyList();
            }

            List<String> synonymTypeStrings = getValueAsObject(db, synonymTypeCFH, hash);
            Set<PC_SYNONYM_TYPES> synonymTypes = DEFAULT_SYNONYM_DATA_TYPES;
            if (synonymTypeStrings != null) {
                synonymTypes = synonymTypeStrings.stream().map(PC_SYNONYM_TYPES::valueOf).collect(Collectors.toSet());
            }

            if (synonymTypes.size() == 0) {
                LOGGER.warn("Found zero synonym types for synonym, defaulting to %s: %s %s, synonyms = %s",
                    PC_SYNONYM_TYPES.UNKNOWN.name(), pubchemId, hash, StringUtils.join(synonyms, ", "));
            }

            /* It turns out that *lots* of synonyms are duplicated as depositor supplied names, so don't complain about it
             * here. For performance sake we might want to consider changing the data model of PubchemSynonyms to reduce
             * synonym string duplication, as the current model is pretty inefficient. */
            for (PC_SYNONYM_TYPES synonymType : synonymTypes) {
                for (String synonym : synonyms) {
                    // Let the PubchemSynonyms object do the de-duplication for us rather than reducing `synonyms` to a Set.
                    pubchemSynonyms.addSynonym(synonymType, synonym);
                }
            }
        }

        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
             ObjectOutputStream oo = new ObjectOutputStream(bos)) {
            oo.writeObject(pubchemSynonyms);
            oo.flush();
            db.put(mergeResultsCFH, key, bos.toByteArray());
        }

        processed++;
        if (processed % 100000 == 0) {
            LOGGER.info("Merged %d entries on Pubchem compound id", processed);
        }
    }
    LOGGER.info("Merge complete, %d entries processed", processed);
}
Example 13
Source File: AccessControl.java From outbackcdx with Apache License 2.0
private long calculateNextId(RocksDB db, ColumnFamilyHandle cf) {
    try (RocksIterator it = db.newIterator(cf)) {
        it.seekToLast();
        return it.isValid() ? decodeKey(it.key()) + 1 : 0;
    }
}
Example 14
Source File: RocksDBOperationUtils.java From flink with Apache License 2.0
public static RocksIteratorWrapper getRocksIterator(RocksDB db, ColumnFamilyHandle columnFamilyHandle) {
    return new RocksIteratorWrapper(db.newIterator(columnFamilyHandle));
}
Example 15
Source File: RocksDBOperationUtils.java From flink with Apache License 2.0
public static RocksIteratorWrapper getRocksIterator(RocksDB db) {
    return new RocksIteratorWrapper(db.newIterator());
}
Example 16
Source File: RocksDBOperationUtils.java From flink with Apache License 2.0
public static RocksIteratorWrapper getRocksIterator(
    RocksDB db,
    ColumnFamilyHandle columnFamilyHandle,
    ReadOptions readOptions) {
    return new RocksIteratorWrapper(db.newIterator(columnFamilyHandle, readOptions));
}
Example 17
Source File: RocksDBOperationUtils.java From Flink-CEPplus with Apache License 2.0
public static RocksIteratorWrapper getRocksIterator(RocksDB db, ColumnFamilyHandle columnFamilyHandle) {
    return new RocksIteratorWrapper(db.newIterator(columnFamilyHandle));
}
Example 18
Source File: RocksDBOperationUtils.java From Flink-CEPplus with Apache License 2.0
public static RocksIteratorWrapper getRocksIterator(RocksDB db) {
    return new RocksIteratorWrapper(db.newIterator());
}
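These Flink helpers return a RocksIteratorWrapper rather than a bare RocksIterator; in the Flink versions shown above the wrapper mirrors the iterator interface and adds status checking, and closing it is the caller's responsibility. A minimal usage sketch, assuming db and columnFamilyHandle are already in scope and that the wrapper is Closeable as in those versions:

// Hedged sketch of calling the helper above; not taken from the Flink sources.
try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(db, columnFamilyHandle)) {
    for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
        byte[] key = iterator.key();
        byte[] value = iterator.value();
        // deserialize state from key/value here (application-specific)
    }
}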