org.rocksdb.WriteOptions Java Examples
The following examples show how to use org.rocksdb.WriteOptions.
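Most of the examples below follow the same basic pattern: create a WriteOptions instance, tune its durability settings (setSync, setDisableWAL), and pass it to a put or a batched write. As a minimal, self-contained sketch of that pattern, assuming only the public RocksDB Java API (the database path and the key/value bytes are placeholders invented for illustration):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class WriteOptionsSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/writeoptions-demo"); // placeholder path
             // setSync(false) skips fsync on each write; setDisableWAL(true) skips the
             // write-ahead log entirely: faster, but unflushed writes are lost on a crash
             WriteOptions writeOptions = new WriteOptions().setSync(false).setDisableWAL(true);
             WriteBatch batch = new WriteBatch()) {
            batch.put("key".getBytes(), "value".getBytes());
            db.write(writeOptions, batch);
        }
    }
}

Note that WriteOptions wraps a native C++ object, which is why several examples below close it explicitly (or track it for later closing) rather than relying on garbage collection.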
Example #1
Source File: RocksDBPerformanceTest.java From flink with Apache License 2.0
@Before
public void init() throws IOException {
    rocksDir = tmp.newFolder();

    // ensure the RocksDB library is loaded to a distinct location each retry
    NativeLibraryLoader.getInstance().loadLibrary(rocksDir.getAbsolutePath());

    options = new Options()
            .setCompactionStyle(CompactionStyle.LEVEL)
            .setLevelCompactionDynamicLevelBytes(true)
            .setIncreaseParallelism(4)
            .setUseFsync(false)
            .setMaxOpenFiles(-1)
            .setCreateIfMissing(true)
            .setMergeOperatorName(RocksDBKeyedStateBackend.MERGE_OPERATOR_NAME);

    writeOptions = new WriteOptions()
            .setSync(false)
            .setDisableWAL(true);
}
Example #2
Source File: RocksDBBlockHeaderStorage.java From WeCross with Apache License 2.0
@Override
public void writeBlockHeader(long blockNumber, byte[] blockHeader) {
    if (dbClosed) {
        logger.warn("Write RocksDB error: RocksDB has been closed");
        return;
    }

    String key = blockKeyPrefix + String.valueOf(blockNumber);
    try {
        WriteBatch writeBatch = new WriteBatch();
        writeBatch.put(numberKey.getBytes(), String.valueOf(blockNumber).getBytes());
        writeBatch.put(key.getBytes(), blockHeader);

        WriteOptions writeOptions = new WriteOptions();
        rocksDB.write(writeOptions, writeBatch);

        onBlockHeader(blockNumber, blockHeader);
    } catch (RocksDBException e) {
        logger.error("RocksDB write error", e);
    }
}
Example #3
Source File: RocksDBClient.java From geowave with Apache License 2.0
public synchronized RocksDBIndexTable getIndexTable(
        final String tableName,
        final short adapterId,
        final byte[] partition,
        final boolean requiresTimestamp) {
    if (indexWriteOptions == null) {
        RocksDB.loadLibrary();
        final int cores = Runtime.getRuntime().availableProcessors();
        indexWriteOptions =
                new Options().setCreateIfMissing(true).prepareForBulkLoad().setIncreaseParallelism(cores);
        indexReadOptions = new Options().setIncreaseParallelism(cores);
        batchWriteOptions =
                new WriteOptions().setDisableWAL(false).setNoSlowdown(false).setSync(false);
    }
    final String directory = subDirectory + "/" + tableName;
    return indexTableCache.get(
            (IndexCacheKey) keyCache.get(
                    directory,
                    d -> new IndexCacheKey(d, adapterId, partition, requiresTimestamp)));
}
Example #4
Source File: RocksDBResource.java From Flink-CEPplus with Apache License 2.0
@Override
protected void before() throws Throwable {
    this.temporaryFolder = new TemporaryFolder();
    this.temporaryFolder.create();
    final File rocksFolder = temporaryFolder.newFolder();
    this.dbOptions = optionsFactory.createDBOptions(
            PredefinedOptions.DEFAULT.createDBOptions()).setCreateIfMissing(true);
    this.columnFamilyOptions = optionsFactory.createColumnOptions(
            PredefinedOptions.DEFAULT.createColumnOptions());
    this.writeOptions = new WriteOptions();
    this.writeOptions.disableWAL();
    this.readOptions = new ReadOptions();
    this.columnFamilyHandles = new ArrayList<>(1);
    this.rocksDB = RocksDB.open(
            dbOptions,
            rocksFolder.getAbsolutePath(),
            Collections.singletonList(new ColumnFamilyDescriptor("default".getBytes(), columnFamilyOptions)),
            columnFamilyHandles);
    this.batchWrapper = new RocksDBWriteBatchWrapper(rocksDB, writeOptions);
}
Example #5
Source File: SamzaTimerInternalsFactoryTest.java From beam with Apache License 2.0
private static KeyValueStore<ByteArray, byte[]> createStore(String name) {
    final Options options = new Options();
    options.setCreateIfMissing(true);

    RocksDbKeyValueStore rocksStore =
            new RocksDbKeyValueStore(
                    new File(System.getProperty("java.io.tmpdir") + "/" + name),
                    options,
                    new MapConfig(),
                    false,
                    "beamStore",
                    new WriteOptions(),
                    new FlushOptions(),
                    new KeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));

    return new SerializedKeyValueStore<>(
            rocksStore,
            new ByteArraySerdeFactory.ByteArraySerde(),
            new ByteSerde(),
            new SerializedKeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));
}
Example #6
Source File: RocksDBSenseVectors.java From biomedicus with Apache License 2.0
@Override
public void removeWord(int index) {
    try (WriteBatch writeBatch = new WriteBatch()) {
        try (RocksIterator rocksIterator = rocksDB.newIterator()) {
            rocksIterator.seekToFirst();
            while (rocksIterator.isValid()) {
                SparseVector sparseVector = new SparseVector(rocksIterator.value());
                sparseVector.remove(index);
                writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
                // advance the iterator; without this the loop never terminates
                rocksIterator.next();
            }
        }
        rocksDB.write(new WriteOptions(), writeBatch);
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
Example #7
Source File: RocksDBSenseVectors.java From biomedicus with Apache License 2.0
@Override
public void removeWords(Collection<Integer> indexes) {
    try (WriteBatch writeBatch = new WriteBatch()) {
        try (RocksIterator rocksIterator = rocksDB.newIterator()) {
            rocksIterator.seekToFirst();
            while (rocksIterator.isValid()) {
                SparseVector sparseVector = new SparseVector(rocksIterator.value());
                sparseVector.removeAll(indexes);
                writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
                // advance the iterator; without this the loop never terminates
                rocksIterator.next();
            }
        }
        rocksDB.write(new WriteOptions(), writeBatch);
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
Example #8
Source File: RocksDbDataSourceImpl.java From gsc-core with GNU Lesser General Public License v3.0
private void updateByBatchInner(Map<byte[], byte[]> rows, WriteOptions options) throws Exception {
    if (quitIfNotAlive()) {
        return;
    }
    try (WriteBatch batch = new WriteBatch()) {
        for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
            if (entry.getValue() == null) {
                batch.delete(entry.getKey());
            } else {
                batch.put(entry.getKey(), entry.getValue());
            }
        }
        // write with the caller-supplied WriteOptions instead of discarding them
        database.write(options, batch);
    }
}
Example #9
Source File: RocksDBClient.java From geowave with Apache License 2.0
public synchronized RocksDBDataIndexTable getDataIndexTable(
        final String tableName,
        final short adapterId) {
    if (indexWriteOptions == null) {
        RocksDB.loadLibrary();
        final int cores = Runtime.getRuntime().availableProcessors();
        indexWriteOptions =
                new Options().setCreateIfMissing(true).prepareForBulkLoad().setIncreaseParallelism(cores);
        indexReadOptions = new Options().setIncreaseParallelism(cores);
        batchWriteOptions =
                new WriteOptions().setDisableWAL(false).setNoSlowdown(false).setSync(false);
    }
    final String directory = subDirectory + "/" + tableName;
    return dataIndexTableCache.get(
            (DataIndexCacheKey) keyCache.get(directory, d -> new DataIndexCacheKey(d, adapterId)));
}
Example #10
Source File: RocksDBDataIndexTable.java From geowave with Apache License 2.0
public RocksDBDataIndexTable(
        final Options writeOptions,
        final Options readOptions,
        final WriteOptions batchWriteOptions,
        final String subDirectory,
        final short adapterId,
        final boolean visibilityEnabled,
        final boolean compactOnWrite,
        final int batchSize) {
    super(
            writeOptions,
            readOptions,
            batchWriteOptions,
            subDirectory,
            adapterId,
            visibilityEnabled,
            compactOnWrite,
            batchSize);
}
Example #11
Source File: RocksDBWriteBatchWrapperTest.java From flink with Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the memory consumed exceeds the
 * preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
    try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
         WriteOptions options = new WriteOptions().setDisableWAL(true);
         ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
         RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 200, 50)) {

        long initBatchSize = writeBatchWrapper.getDataSize();
        byte[] dummy = new byte[6];
        ThreadLocalRandom.current().nextBytes(dummy);

        // each KV adds 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes in the
        // format [handleType|kvType|keyLen|key|valueLen|value];
        // for more information see write_batch.cc in RocksDB
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize + 16, writeBatchWrapper.getDataSize());

        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize + 32, writeBatchWrapper.getDataSize());

        // will flush all, leaving an empty write batch
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
    }
}
Example #12
Source File: RocksDBWriteBatchWrapperTest.java From flink with Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the kv count exceeds the
 * preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterCountExceed() throws Exception {
    try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
         WriteOptions options = new WriteOptions().setDisableWAL(true);
         ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
         RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 100, 50000)) {

        long initBatchSize = writeBatchWrapper.getDataSize();
        byte[] dummy = new byte[2];
        ThreadLocalRandom.current().nextBytes(dummy);

        for (int i = 1; i < 100; ++i) {
            writeBatchWrapper.put(handle, dummy, dummy);
            // each kv consumes 8 bytes
            assertEquals(initBatchSize + 8 * i, writeBatchWrapper.getDataSize());
        }

        // the 100th put exceeds the capacity of 100 and triggers a flush
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
    }
}
Example #13
Source File: RocksDBResource.java From flink with Apache License 2.0
@Override
protected void before() throws Throwable {
    this.temporaryFolder = new TemporaryFolder();
    this.temporaryFolder.create();
    final File rocksFolder = temporaryFolder.newFolder();
    this.dbOptions = optionsFactory.createDBOptions(
            PredefinedOptions.DEFAULT.createDBOptions()).setCreateIfMissing(true);
    this.columnFamilyOptions = optionsFactory.createColumnOptions(
            PredefinedOptions.DEFAULT.createColumnOptions());
    this.writeOptions = new WriteOptions();
    this.writeOptions.disableWAL();
    this.readOptions = new ReadOptions();
    this.columnFamilyHandles = new ArrayList<>(1);
    this.rocksDB = RocksDB.open(
            dbOptions,
            rocksFolder.getAbsolutePath(),
            Collections.singletonList(new ColumnFamilyDescriptor("default".getBytes(), columnFamilyOptions)),
            columnFamilyHandles);
    this.batchWrapper = new RocksDBWriteBatchWrapper(rocksDB, writeOptions);
}
Example #14
Source File: RocksDBIndexTable.java From geowave with Apache License 2.0
public RocksDBIndexTable(
        final Options writeOptions,
        final Options readOptions,
        final WriteOptions batchWriteOptions,
        final String subDirectory,
        final short adapterId,
        final byte[] partition,
        final boolean requiresTimestamp,
        final boolean visibilityEnabled,
        final boolean compactOnWrite,
        final int batchSize) {
    super(
            writeOptions,
            readOptions,
            batchWriteOptions,
            subDirectory,
            adapterId,
            visibilityEnabled,
            compactOnWrite,
            batchSize);
    this.requiresTimestamp = requiresTimestamp;
    this.partition = partition;
}
Example #15
Source File: AbstractRocksDBTable.java From geowave with Apache License 2.0
public AbstractRocksDBTable(
        final Options writeOptions,
        final Options readOptions,
        final WriteOptions batchWriteOptions,
        final String subDirectory,
        final short adapterId,
        final boolean visibilityEnabled,
        final boolean compactOnWrite,
        final int batchSize) {
    super();
    this.writeOptions = writeOptions;
    this.readOptions = readOptions;
    this.batchWriteOptions = batchWriteOptions;
    this.subDirectory = subDirectory;
    this.adapterId = adapterId;
    exists = new File(subDirectory).exists();
    this.visibilityEnabled = visibilityEnabled;
    this.compactOnWrite = compactOnWrite;
    this.batchSize = batchSize;
    batchWrite = batchSize > 1;
}
Example #16
Source File: DBStoreBuilder.java From hadoop-ozone with Apache License 2.0
/**
 * Builds a DBStore instance and returns that.
 *
 * @return DBStore
 */
public DBStore build() throws IOException {
    if (StringUtil.isBlank(dbname) || (dbPath == null)) {
        LOG.error("Required Parameter missing.");
        throw new IOException("Required parameter is missing. Please make "
                + "sure Path and DB name are provided.");
    }
    processDBProfile();
    processTables();
    DBOptions options = getDbProfile();

    WriteOptions writeOptions = new WriteOptions();
    writeOptions.setSync(rocksDBConfiguration.getSyncOption());

    File dbFile = getDBFile();
    if (!dbFile.getParentFile().exists()) {
        throw new IOException("The DB destination directory should exist.");
    }
    return new RDBStore(dbFile, options, writeOptions, tables, registry);
}
Example #17
Source File: WindowedRocksDbHdfsState.java From jstorm with Apache License 2.0
@Override
public void putBatch(TimeWindow window, Map<K, V> batch) {
    try {
        ColumnFamilyHandle handler = getColumnFamilyHandle(window);
        WriteBatch writeBatch = new WriteBatch();
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(handler, serializer.serialize(entry.getKey()), serializer.serialize(entry.getValue()));
        }
        rocksDb.write(new WriteOptions(), writeBatch);
    } catch (RocksDBException e) {
        LOG.error("Failed to put batch={} for window={}", batch, window);
        throw new RuntimeException(e.getMessage());
    }
}
Example #18
Source File: JRocksDB.java From snowblossom with Apache License 2.0
public JRocksDB(Config config) throws Exception {
    super(config);

    use_separate_dbs = config.getBoolean("db_separate");

    config.require("db_path");
    String path = config.get("db_path");

    base_path = new File(path);
    base_path.mkdirs();

    logger.info(String.format("Loading RocksDB with path %s", path));

    RocksDB.loadLibrary();
    sharedWriteOptions = new WriteOptions();
    sharedWriteOptions.setDisableWAL(false);
    sharedWriteOptions.setSync(false);

    // Separate DBs should only be used when you don't care about syncing between
    // the databases. If you are fine with writes to them being preserved out of
    // order relative to each other, it should be fine.
    // For example, with a combined DB, if you write a to A and then b to B, a bad
    // shutdown leaves you with {}, {a}, or {a,b}. With separate DBs you could very
    // well end up with just {b}.
    if (use_separate_dbs) {
        separate_db_map = new TreeMap<>();
    } else {
        shared_db = openRocksDB(path);
    }
}
Example #19
Source File: RocksDBWriteBatchWrapper.java From flink with Apache License 2.0
public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB, @Nullable WriteOptions options, int capacity, long batchSize) {
    Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
            "capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);
    Preconditions.checkArgument(batchSize >= 0, "Max batch size must be non-negative.");

    this.db = rocksDB;
    this.options = options;
    this.capacity = capacity;
    this.batchSize = batchSize;
    if (this.batchSize > 0) {
        this.batch = new WriteBatch((int) Math.min(this.batchSize, this.capacity * PER_RECORD_BYTES));
    } else {
        this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
    }
}
Example #20
Source File: RocksDBResourceContainer.java From flink with Apache License 2.0
/**
 * Gets the RocksDB {@link WriteOptions} to be used for write operations.
 */
public WriteOptions getWriteOptions() {
    // Disable WAL by default
    WriteOptions opt = new WriteOptions().setDisableWAL(true);
    handlesToClose.add(opt);

    // add user-defined options factory, if specified
    if (optionsFactory != null) {
        opt = optionsFactory.createWriteOptions(opt, handlesToClose);
    }

    return opt;
}
Example #21
Source File: RocksDBMetronome.java From nifi with Apache License 2.0
/**
 * Put the key / value pair into the database in the specified column family.
 *
 * @param columnFamilyHandle the column family into which to put the value
 * @param writeOptions specification of options for write operations
 * @param key the key to be inserted
 * @param value the value to be associated with the specified key
 * @throws RocksDBException thrown if there is an error in the underlying library.
 */
public void put(final ColumnFamilyHandle columnFamilyHandle, WriteOptions writeOptions, final byte[] key, final byte[] value) throws RocksDBException {
    dbReadLock.lock();
    try {
        checkDbState();
        rocksDB.put(columnFamilyHandle, writeOptions, key, value);
    } finally {
        dbReadLock.unlock();
    }
}
Example #22
Source File: RocksDBResourceContainerTest.java From flink with Apache License 2.0
@Test
public void testFreeWriteReadOptionsAfterClose() throws Exception {
    RocksDBResourceContainer container = new RocksDBResourceContainer();
    WriteOptions writeOptions = container.getWriteOptions();
    ReadOptions readOptions = container.getReadOptions();
    assertThat(writeOptions.isOwningHandle(), is(true));
    assertThat(readOptions.isOwningHandle(), is(true));
    container.close();
    assertThat(writeOptions.isOwningHandle(), is(false));
    assertThat(readOptions.isOwningHandle(), is(false));
}
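The test above relies on the native-handle ownership model of the RocksJava API: isOwningHandle() reports whether a Java wrapper still owns its underlying C++ object, and closing the container disposes every handle it tracked. A standalone sketch of the same behavior, assuming only the public WriteOptions API:

import org.rocksdb.RocksDB;
import org.rocksdb.WriteOptions;

public class OwnershipSketch {
    public static void main(String[] args) {
        RocksDB.loadLibrary();
        WriteOptions opts = new WriteOptions();
        System.out.println(opts.isOwningHandle()); // true: the wrapper owns its native handle
        opts.close();
        System.out.println(opts.isOwningHandle()); // false: close() released the native handle
    }
}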
Example #23
Source File: RocksDBMetronome.java From nifi with Apache License 2.0
/**
 * Delete the key / value pair from the specified column family.
 *
 * @param columnFamilyHandle the column family from which to delete the value
 * @param writeOptions specification of options for write operations
 * @param key the key to be deleted
 * @throws RocksDBException thrown if there is an error in the underlying library.
 */
public void delete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final WriteOptions writeOptions) throws RocksDBException {
    dbReadLock.lock();
    try {
        checkDbState();
        rocksDB.delete(columnFamilyHandle, writeOptions, key);
    } finally {
        dbReadLock.unlock();
    }
}
Example #24
Source File: RDB.java From DDMQ with Apache License 2.0
private static boolean write(final WriteOptions writeOptions, final WriteBatch writeBatch) {
    try {
        DB.write(writeOptions, writeBatch);
        LOGGER.debug("succ write writeBatch, size:{}", writeBatch.count());
    } catch (RocksDBException e) {
        // TODO: 2017/11/8 report the write failure
        LOGGER.error("error while write batch, err:{}", e.getMessage(), e);
        return false;
    }
    return true;
}
Example #25
Source File: RocksDBStdSessions.java From hugegraph with Apache License 2.0
public StdSession(HugeConfig conf) {
    boolean bulkload = conf.get(RocksDBOptions.BULKLOAD_MODE);
    this.batch = new WriteBatch();
    this.writeOptions = new WriteOptions();
    this.writeOptions.setDisableWAL(bulkload);
    //this.writeOptions.setSync(false);
}
Example #26
Source File: RDB.java From iot-mqtt with Apache License 2.0
public boolean put(final ColumnFamilyHandle cfh, final WriteOptions writeOptions, final byte[] key, final byte[] value) {
    try {
        this.DB.put(cfh, writeOptions, key, value);
        log.debug("[RocksDB] -> success put value");
    } catch (RocksDBException e) {
        log.error("[RocksDB] -> error while put, columnFamilyHandle:{}, key:{}, err:{}",
                cfh.isOwningHandle(), new String(key), e.getMessage(), e);
        return false;
    }
    return true;
}
Example #27
Source File: RocksDbDataSourceImpl.java From gsc-core with GNU Lesser General Public License v3.0
private void updateByBatchInner(Map<byte[], byte[]> rows) throws Exception {
    if (quitIfNotAlive()) {
        return;
    }
    try (WriteBatch batch = new WriteBatch()) {
        for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
            if (entry.getValue() == null) {
                batch.delete(entry.getKey());
            } else {
                batch.put(entry.getKey(), entry.getValue());
            }
        }
        database.write(new WriteOptions(), batch);
    }
}
Example #28
Source File: RocksDBStore.java From hadoop-ozone with Apache License 2.0
public RocksDBStore(File dbFile, Options options) throws IOException {
    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
    RocksDB.loadLibrary();
    dbOptions = options;
    dbLocation = dbFile;
    writeOptions = new WriteOptions();
    try {
        db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
        if (dbOptions.statistics() != null) {
            Map<String, String> jmxProperties = new HashMap<String, String>();
            jmxProperties.put("dbName", dbFile.getName());
            statMBeanName = HddsUtils.registerWithJmxProperties(
                    "Ozone", "RocksDbStore", jmxProperties,
                    RocksDBStoreMBean.create(dbOptions.statistics(), dbFile.getName()));
            if (statMBeanName == null) {
                LOG.warn("jmx registration failed during RocksDB init, db path :{}",
                        dbFile.getAbsolutePath());
            }
        }
    } catch (RocksDBException e) {
        String msg = "Failed init RocksDB, db path : " + dbFile.getAbsolutePath()
                + ", exception :" + (e.getCause() == null
                ? e.getClass().getCanonicalName() + " " + e.getMessage()
                : e.getCause().getClass().getCanonicalName() + " " + e.getCause().getMessage());
        throw new IOException(msg, e);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("RocksDB successfully opened.");
        LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
        LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
        LOG.debug("[Option] compactionStyle= {}", options.compactionStyle());
        LOG.debug("[Option] compressionType= {}", options.compressionType());
        LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
        LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
    }
}
Example #29
Source File: RocksDbInstance.java From teku with Apache License 2.0
private Transaction(
        final TransactionDB db,
        final ColumnFamilyHandle defaultHandle,
        final ImmutableMap<RocksDbColumn<?, ?>, ColumnFamilyHandle> columnHandles) {
    this.defaultHandle = defaultHandle;
    this.columnHandles = columnHandles;
    this.writeOptions = new WriteOptions();
    this.rocksDbTx = db.beginTransaction(writeOptions);
}