Java Code Examples for org.rocksdb.Checkpoint#create()
The following examples show how to use org.rocksdb.Checkpoint#create(). You can go to the original project or source file by following the links above each example.
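Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: open a RocksDB instance, obtain a Checkpoint object for it with Checkpoint.create(), and write a consistent snapshot into a directory that does not yet exist. The class name and the two paths below are placeholder values, not taken from any of the projects.

import org.rocksdb.Checkpoint;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CheckpointSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-data");      // placeholder DB path
             Checkpoint checkpoint = Checkpoint.create(db)) {
            // createCheckpoint() hard-links the live SST files into the target
            // directory where possible; the target directory must not already exist.
            checkpoint.createCheckpoint("/tmp/rocksdb-checkpoint");        // placeholder checkpoint path
        }
    }
}

The examples below follow the same pattern, differing mainly in how they choose the checkpoint directory and how they handle failures.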
Example 1
Source File: RocksDbHdfsState.java (from jstorm, Apache License 2.0)
/**
 * Flush the data in memtable of RocksDB into disk, and then create checkpoint
 *
 * @param batchId
 */
@Override
public void checkpoint(long batchId) {
    long startTime = System.currentTimeMillis();
    try {
        rocksDb.flush(new FlushOptions());
        Checkpoint cp = Checkpoint.create(rocksDb);
        cp.createCheckpoint(getLocalCheckpointPath(batchId));
    } catch (RocksDBException e) {
        LOG.error("Failed to create checkpoint for batch-" + batchId, e);
        throw new RuntimeException(e.getMessage());
    }

    if (JStormMetrics.enabled)
        rocksDbFlushAndCpLatency.update(System.currentTimeMillis() - startTime);
}
Example 2
Source File: RocksIncrementalSnapshotStrategy.java (from Flink-CEPplus, Apache License 2.0)
private void takeDBNativeCheckpoint(@Nonnull SnapshotDirectory outputDirectory) throws Exception {
    // create hard links of living files in the output path
    try (
        ResourceGuard.Lease ignored = rocksDBResourceGuard.acquireResource();
        Checkpoint checkpoint = Checkpoint.create(db)) {

        checkpoint.createCheckpoint(outputDirectory.getDirectory().getPath());
    } catch (Exception ex) {
        try {
            outputDirectory.cleanup();
        } catch (IOException cleanupEx) {
            ex = ExceptionUtils.firstOrSuppressed(cleanupEx, ex);
        }
        throw ex;
    }
}
Example 3
Source File: RocksIncrementalSnapshotStrategy.java (from flink, Apache License 2.0)
private void takeDBNativeCheckpoint(@Nonnull SnapshotDirectory outputDirectory) throws Exception {
    // create hard links of living files in the output path
    try (
        ResourceGuard.Lease ignored = rocksDBResourceGuard.acquireResource();
        Checkpoint checkpoint = Checkpoint.create(db)) {

        checkpoint.createCheckpoint(outputDirectory.getDirectory().getPath());
    } catch (Exception ex) {
        try {
            outputDirectory.cleanup();
        } catch (IOException cleanupEx) {
            ex = ExceptionUtils.firstOrSuppressed(cleanupEx, ex);
        }
        throw ex;
    }
}
Example 4
Source File: ExampleStateMachine.java (from raft-java, Apache License 2.0)
@Override
public void writeSnapshot(String snapshotDir) {
    Checkpoint checkpoint = Checkpoint.create(db);
    try {
        checkpoint.createCheckpoint(snapshotDir);
    } catch (Exception ex) {
        ex.printStackTrace();
        LOG.warn("writeSnapshot meet exception, dir={}, msg={}",
                snapshotDir, ex.getMessage());
    }
}
Example 5
Source File: RocksIncrementalSnapshotStrategy.java (from flink, Apache License 2.0)
private void takeDBNativeCheckpoint(@Nonnull SnapshotDirectory outputDirectory) throws Exception {
    // create hard links of living files in the output path
    try (
        ResourceGuard.Lease ignored = rocksDBResourceGuard.acquireResource();
        Checkpoint checkpoint = Checkpoint.create(db)) {

        checkpoint.createCheckpoint(outputDirectory.getDirectory().toString());
    } catch (Exception ex) {
        try {
            outputDirectory.cleanup();
        } catch (IOException cleanupEx) {
            ex = ExceptionUtils.firstOrSuppressed(cleanupEx, ex);
        }
        throw ex;
    }
}
Example 6
Source File: RocksDbUnitTest.java (from jstorm, Apache License 2.0)
private static void rocksDbTest(RocksDB db, List<ColumnFamilyHandle> handlers) {
    try {
        ColumnFamilyHandle handler1 = null;
        ColumnFamilyHandle handler2 = null;
        if (handlers.size() > 0) {
            // skip default column family
            handler1 = handlers.get(1);
            handler2 = handlers.get(2);
        } else {
            handler1 = db.createColumnFamily(new ColumnFamilyDescriptor("test1".getBytes()));
            handler2 = db.createColumnFamily(new ColumnFamilyDescriptor("test2".getBytes()));
        }
        int startValue1 = getStartValue(db, handler1);
        int startValue2 = getStartValue(db, handler2);

        Checkpoint cp = Checkpoint.create(db);

        if (isCompaction) {
            db.compactRange();
            LOG.info("Compaction!");
        }

        long flushWaitTime = System.currentTimeMillis() + flushInterval;
        for (int i = 0; i < putNum || putNum == -1; i++) {
            db.put(handler1, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue1 + i).getBytes());
            db.put(handler2, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue2 + i).getBytes());
            if (isFlush && flushWaitTime <= System.currentTimeMillis()) {
                db.flush(new FlushOptions());
                if (isCheckpoint) {
                    cp.createCheckpoint(cpPath + "/" + i);
                }
                flushWaitTime = System.currentTimeMillis() + flushInterval;
            }
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to put or flush", e);
    }
}
Example 7
Source File: RDBCheckpointManager.java (from hadoop-ozone, Apache License 2.0)
public RDBCheckpointManager(RocksDB rocksDB) {
    this.db = rocksDB;
    this.checkpoint = Checkpoint.create(rocksDB);
}
Example 8
Source File: RocksDbDataSourceImpl.java (from gsc-core, GNU Lesser General Public License v3.0)
public void backup(String dir) throws RocksDBException {
    Checkpoint cp = Checkpoint.create(database);
    cp.createCheckpoint(dir + this.getDBName());
}
Example 9
Source File: RDBCheckpointManager.java (from hadoop-ozone, Apache License 2.0)
/**
 * Create a checkpoint manager with a prefix to be added to the
 * snapshots created.
 *
 * @param rocksDB DB instance
 * @param checkpointPrefix prefix string.
 */
public RDBCheckpointManager(RocksDB rocksDB, String checkpointPrefix) {
    this.db = rocksDB;
    this.checkpointNamePrefix = checkpointPrefix;
    this.checkpoint = Checkpoint.create(rocksDB);
}