Java Code Examples for org.rocksdb.RocksDB#listColumnFamilies()
The following examples show how to use
org.rocksdb.RocksDB#listColumnFamilies().
You can vote up the examples you like or vote down the ones you don't like,
and follow the links above each example to go to the original project or source file. You may also check out the related API usage on the sidebar.
Example 1
Source File: RocksDBDAO.java From hudi with Apache License 2.0 | 6 votes |
/**
 * Helper to load managed column family descriptors.
 *
 * <p>Lists the column families already present in the RocksDB instance at
 * {@code rocksDBBasePath}; when the database has none yet, falls back to a
 * descriptor for the default column family.
 *
 * @param dbOptions DB-level options used to build the listing {@link Options}
 * @return descriptors for every column family this DAO should manage
 * @throws RocksDBException on errors reading the database
 */
private List<ColumnFamilyDescriptor> loadManagedColumnFamilies(DBOptions dbOptions) throws RocksDBException {
  final List<ColumnFamilyDescriptor> managedColumnFamilies = new ArrayList<>();
  // Options wraps a native handle; close it after listing to avoid leaking native memory.
  try (final Options options = new Options(dbOptions, new ColumnFamilyOptions())) {
    List<byte[]> existing = RocksDB.listColumnFamilies(options, rocksDBBasePath);
    if (existing.isEmpty()) {
      LOG.info("No column family found. Loading default");
      managedColumnFamilies.add(getColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
    } else {
      LOG.info("Loading column families :" + existing.stream().map(String::new).collect(Collectors.toList()));
      managedColumnFamilies
          .addAll(existing.stream().map(RocksDBDAO::getColumnFamilyDescriptor).collect(Collectors.toList()));
    }
  }
  return managedColumnFamilies;
}
Example 2
Source File: RDBStore.java From hadoop-ozone with Apache License 2.0 | 5 votes |
/**
 * Read DB and return existing column families.
 *
 * @return List of column families, one {@link TableConfig} per family found on disk
 * @throws RocksDBException on Error.
 */
private List<TableConfig> getColumnFamiliesInExistingDb() throws RocksDBException {
  final List<byte[]> bytes;
  // Options holds a native handle that is only needed for the listing call;
  // close it with try-with-resources to avoid leaking native memory.
  try (Options options = new Options()) {
    bytes = RocksDB.listColumnFamilies(options, dbLocation.getAbsolutePath());
  }
  List<TableConfig> columnFamiliesInDb = bytes.stream()
      .map(cfbytes -> new TableConfig(StringUtils.bytes2String(cfbytes),
          DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE.getColumnFamilyOptions()))
      .collect(Collectors.toList());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Found column Families in DB : {}", columnFamiliesInDb);
  }
  return columnFamiliesInDb;
}
Example 3
Source File: ListTables.java From hadoop-ozone with Apache License 2.0 | 5 votes |
/**
 * Prints the name of every column family in the target RocksDB database,
 * one per line, to standard output.
 *
 * @return always {@code null} ({@link Void})
 * @throws Exception if the database cannot be listed
 */
@Override
public Void call() throws Exception {
  final List<byte[]> columnFamilies;
  // Close the native Options handle once the listing is done (it is otherwise leaked).
  try (Options options = new Options()) {
    columnFamilies = RocksDB.listColumnFamilies(options, parent.getDbPath());
  }
  for (byte[] b : columnFamilies) {
    // Column family names are stored as raw bytes; decode explicitly as UTF-8.
    System.out.println(new String(b, StandardCharsets.UTF_8));
  }
  return null;
}
Example 4
Source File: RocksDBStdSessions.java From hugegraph with Apache License 2.0 | 5 votes |
/**
 * Lists the names of the column families stored in the RocksDB database at
 * {@code path}. When the database reports no column families (e.g. it has
 * not been created yet), returns a set containing only {@code "default"}.
 *
 * @param path filesystem path of the RocksDB database
 * @return decoded column family names
 * @throws RocksDBException on errors reading the database
 */
public static Set<String> listCFs(String path) throws RocksDBException {
  Set<String> cfs = new HashSet<>();
  final List<byte[]> oldCFs;
  // Options wraps a native handle; close it after listing to avoid a native memory leak.
  try (Options options = new Options()) {
    oldCFs = RocksDB.listColumnFamilies(options, path);
  }
  if (oldCFs.isEmpty()) {
    cfs.add("default");
  } else {
    for (byte[] oldCF : oldCFs) {
      cfs.add(decode(oldCF));
    }
  }
  return cfs;
}
Example 5
Source File: ByteStoreManager.java From dremio-oss with Apache License 2.0 | 4 votes |
/**
 * Opens (creating if necessary) the on-disk RocksDB catalog store and
 * registers one RocksDBStore per existing column family. No-op when the
 * provider is running in memory mode.
 *
 * @throws Exception on ownership, filesystem, or RocksDB failures
 */
public void start() throws Exception {
  if (inMemory) {
    return;
  }
  // In forced-disk mode with no configured base directory, fall back to a temp dir.
  final String baseDirectory =
      CoreStoreProviderImpl.MODE == ForcedMemoryMode.DISK && this.baseDirectory == null
          ? Files.createTempDirectory(null).toString()
          : this.baseDirectory.toString();
  final File dbDirectory = new File(baseDirectory, CATALOG_STORE_NAME);
  if (dbDirectory.exists()) {
    if (!dbDirectory.isDirectory()) {
      throw new DatastoreException(
          String.format("Invalid path %s for local catalog db, not a directory.", dbDirectory.getAbsolutePath()));
    }
    // If there are any files that exist within the dbDirectory, verify that the first file in the directory is
    // owned by the process user.
    verifyDBOwner(dbDirectory);
  } else {
    if (!dbDirectory.mkdirs()) {
      throw new DatastoreException(
          String.format("Failed to create directory %s for local catalog db.", dbDirectory.getAbsolutePath()));
    }
  }
  final String path = dbDirectory.toString();
  final List<byte[]> families;
  try (final Options options = new Options()) {
    options.setCreateIfMissing(true);
    // get a list of existing families.
    families = new ArrayList<>(RocksDB.listColumnFamilies(options, path));
  }
  // if empty, add the default family
  if (families.isEmpty()) {
    families.add(RocksDB.DEFAULT_COLUMN_FAMILY);
  }
  final Function<byte[], ColumnFamilyDescriptor> func = ColumnFamilyDescriptor::new;
  List<ColumnFamilyHandle> familyHandles = new ArrayList<>();
  try (final DBOptions dboptions = new DBOptions()) {
    dboptions.setCreateIfMissing(true);
    // From docs, ... if WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
    // WAL files will be checked every WAL_ttl_seconds / 2 and those that
    // are older than WAL_ttl_seconds will be deleted.
    dboptions.setWalSizeLimitMB(0);
    dboptions.setWalTtlSeconds(WAL_TTL_SECONDS);
    LOGGER.debug("WAL settings: size: '{} MB', TTL: '{}' seconds",
        dboptions.walSizeLimitMB(), dboptions.walTtlSeconds());
    registerMetrics(dboptions);
    // openDB fills familyHandles, one handle per descriptor, in order.
    db = openDB(dboptions, path, new ArrayList<>(Lists.transform(families, func)), familyHandles);
  }
  // create an output list to be populated when we open the db.

  // populate the local cache with the existing tables.
  // familyHandles is index-aligned with families after openDB above.
  for (int i = 0; i < families.size(); i++) {
    byte[] family = families.get(i);
    if (Arrays.equals(family, RocksDB.DEFAULT_COLUMN_FAMILY)) {
      // The default family gets its own dedicated handle rather than a named store.
      defaultHandle = familyHandles.get(i);
    } else {
      String name = new String(family, UTF_8);
      final ColumnFamilyHandle handle = familyHandles.get(i);
      handleIdToNameMap.put(handle.getID(), name);
      RocksDBStore store = newRocksDBStore(name, new ColumnFamilyDescriptor(family), handle);
      maps.put(name, store);
    }
  }
  // update the metadata manager
  metadataManager = new StoreMetadataManagerImpl();
  for (String tableName : handleIdToNameMap.values()) {
    metadataManager.createEntry(tableName, true);
  }
}
Example 6
Source File: RocksDbUnitTest.java From jstorm with Apache License 2.0 | 4 votes |
/**
 * Entry point for the RocksDB test tool. Loads tuning parameters from the
 * conf file given as args[0], opens (or creates) a RocksDB instance at
 * dbPath — reusing any column families already on disk — then runs
 * rocksDbTest against it and closes the db.
 *
 * @param args args[0] is the path of the configuration file to load
 */
public static void main(String[] args) {
  Map conf = JStormHelper.LoadConf(args[0]);
  // Pull test knobs from the conf map, each with a default.
  putNum = JStormUtils.parseInt(conf.get("put.number"), 100);
  isFlush = JStormUtils.parseBoolean(conf.get("is.flush"), true);
  isCheckpoint = JStormUtils.parseBoolean(conf.get("is.checkpoint"), true);
  sleepTime = JStormUtils.parseInt(conf.get("sleep.time"), 5000);
  compactionInterval = JStormUtils.parseInt(conf.get("compaction.interval"), 30000);
  flushInterval = JStormUtils.parseInt(conf.get("flush.interval"), 3000);
  isCompaction = JStormUtils.parseBoolean(conf.get("is.compaction"), true);
  fileSizeBase = JStormUtils.parseLong(conf.get("file.size.base"), 10 * SizeUnit.KB);
  levelNum = JStormUtils.parseInt(conf.get("db.level.num"), 1);
  compactionTriggerNum = JStormUtils.parseInt(conf.get("db.compaction.trigger.num"), 4);
  LOG.info("Conf={}", conf);

  RocksDB db;
  File file = new File(cpPath);
  // Ensure the checkpoint directory exists before the test runs.
  file.mkdirs();

  List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
  try {
    // Options used when opening WITHOUT explicit column families.
    Options options = new Options();
    options.setCreateMissingColumnFamilies(true);
    options.setCreateIfMissing(true);
    options.setTargetFileSizeBase(fileSizeBase);
    options.setMaxBackgroundFlushes(2);
    options.setMaxBackgroundCompactions(2);
    options.setCompactionStyle(CompactionStyle.LEVEL);
    options.setNumLevels(levelNum);
    options.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);

    // DBOptions + per-family options used when opening WITH column families.
    DBOptions dbOptions = new DBOptions();
    dbOptions.setCreateMissingColumnFamilies(true);
    dbOptions.setCreateIfMissing(true);
    dbOptions.setMaxBackgroundFlushes(2);
    dbOptions.setMaxBackgroundCompactions(2);
    ColumnFamilyOptions familyOptions = new ColumnFamilyOptions();
    familyOptions.setTargetFileSizeBase(fileSizeBase);
    familyOptions.setCompactionStyle(CompactionStyle.LEVEL);
    familyOptions.setNumLevels(levelNum);
    familyOptions.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);

    // Reuse any column families already present in the db directory.
    List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath);
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
    if (families != null)
    {
      for (byte[] bytes : families) {
        columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, familyOptions));
        LOG.info("Load colum family of {}", new String(bytes));
      }
    }

    if (columnFamilyDescriptors.size() > 0) {
      // Existing families found: must open with descriptors + handles.
      db = RocksDB.open(dbOptions, dbPath, columnFamilyDescriptors, columnFamilyHandles);
    } else {
      // Fresh database: plain open with the standalone Options.
      db = RocksDB.open(options, dbPath);
    }
  } catch (RocksDBException e) {
    LOG.error("Failed to open db", e);
    return;
  }

  rocksDbTest(db, columnFamilyHandles);

  db.close();
}
Example 7
Source File: WindowedRocksDbHdfsState.java From jstorm with Apache License 2.0 | 4 votes |
/**
 * Initializes the RocksDB instance backing this windowed state, reusing any
 * column families already on disk. Each non-default column family name is a
 * serialized TimeWindow and is mapped to its handle in windowToCFHandler.
 * When a state TTL is configured (> 0), the db is opened as a TtlDB.
 *
 * @throws RuntimeException wrapping any RocksDBException from opening the db
 */
@Override
protected void initRocksDb() {
  windowToCFHandler = new HashMap<>();

  // Start from the default option factory, then let a user-configured
  // factory (if any) override/extend each options object.
  RocksDbOptionsFactory optionFactory = new RocksDbOptionsFactory.Defaults();
  Options options = optionFactory.createOptions(null);
  DBOptions dbOptions = optionFactory.createDbOptions(null);
  ColumnFamilyOptions cfOptions = optionFactory.createColumnFamilyOptions(null);
  String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
  if (optionsFactoryClass != null) {
    RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
    options = udfOptionFactory.createOptions(options);
    dbOptions = udfOptionFactory.createDbOptions(dbOptions);
    cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
  }

  try {
    ttlTimeSec = ConfigExtension.getStateTtlTime(conf);
    List<Integer> ttlValues = new ArrayList<>();

    // Rebuild descriptors for every column family already present on disk.
    List<byte[]> families = RocksDB.listColumnFamilies(options, rocksDbDir);
    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
    if (families != null) {
      for (byte[] bytes : families) {
        columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, cfOptions));
        LOG.debug("Load colum family of {}", new String(bytes));
        // TtlDB.open needs one TTL value per descriptor, in the same order.
        if (ttlTimeSec > 0)
          ttlValues.add(ttlTimeSec);
      }
    }

    if (columnFamilyDescriptors.size() > 0) {
      if (ttlTimeSec > 0)
        rocksDb = TtlDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles, ttlValues, false);
      else
        rocksDb = RocksDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles);
      // Handles are index-aligned with descriptors after open.
      int n = Math.min(columnFamilyDescriptors.size(), columnFamilyHandles.size());
      LOG.info("Try to load RocksDB with column family, desc_num={}, handler_num={}",
          columnFamilyDescriptors.size(), columnFamilyHandles.size());
      // skip default column
      for (int i = 1; i < n; i++) {
        // The family name bytes are a serialized TimeWindow; deserialize to rebuild the map.
        windowToCFHandler.put((TimeWindow) serializer.deserialize(columnFamilyDescriptors.get(i).columnFamilyName()),
            columnFamilyHandles.get(i));
      }
    }
    else {
      // No existing families: plain open (default family only).
      rocksDb = RocksDB.open(options, rocksDbDir);
    }
    rocksDb.compactRange();
    LOG.info("Finish the initialization of RocksDB");
  } catch (RocksDBException e) {
    LOG.error("Failed to open rocksdb located at " + rocksDbDir, e);
    throw new RuntimeException(e.getMessage());
  }

  lastCheckpointFiles = new HashSet<String>();
  lastCleanTime = System.currentTimeMillis();
  lastSuccessBatchId = -1;
}