Java Code Examples for org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#of()
The following examples show how to use
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#of().
You can vote up the examples you like or vote down the ones you don't like,
and follow the links above each example to go to the original project or source file. You may check out the related API usage on the sidebar.
Example 1
Source File: TestBulkLoadHFiles.java From hbase with Apache License 2.0 | 6 votes |
/**
 * Verifies that {@code BulkLoadHFilesTool.splitStoreFile} splits one HFile into
 * bottom/top halves around a split key without losing any rows.
 */
@Test
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path input = new Path(dir, "testhfile");
  ColumnFamilyDescriptor familyDesc = ColumnFamilyDescriptorBuilder.of(FAMILY);
  // Seed an HFile with 1000 rows spanning keys "aaa".."zzz".
  HFileTestUtil.createHFile(util.getConfiguration(), fs, input, FAMILY, QUALIFIER,
    Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");
  // Split around "ggg": rows before the key land in bottom, the rest in top.
  BulkLoadHFilesTool.splitStoreFile(util.getConfiguration(), input, familyDesc,
    Bytes.toBytes("ggg"), bottomOut, topOut);
  // Both halves together must account for every original row.
  int totalRows = verifyHFile(bottomOut) + verifyHFile(topOut);
  assertEquals(1000, totalRows);
}
Example 2
Source File: AddColumnAction.java From hbase with Apache License 2.0 | 6 votes |
@Override public void perform() throws Exception { TableDescriptor tableDescriptor = admin.getDescriptor(tableName); ColumnFamilyDescriptor columnDescriptor = null; while (columnDescriptor == null || tableDescriptor.getColumnFamily(columnDescriptor.getName()) != null) { columnDescriptor = ColumnFamilyDescriptorBuilder.of(RandomStringUtils.randomAlphabetic(5)); } // Don't try the modify if we're stopping if (context.isStopping()) { return; } getLogger().debug("Performing action: Adding " + columnDescriptor + " to " + tableName); TableDescriptor modifiedTable = TableDescriptorBuilder.newBuilder(tableDescriptor) .setColumnFamily(columnDescriptor).build(); admin.modifyTable(modifiedTable); }
Example 3
Source File: TestRefreshHFilesEndpoint.java From hbase with Apache License 2.0 | 6 votes |
/**
 * Returns this region's stores plus a lazily created faulty store for
 * {@code FAMILY}, used to exercise the failing refresh-HFiles code path.
 */
@Override
public List<HStore> getStores() {
  List<HStore> result = new ArrayList<>(stores.size());
  /*
   * This is used to trigger the custom definition (faulty)
   * of refresh HFiles API.
   */
  try {
    if (this.store == null) {
      store = new HStoreWithFaultyRefreshHFilesAPI(this,
        ColumnFamilyDescriptorBuilder.of(FAMILY), this.conf);
    }
    // Only reached when construction succeeded (or the store already existed),
    // so a failed create never adds a null entry.
    result.add(store);
  } catch (IOException ioe) {
    LOG.info("Couldn't instantiate custom store implementation", ioe);
  }
  result.addAll(stores.values());
  return result;
}
Example 4
Source File: HBaseStoreManager.java From atlas with Apache License 2.0 | 5 votes |
private TableDescriptor createTable(String tableName, String cfName, int ttlInSeconds, AdminMask adm) throws IOException { TableDescriptor desc = compat.newTableDescriptor(tableName); ColumnFamilyDescriptor cdesc = ColumnFamilyDescriptorBuilder.of(cfName); cdesc = setCFOptions(cdesc, ttlInSeconds); desc = compat.addColumnFamilyToTableDescriptor(desc, cdesc); int count; // total regions to create String src; if (MIN_REGION_COUNT <= (count = regionCount)) { src = "region count configuration"; } else if (0 < regionsPerServer && MIN_REGION_COUNT <= (count = regionsPerServer * adm.getEstimatedRegionServerCount())) { src = "ClusterStatus server count"; } else { count = -1; src = "default"; } if (MIN_REGION_COUNT < count) { adm.createTable(desc, getStartKey(count), getEndKey(count), count); logger.debug("Created table {} with region count {} from {}", tableName, count, src); } else { adm.createTable(desc); logger.debug("Created table {} with default start key, end key, and region count", tableName); } return desc; }
Example 5
Source File: TestHStoreFile.java From hbase with Apache License 2.0 | 5 votes |
/**
 * A scanner over an empty store file must report itself unusable when the
 * scan's column-family time range cannot match any data.
 */
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
  StoreFileReader reader = mock(StoreFileReader.class);
  HStore store = mock(HStore.class);
  byte[] family = Bytes.toBytes("ty");
  ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(family);
  when(store.getColumnFamilyDescriptor()).thenReturn(familyDescriptor);
  try (StoreFileScanner scanner =
      new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) {
    Scan scan = new Scan();
    // Restrict the family's time range to [0, 1); the empty file cannot match.
    scan.setColumnFamilyTimeRange(family, 0, 1);
    assertFalse(scanner.shouldUseScanner(scan, store, 0));
  }
}
Example 6
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0 | 5 votes |
/**
 * Create simple HTD with three families: 'a', 'b', and 'c'
 * @param tableName name of the table descriptor
 * @return a table descriptor containing the three families, in that order
 */
private TableDescriptor createBasic3FamilyHTD(final String tableName) {
  TableDescriptorBuilder builder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
  for (String family : new String[] { "a", "b", "c" }) {
    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes(family)));
  }
  return builder.build();
}
Example 7
Source File: TestMetaBrowser.java From hbase with Apache License 2.0 | 4 votes |
/** @return a descriptor for the fixed test column family {@code "f1"}. */
private ColumnFamilyDescriptor columnFamilyDescriptor() {
  return ColumnFamilyDescriptorBuilder.of("f1");
}
Example 8
Source File: TestZooKeeperTableArchiveClient.java From hbase with Apache License 2.0 | 4 votes |
/**
 * End-to-end check that archived HFiles for a table registered for long-term
 * archiving survive a pass of the HFile cleaner: load/flush/compact a region,
 * run the compacted-files discharger so files land in the archive, then run
 * the cleaner and assert the archive contents are unchanged.
 */
@Test
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<HRegion> regions = new ArrayList<>();
  regions.add(region);
  Mockito.doReturn(regions).when(rss).getRegions();
  final CompactedHFilesDischarger compactionCleaner =
    new CompactedHFilesDischarger(100, stop, rss, false);
  // Flush + compact, then discharge so compacted files are moved to the archive.
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    CommonFSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  // Latch lets us know the cleaner has processed exactly the files we found.
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Example 9
Source File: TestRSGroupsAdmin1.java From hbase with Apache License 2.0 | 4 votes |
/**
 * Verifies that a table whose creation fails (namespace region-quota
 * violation) does not linger in any RSGroup's table list, while a table that
 * was created successfully is listed.
 */
@Test
public void testRSGroupListDoesNotContainFailedTableCreation() throws Exception {
  toggleQuotaCheckAndRestartMiniCluster(true);
  String nsp = "np1";
  // Namespace capped at 5 regions / 2 tables so the second create below violates the quota.
  NamespaceDescriptor nspDesc =
    NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  assertEquals(3, ADMIN.listNamespaceDescriptors().length);
  ColumnFamilyDescriptor fam1 = ColumnFamilyDescriptorBuilder.of("fam1");
  TableDescriptor tableDescOne = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"))
    .setColumnFamily(fam1).build();
  ADMIN.createTable(tableDescOne);
  TableDescriptor tableDescTwo = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"))
    .setColumnFamily(fam1).build();
  boolean constraintViolated = false;
  try {
    // 6 pre-split regions exceeds the 5-region namespace quota set above.
    ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 6);
    fail("Creation table should fail because of quota violation.");
  } catch (Exception exp) {
    assertTrue(exp instanceof IOException);
    constraintViolated = true;
  } finally {
    assertTrue("Constraint not violated for table " + tableDescTwo.getTableName(),
      constraintViolated);
  }
  // The failed table2 must not appear in any RSGroup; table1 must.
  List<RSGroupInfo> rsGroupInfoList = ADMIN.listRSGroups();
  boolean foundTable2 = false;
  boolean foundTable1 = false;
  for (int i = 0; i < rsGroupInfoList.size(); i++) {
    Set<TableName> tables =
      Sets.newHashSet(ADMIN.listTablesInRSGroup(rsGroupInfoList.get(i).getName()));
    if (tables.contains(tableDescTwo.getTableName())) {
      foundTable2 = true;
    }
    if (tables.contains(tableDescOne.getTableName())) {
      foundTable1 = true;
    }
  }
  assertFalse("Found table2 in rsgroup list.", foundTable2);
  assertTrue("Did not find table1 in rsgroup list", foundTable1);
  // Cleanup: remove the table and namespace, restore quota-check setting.
  TEST_UTIL.deleteTable(tableDescOne.getTableName());
  ADMIN.deleteNamespace(nspDesc.getName());
  toggleQuotaCheckAndRestartMiniCluster(false);
}
Example 10
Source File: IntegrationTestDDLMasterFailover.java From hbase with Apache License 2.0 | 4 votes |
/**
 * @return a column-family descriptor with a random name of the form
 *         {@code cf-<10 zero-padded digits>}.
 */
private ColumnFamilyDescriptor createFamilyDesc() {
  return ColumnFamilyDescriptorBuilder.of(String.format("cf-%010d", RandomUtils.nextInt()));
}