org.apache.accumulo.core.client.AccumuloSecurityException Java Examples
The following examples show how to use
org.apache.accumulo.core.client.AccumuloSecurityException.
The original project and source file are noted above each example.
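As context for the examples below, AccumuloSecurityException is the checked exception Accumulo clients throw when authentication or authorization fails, for example a rejected password or a missing table permission. The following is a minimal sketch of the typical connect-and-catch pattern using the same ZooKeeperInstance/Connector API the examples use; the instance name, ZooKeeper hosts, user, password, and table name are hypothetical placeholders, not values from any example below.
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.security.Authorizations;

public class AccumuloSecurityExceptionSketch {
    public static void main(String[] args) {
        // Hypothetical connection details, for illustration only
        ZooKeeperInstance instance = new ZooKeeperInstance("myInstance", "zkhost1:2181");
        try {
            // getConnector throws AccumuloSecurityException if the credentials are rejected
            Connector connector = instance.getConnector("reader", new PasswordToken("secret"));
            // createScanner throws TableNotFoundException; scanning with authorizations the
            // user does not hold can also surface as an AccumuloSecurityException
            Scanner scanner = connector.createScanner("exampleTable", Authorizations.EMPTY);
            scanner.forEach(entry -> System.out.println(entry.getKey() + " -> " + entry.getValue()));
        } catch (AccumuloSecurityException e) {
            // authentication/authorization failure: bad credentials or missing permission
            System.err.println("Security error talking to Accumulo: " + e.getMessage());
        } catch (AccumuloException | TableNotFoundException e) {
            System.err.println("General Accumulo error: " + e.getMessage());
        }
    }
}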
Example #1
Source File: IngestMetricsSummaryLoader.java From datawave with Apache License 2.0
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);

    Configuration conf = context.getConfiguration();
    String user = conf.get(MetricsConfig.USER);
    String password = conf.get(MetricsConfig.PASS);
    String instance = conf.get(MetricsConfig.INSTANCE);
    String zookeepers = conf.get(MetricsConfig.ZOOKEEPERS);
    useHourlyPrecision = HourlyPrecisionHelper.checkForHourlyPrecisionOption(context.getConfiguration(), log);

    try {
        ZooKeeperInstance inst = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
        Connector con = inst.getConnector(user, new PasswordToken(password));
        ingestScanner = con.createScanner(conf.get(MetricsConfig.INGEST_TABLE, MetricsConfig.DEFAULT_INGEST_TABLE), Authorizations.EMPTY);
    } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
        throw new IOException(e);
    }
}
Example #2
Source File: MetricsTableConfigHelper.java From datawave with Apache License 2.0
@Override
public void configure(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    if (MetricsConfiguration.isEnabled(conf)) {
        try {
            String table = MetricsConfiguration.getTable(conf);

            if (!table.equals(this.tableName)) {
                throw new IllegalArgumentException("Table names did not match. Configuration = " + table + ", Configuration Helper = " + this.tableName);
            }

            Collection<MetricsReceiver> receivers = MetricsConfiguration.getReceivers(conf);
            for (MetricsReceiver receiver : receivers) {
                logger.info("Configuring metrics receiver " + receiver);
                receiver.configureTable(table, tops, conf);
            }
        } catch (Exception e) {
            logger.error("An error occurred while configuring ingest metrics, disabling", e);
            MetricsConfiguration.disable(conf);
        }
    }
}
Example #3
Source File: MetadataTableSplits.java From datawave with Apache License 2.0
/**
 * updates the splits file if the splits in the new file have not decreased beyond the maximum deviation allowed
 */
public void update() {
    try {
        FileSystem fs = FileSystem.get(this.splitsPath.toUri(), conf);
        initAccumuloHelper();
        Path tmpSplitsFile = createTempFile(fs);
        Map<String,Integer> tmpSplitsPerTable = writeSplits(fs, tmpSplitsFile);
        if (null == getFileStatus() || !exceedsMaxSplitsDeviation(tmpSplitsPerTable)) {
            log.info("updating splits file");
            createCacheFile(fs, tmpSplitsFile);
        } else {
            log.info("Deleting " + tmpSplitsFile);
            fs.delete(tmpSplitsFile, false);
        }
    } catch (IOException | AccumuloException | AccumuloSecurityException | TableNotFoundException ex) {
        log.error("Unable to update the splits file", ex);
    }
}
Example #4
Source File: AccumuloCounterSource.java From datawave with Apache License 2.0
@Override
public boolean hasNext() {
    if (null == iterator) {
        try {
            BatchScanner scanner = connector.createBatchScanner(queryTable, connector.securityOperations().getUserAuthorizations(username), 100);
            scanner.setRanges(ranges);
            for (String cf : cfs) {
                scanner.fetchColumnFamily(new Text(cf));
            }
            iterator = scanner.iterator();
        } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
            throw new RuntimeException(e);
        }
    }
    nextIterator();
    return null != topKey;
}
Example #5
Source File: LoadDateTableConfigHelperTest.java From datawave with Apache License 2.0
@Test
public void testLocalityGroupsProperlyLoadedFromConfig() throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    LoadDateTableConfigHelper loadDateTableConfigHelper = new LoadDateTableConfigHelper();
    Configuration configuration = new Configuration();
    configuration.addResource(ClassLoader.getSystemResource("config/metadata-config.xml"));
    configuration.set(LoadDateTableConfigHelper.LOAD_DATES_TABLE_NAME_PROP, TABLE_NAME);
    loadDateTableConfigHelper.setup(TABLE_NAME, configuration, log);
    loadDateTableConfigHelper.configure(tableOperations);
    String actual = tableOperations.getLocalityGroups(TABLE_NAME).toString();
    String errorMessage = "Incorrect result (possibly the null character): " + actual;
    Assert.assertTrue(errorMessage, actual.contains("LAC\u0000protobufEdge"));
    Assert.assertTrue(errorMessage, actual.contains("LAC\u0000errorShard"));
    Assert.assertTrue(errorMessage, actual.contains("LAC\u0000shard"));
    Assert.assertTrue(errorMessage, actual.contains("LAC\u0000knowledgeShard"));
}
Example #6
Source File: IngestJob.java From datawave with Apache License 2.0
/**
 * Creates the tables that are needed to load data using this ingest job if they don't already exist. If a table is created, it is configured with the
 * appropriate iterators, aggregators, and locality groups that are required for ingest and query functionality to work correctly.
 *
 * @param tableNames
 *            the names of the tables to create if they don't exist
 * @param tops
 *            accumulo table operations helper for checking/creating tables
 * @param conf
 *            the Hadoop {@link Configuration} for retrieving table configuration information
 * @param log
 *            a logger for diagnostic messages
 * @param enableBloomFilters
 *            an indication of whether bloom filters should be enabled in the configuration
 * @throws AccumuloSecurityException
 * @throws AccumuloException
 * @throws TableNotFoundException
 */
protected void createAndConfigureTablesIfNecessary(String[] tableNames, TableOperations tops, NamespaceOperations namespaceOperations, Configuration conf,
                Logger log, boolean enableBloomFilters) throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    for (String table : tableNames) {
        createNamespaceIfNecessary(namespaceOperations, table);
        // If the tables don't exist, then create them.
        try {
            if (!tops.exists(table)) {
                tops.create(table);
            }
        } catch (TableExistsException te) {
            // in this case, somebody else must have created the table after our existence check
            log.info("Tried to create " + table + " but somebody beat us to the punch");
        }
    }

    // Pass along the enabling of bloom filters using the configuration
    conf.setBoolean(ShardTableConfigHelper.ENABLE_BLOOM_FILTERS, enableBloomFilters);

    configureTablesIfNecessary(tableNames, tops, conf, log);
}
Example #7
Source File: ShardTableQueryMetricHandler.java From datawave with Apache License 2.0
protected void createAndConfigureTablesIfNecessary(String[] tableNames, TableOperations tops, Configuration conf) throws AccumuloSecurityException,
                AccumuloException, TableNotFoundException {
    for (String table : tableNames) {
        // If the tables don't exist, then create them.
        try {
            if (!tops.exists(table)) {
                tops.create(table);
                Map<String,TableConfigHelper> tableConfigs = getTableConfigs(log, conf, tableNames);

                TableConfigHelper tableHelper = tableConfigs.get(table);

                if (tableHelper != null) {
                    tableHelper.configure(tops);
                } else {
                    log.info("No configuration supplied for table: " + table);
                }
            }
        } catch (TableExistsException te) {
            // in this case, somebody else must have created the table after our existence check
            log.debug("Tried to create " + table + " but somebody beat us to the punch");
        }
    }
}
Example #8
Source File: AbstractTableConfigHelper.java From datawave with Apache License 2.0
/**
 * Set the locality group configuration for a table if necessary. If the specified configuration is not already included in the current group
 * configuration, then the new locality groups are merged with the current set and the locality groups are reset for the table.
 *
 * @param tableName
 * @param newLocalityGroups
 * @param tops
 * @param log
 * @throws AccumuloException
 * @throws TableNotFoundException
 * @throws AccumuloSecurityException
 */
protected void setLocalityGroupConfigurationIfNecessary(String tableName, Map<String,Set<Text>> newLocalityGroups, TableOperations tops, Logger log)
                throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    if (areLocalityGroupsConfigured(tableName, newLocalityGroups, tops)) {
        log.debug("Verified the following locality groups are configured for " + tableName + ": " + newLocalityGroups);
        return;
    }

    log.info("Creating the locality groups for " + tableName + ": " + newLocalityGroups);
    Map<String,Set<Text>> localityGroups = tops.getLocalityGroups(tableName);
    for (Map.Entry<String,Set<Text>> entry : newLocalityGroups.entrySet()) {
        Set<Text> families = localityGroups.get(entry.getKey());
        if (families == null) {
            families = new HashSet<>();
            localityGroups.put(entry.getKey(), families);
        }
        families.addAll(entry.getValue());
    }

    tops.setLocalityGroups(tableName, localityGroups);
    log.info("Reset the locality groups for " + tableName + " to " + localityGroups);
}
Example #9
Source File: MetricsSummaryTableConfigHelper.java From datawave with Apache License 2.0
@Override
public void configure(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // add the SummingCombiner Iterator for each iterator scope (majc, minc, scan)
    for (IteratorUtil.IteratorScope scope : IteratorUtil.IteratorScope.values()) {
        final StringBuilder propName = new StringBuilder(String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "sum"));
        setPropertyIfNecessary(mTableName, propName.toString(), "19,org.apache.accumulo.core.iterators.user.SummingCombiner", tops, mLog);

        propName.append(".opt.");
        setPropertyIfNecessary(mTableName, propName + "all", "true", tops, mLog);
        setPropertyIfNecessary(mTableName, propName + "lossy", "FALSE", tops, mLog);
        setPropertyIfNecessary(mTableName, propName + "type", "STRING", tops, mLog);
    }

    // enable bloom filters if necessary.
    if (mEnableBloomFilters) {
        setPropertyIfNecessary(mTableName, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ShardIndexKeyFunctor.class.getName(), tops, mLog);
    }
    setPropertyIfNecessary(mTableName, Property.TABLE_BLOOM_ENABLED.getKey(), Boolean.toString(mEnableBloomFilters), tops, mLog);
}
Example #10
Source File: AccumuloClient.java From presto with Apache License 2.0
@Inject
public AccumuloClient(
        Connector connector,
        AccumuloConfig config,
        ZooKeeperMetadataManager metaManager,
        AccumuloTableManager tableManager,
        IndexLookup indexLookup)
        throws AccumuloException, AccumuloSecurityException
{
    this.connector = requireNonNull(connector, "connector is null");
    this.username = requireNonNull(config, "config is null").getUsername();
    this.metaManager = requireNonNull(metaManager, "metaManager is null");
    this.tableManager = requireNonNull(tableManager, "tableManager is null");
    this.indexLookup = requireNonNull(indexLookup, "indexLookup is null");
    this.auths = connector.securityOperations().getUserAuthorizations(username);
}
Example #11
Source File: AccumuloIndexAgeDisplayTest.java From datawave with Apache License 2.0
/**
 * A test verifying the buckets are sorted in reverse order.
 */
@Test
public void sortBucketsInReverseOrderTest() {
    Assert.assertNotNull(mockInstance);
    try {
        aiad = new AccumuloIndexAgeDisplay(mockInstance, tableName, columns, userName, password, new Integer[0]);
        aiad.setBuckets(null);
        Integer[] expected = {180, 90, 60, 30, 14, 7, 2};
        Integer[] actual = aiad.getBuckets();
        Assert.assertArrayEquals(expected, actual);

        Integer[] useExpectedWithTooSmallNumber = {1, 2, 3, 4, 5};
        expected = new Integer[] {5, 4, 3, 2};
        aiad.setBuckets(useExpectedWithTooSmallNumber);
        actual = aiad.getBuckets();
        Assert.assertArrayEquals(expected, actual);
    } catch (AccumuloException ae) {
        log.error("Accumulo exception from our mock instance.");
        log.error(ae.getMessage());
    } catch (AccumuloSecurityException ase) {
        log.error("Accumulo security exception from our mock instance");
        log.error(ase.getMessage());
    }
}
Example #12
Source File: AccumuloIndexAgeDisplayTest.java From datawave with Apache License 2.0
/**
 * This method completes the setup process and was redundant in all but one of the tests.
 *
 * @param bucketsToUse
 *            - the array of buckets to use
 */
private void completeSetup(Integer[] bucketsToUse) {
    try {
        deleteFile(fileName);
        aiad = new AccumuloIndexAgeDisplay(mockInstance, tableName, columns, userName, password, bucketsToUse);
        aiad.extractDataFromAccumulo();
        aiad.logAgeSummary();
        aiad.createAccumuloShellScript(fileName);
    } catch (AccumuloException ae) {
        log.error("Accumulo exception from our mock instance.");
        log.error(ae.getMessage());
    } catch (AccumuloSecurityException ase) {
        log.error("Accumulo security exception from our mock instance");
        log.error(ase.getMessage());
    }
}
Example #13
Source File: AccumuloCounterSource.java From datawave with Apache License 2.0
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException {
    String instance = args[0];
    String zookeepers = args[1];
    String username = args[2];
    String password = PasswordConverter.parseArg(args[3]);
    String table = args[4];
    String startRow = args[5];
    String endRow = args[6];
    String columnFamily = args[7];

    AccumuloCounterSource source = new AccumuloCounterSource(instance, zookeepers, username, password, table);

    Range range = new Range(startRow, endRow);
    source.addRange(range);
    source.addColumnFaily(columnFamily);

    CounterDump dumper = new CounterDump(source);

    System.out.println(dumper);
}
Example #14
Source File: ShardTableConfigHelper.java From datawave with Apache License 2.0
@Override
public void configure(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    switch (this.tableType) {
        case SHARD:
            configureShardTable(tops);
            break;
        case GIDX:
            configureGidxTable(tops);
            break;
        case GRIDX:
            configureGridxTable(tops);
            break;
        case DINDX:
            configureDictionaryTable(tops);
            break;
        default:
            // Technically, this is dead code. If 'Configure' is called prior to 'Setup'
            // tableType is null and throws a NullPointerException in the switch statement.
            // If 'Setup' successfully runs to completion then tableType is assigned one
            // of the three other values.
            throw new TableNotFoundException(null, tableName, "Table is not a Shard Type Table");
    }
}
Example #15
Source File: ShardTableConfigHelper.java From datawave with Apache License 2.0
protected void configureGidxTable(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Add the UID aggregator
    for (IteratorScope scope : IteratorScope.values()) {
        String stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "UIDAggregator");
        setPropertyIfNecessary(tableName, stem, "19,datawave.iterators.TotalAggregatingIterator", tops, log);
        stem += ".opt.";
        setPropertyIfNecessary(tableName, stem + "*", "datawave.ingest.table.aggregator.GlobalIndexUidAggregator", tops, log);

        if (markingsSetupIteratorEnabled) {
            // we want the markings setup iterator init method to be called up front
            stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "MarkingsLoader");
            setPropertyIfNecessary(tableName, stem, markingsSetupIteratorConfig, tops, log);
        }
    }

    // Set up the bloom filters for faster queries on the index portion
    if (enableBloomFilters) {
        setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ShardIndexKeyFunctor.class.getName(), tops, log);
    }
    setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), Boolean.toString(enableBloomFilters), tops, log);
}
Example #16
Source File: AccumuloQueryRunner.java From presto with Apache License 2.0
/**
 * Gets the AccumuloConnector singleton, starting the MiniAccumuloCluster on initialization.
 * This singleton instance is required so all test cases access the same MiniAccumuloCluster.
 *
 * @return Accumulo connector
 */
public static Connector getAccumuloConnector()
{
    if (connector != null) {
        return connector;
    }

    try {
        MiniAccumuloCluster accumulo = createMiniAccumuloCluster();
        Instance instance = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
        connector = instance.getConnector(MAC_USER, new PasswordToken(MAC_PASSWORD));
        LOG.info("Connection to MAC instance %s at %s established, user %s password %s", accumulo.getInstanceName(), accumulo.getZooKeepers(), MAC_USER, MAC_PASSWORD);
        return connector;
    }
    catch (AccumuloException | AccumuloSecurityException | InterruptedException | IOException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get connector to Accumulo", e);
    }
}
Example #17
Source File: ShardTableConfigHelper.java From datawave with Apache License 2.0
protected void configureGridxTable(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Add the UID aggregator
    for (IteratorScope scope : IteratorScope.values()) {
        String stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "UIDAggregator");
        setPropertyIfNecessary(tableName, stem, "19,datawave.iterators.TotalAggregatingIterator", tops, log);
        stem += ".opt.";
        setPropertyIfNecessary(tableName, stem + "*", "datawave.ingest.table.aggregator.GlobalIndexUidAggregator", tops, log);

        if (markingsSetupIteratorEnabled) {
            // we want the markings setup iterator init method to be called up front
            stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "MarkingsLoader");
            setPropertyIfNecessary(tableName, stem, markingsSetupIteratorConfig, tops, log);
        }
    }

    // Set up the bloom filters for faster queries on the index portion
    if (enableBloomFilters) {
        setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ShardIndexKeyFunctor.class.getName(), tops, log);
    }
    setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), Boolean.toString(enableBloomFilters), tops, log);
}
Example #18
Source File: QueryTestTableHelper.java From datawave with Apache License 2.0
public QueryTestTableHelper(String instanceName, Logger log, RebuildingScannerTestHelper.TEARDOWN teardown, RebuildingScannerTestHelper.INTERRUPT interrupt)
                throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException {
    // create mock instance and connector
    InMemoryInstance i = new InMemoryInstance(instanceName);
    this.connector = RebuildingScannerTestHelper.getConnector(i, "root", new PasswordToken(""), teardown, interrupt);
    this.log = log;

    createTables();
}
Example #19
Source File: ShardTableQueryMetricHandler.java From datawave with Apache License 2.0
@Override
public void reload() {
    try {
        if (this.recordWriter != null) {
            // don't try to flush the mtbw (close). If recordWriter != null then this method is being called
            // because of an Exception and the metrics have been saved off to be added to the new recordWriter.
            this.recordWriter.returnConnector();
        }
        recordWriter = new AccumuloRecordWriter(this.connectionFactory, conf);
    } catch (AccumuloException | AccumuloSecurityException | IOException e) {
        log.error(e.getMessage(), e);
    }
}
Example #20
Source File: MockMetadataHelper.java From datawave with Apache License 2.0
private static Connector getConnector() {
    try {
        return new InMemoryInstance().getConnector("root", new PasswordToken(""));
    } catch (AccumuloException | AccumuloSecurityException e) {
        throw new RuntimeException(e);
    }
}
Example #21
Source File: NumShards.java From datawave with Apache License 2.0
private void ensureTableExists(Connector connector, String metadataTableName) throws AccumuloException, AccumuloSecurityException {
    TableOperations tops = connector.tableOperations();
    if (!tops.exists(metadataTableName)) {
        log.info("Creating table: " + metadataTableName);
        try {
            tops.create(metadataTableName);
        } catch (TableExistsException tee) {
            log.error(metadataTableName + " already exists someone got here first");
        }
    }
}
Example #22
Source File: ProtobufEdgeTableConfigHelper.java From datawave with Apache License 2.0
@Override
public void configure(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    if (tableName != null) {
        // Add the Edge Combiner
        for (IteratorScope scope : IteratorScope.values()) {
            // Add the EdgeCombiner BELOW the Versioning iterator
            int combinerPriority = getVersionIteratorPriority(tops, scope) - 1;

            String stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "EdgeCombiner");
            setPropertyIfNecessary(tableName, stem, combinerPriority + ",datawave.iterators.EdgeCombiner", tops, log);
            setPropertyIfNecessary(tableName, stem + ".opt.all", "true", tops, log);
        }
    }
}
Example #23
Source File: DateIndexQueryExpansionVisitorTest.java From datawave with Apache License 2.0
private void deleteAndCreateTable(TableOperations tops, String tableName)
                throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
    if (tops.exists(tableName)) {
        tops.delete(tableName);
    }
    tops.create(tableName);
}
Example #24
Source File: Persister.java From datawave with Apache License 2.0
private void tableCheck(Connector c) throws AccumuloException, AccumuloSecurityException, TableExistsException {
    if (!c.tableOperations().exists(TABLE_NAME)) {
        c.tableOperations().create(TABLE_NAME);
        try {
            IteratorSetting iteratorCfg = new IteratorSetting(19, "ageoff", QueriesTableAgeOffIterator.class);
            c.tableOperations().attachIterator(TABLE_NAME, iteratorCfg, EnumSet.allOf(IteratorScope.class));
        } catch (TableNotFoundException e) {
            throw new AccumuloException("We just created " + TABLE_NAME + " so this shouldn't have happened!", e);
        }
    }
}
Example #25
Source File: MetadataTableConfigHelper.java From datawave with Apache License 2.0
@Override
public void configure(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    if (tableName != null) {
        for (IteratorScope scope : IteratorScope.values()) {
            setFrequencyCombiner(tops, scope.name());
            setCombinerForCountMetadata(tops, scope.name());
            setCombinerForEdgeMetadata(tops, scope.name());
        }
    }
}
Example #26
Source File: MultiRFileOutputFormatterTest.java From datawave with Apache License 2.0
@Test
public void testRFileEntrySizeLimit() throws IOException, InterruptedException, AccumuloSecurityException, AccumuloException, URISyntaxException {
    MultiRFileOutputFormatter.setRFileLimits(conf, 1, 0);
    RecordWriter<BulkIngestKey,Value> writer = createWriter(formatter, conf);
    writeShardPairs(writer, 2);
    assertNumFileNames(5);
    assertFileNameForShardIndex(0);
    expectShardFiles(4);
}
Example #27
Source File: AbstractTableConfigHelper.java From datawave with Apache License 2.0
/**
 * Is the specified configuration already included in the current table configuration for locality groups.
 *
 * @param tableName
 * @param newLocalityGroups
 * @param tops
 * @return true if the new configuration is already included in the current configuration
 * @throws AccumuloException
 * @throws TableNotFoundException
 * @throws AccumuloSecurityException
 */
protected boolean areLocalityGroupsConfigured(String tableName, Map<String,Set<Text>> newLocalityGroups, TableOperations tops)
                throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    Map<String,Set<Text>> localityGroups = tops.getLocalityGroups(tableName);

    for (Map.Entry<String,Set<Text>> entry : newLocalityGroups.entrySet()) {
        Set<Text> families = localityGroups.get(entry.getKey());
        if (families == null) {
            return false;
        }
        if (!families.containsAll(entry.getValue())) {
            return false;
        }
    }

    return true;
}
Example #28
Source File: DateIndexTableConfigHelper.java From datawave with Apache License 2.0
protected void configureDateIndexTable(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Add the DATE aggregator
    for (IteratorScope scope : IteratorScope.values()) {
        String stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "DATEAggregator");
        setPropertyIfNecessary(tableName, stem, "19,datawave.iterators.TotalAggregatingIterator", tops, log);
        stem += ".opt.";
        setPropertyIfNecessary(tableName, stem + "*", "datawave.ingest.table.aggregator.DateIndexDateAggregator", tops, log);
    }

    setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), Boolean.toString(false), tops, log);

    // Set the locality group for the full content column family
    setLocalityGroupConfigurationIfNecessary(tableName, localityGroups, tops, log);
}
Example #29
Source File: ShardTableConfigHelper.java From datawave with Apache License 2.0
protected void configureShardTable(TableOperations tops) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Set a text index aggregator on the "tf" (Term Frequency) column family
    CombinerConfiguration tfConf = new CombinerConfiguration(new Column("tf"),
                    new IteratorSetting(10, "TF", datawave.ingest.table.aggregator.TextIndexAggregator.class.getName()));
    setAggregatorConfigurationIfNecessary(tableName, Collections.singletonList(tfConf), tops, log);

    if (markingsSetupIteratorEnabled) {
        for (IteratorScope scope : IteratorScope.values()) {
            // we want the markings setup iterator init method to be called up front
            String stem = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name(), "MarkingsLoader");
            setPropertyIfNecessary(tableName, stem, markingsSetupIteratorConfig, tops, log);
        }
    }

    // Set the locality group for the full content column family
    setLocalityGroupConfigurationIfNecessary(tableName, localityGroups, tops, log);

    // Set up the bloom filters for faster queries on the index portion
    if (enableBloomFilters) {
        setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), SHARD_KEY_FUNCTOR_CLASS, tops, log);
    }
    setPropertyIfNecessary(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), Boolean.toString(enableBloomFilters), tops, log);

    // Set up the table balancer for shards
    setPropertyIfNecessary(tableName, Property.TABLE_LOAD_BALANCER.getKey(), shardTableBalancerClass, tops, log);
}
Example #30
Source File: GenerateMultipleNumShardsCacheFile.java From datawave with Apache License 2.0
@SuppressWarnings("static-access")
public static void main(String[] args) throws ParseException, AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException {
    AccumuloCliOptions accumuloOptions = new AccumuloCliOptions();
    Options options = accumuloOptions.getOptions();

    options.addOption(OptionBuilder.isRequired(true).hasArg().withDescription("Config directory path").create(CONFIG_DIRECTORY_LOCATION_OVERRIDE));
    options.addOption(OptionBuilder.isRequired(false).hasArg().withDescription("Config file suffix").create(CONFIG_SUFFIEX_OVERRIDE));
    options.addOption(OptionBuilder.isRequired(false).hasArg().withDescription("Multiple numShards cache file path")
                    .create(MULTIPLE_NUMSHARD_CACHE_FILE_LOCATION_OVERRIDE));

    Configuration conf = accumuloOptions.getConf(args, true);

    CommandLine cl;
    String configDirectory = null;
    String configSuffix;

    try {
        cl = new BasicParser().parse(options, args);

        if (cl.hasOption(CONFIG_DIRECTORY_LOCATION_OVERRIDE)) {
            configDirectory = cl.getOptionValue(CONFIG_DIRECTORY_LOCATION_OVERRIDE);
        } else {
            HelpFormatter helpFormatter = new HelpFormatter();
            helpFormatter.printHelp("Generate Multiple NumShards Cache", options);
            System.exit(1);
        }

        if (cl.hasOption(MULTIPLE_NUMSHARD_CACHE_FILE_LOCATION_OVERRIDE)) {
            conf.set(NumShards.MULTIPLE_NUMSHARDS_CACHE_PATH, cl.getOptionValue(MULTIPLE_NUMSHARD_CACHE_FILE_LOCATION_OVERRIDE));
        }

        if (cl.hasOption(CONFIG_SUFFIEX_OVERRIDE)) {
            configSuffix = cl.getOptionValue(CONFIG_SUFFIEX_OVERRIDE);
        } else {
            configSuffix = "config.xml";
        }

        ConfigurationFileHelper.setConfigurationFromFiles(conf, configDirectory, configSuffix);

        NumShards numShards = new NumShards(conf);
        numShards.updateCache();
    } catch (ParseException ex) {
        log.error(GenerateMultipleNumShardsCacheFile.class.getName(), ex);
    }
}