org.apache.accumulo.core.client.security.tokens.PasswordToken Java Examples
The following examples show how to use
org.apache.accumulo.core.client.security.tokens.PasswordToken.
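Before the examples, here is a minimal sketch of the pattern most of them share: wrapping a password in a PasswordToken and handing it to getConnector(). The instance name, ZooKeeper hosts, and credentials below are placeholder assumptions, not values from any example on this page.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class PasswordTokenSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details -- substitute your own instance and hosts.
        Instance instance = new ZooKeeperInstance("myInstance", "zkhost1:2181,zkhost2:2181");
        // PasswordToken wraps the raw password bytes used to authenticate the user.
        Connector connector = instance.getConnector("myUser", new PasswordToken("myPassword"));
        System.out.println("Connected as " + connector.whoami());
    }
}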
Example #1
Source File: TermIndexSink.java From OSTMap with Apache License 2.0
/**
 * Creates a BatchWriter to write data to Accumulo.
 *
 * @param table the table to write data into
 * @return a ready-to-use BatchWriter object
 * @throws AccumuloSecurityException
 * @throws AccumuloException
 * @throws TableNotFoundException
 */
private BatchWriter createBatchWriter(String table) throws AccumuloSecurityException, AccumuloException,
        TableNotFoundException, TableExistsException {
    final BatchWriterConfig bwConfig = new BatchWriterConfig();
    // buffer max 100kb (100 * 1024 = 102400)
    bwConfig.setMaxMemory(102400);
    // buffer max 10 seconds
    bwConfig.setMaxLatency(10, TimeUnit.SECONDS);
    // ensure persistence
    bwConfig.setDurability(Durability.SYNC);

    // build the accumulo connector
    Instance inst = new ZooKeeperInstance(cfg.accumuloInstanceName, cfg.accumuloZookeeper);
    conn = inst.getConnector(cfg.accumuloUser, new PasswordToken(cfg.accumuloPassword));
    Authorizations auths = new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString());

    // create the table if it does not already exist
    TableOperations tableOpts = conn.tableOperations();
    try {
        tableOpts.create(table);
    } catch (Exception e) {
        // ignore: the table already exists
    }

    // build and return the batchwriter
    return conn.createBatchWriter(table, bwConfig);
}
Example #2
Source File: IngestMetricsSummaryLoader.java From datawave with Apache License 2.0
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);

    Configuration conf = context.getConfiguration();
    String user = conf.get(MetricsConfig.USER);
    String password = conf.get(MetricsConfig.PASS);
    String instance = conf.get(MetricsConfig.INSTANCE);
    String zookeepers = conf.get(MetricsConfig.ZOOKEEPERS);

    useHourlyPrecision = HourlyPrecisionHelper.checkForHourlyPrecisionOption(context.getConfiguration(), log);

    try {
        ZooKeeperInstance inst = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
        Connector con = inst.getConnector(user, new PasswordToken(password));
        ingestScanner = con.createScanner(conf.get(MetricsConfig.INGEST_TABLE, MetricsConfig.DEFAULT_INGEST_TABLE), Authorizations.EMPTY);
    } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
        throw new IOException(e);
    }
}
Example #3
Source File: AccumuloQueryRunner.java From presto with Apache License 2.0
/**
 * Gets the AccumuloConnector singleton, starting the MiniAccumuloCluster on initialization.
 * This singleton instance is required so all test cases access the same MiniAccumuloCluster.
 *
 * @return Accumulo connector
 */
public static Connector getAccumuloConnector() {
    if (connector != null) {
        return connector;
    }

    try {
        MiniAccumuloCluster accumulo = createMiniAccumuloCluster();
        Instance instance = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
        connector = instance.getConnector(MAC_USER, new PasswordToken(MAC_PASSWORD));
        LOG.info("Connection to MAC instance %s at %s established, user %s password %s",
                accumulo.getInstanceName(), accumulo.getZooKeepers(), MAC_USER, MAC_PASSWORD);
        return connector;
    } catch (AccumuloException | AccumuloSecurityException | InterruptedException | IOException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get connector to Accumulo", e);
    }
}
Example #4
Source File: RawTwitterDataSink.java From OSTMap with Apache License 2.0
/**
 * Creates a BatchWriter to write data to Accumulo.
 *
 * @param table the table to write data into
 * @return a ready-to-use BatchWriter object
 * @throws AccumuloSecurityException
 * @throws AccumuloException
 * @throws TableNotFoundException
 */
private BatchWriter createBatchWriter(String table) throws AccumuloSecurityException, AccumuloException,
        TableNotFoundException, TableExistsException {
    final BatchWriterConfig bwConfig = new BatchWriterConfig();
    // buffer max 100kb (100 * 1024 = 102400)
    bwConfig.setMaxMemory(102400);
    // buffer max 10 seconds
    bwConfig.setMaxLatency(10, TimeUnit.SECONDS);
    // ensure persistence
    bwConfig.setDurability(Durability.SYNC);

    // build the accumulo connector
    Instance inst = new ZooKeeperInstance(cfg.accumuloInstanceName, cfg.accumuloZookeeper);
    conn = inst.getConnector(cfg.accumuloUser, new PasswordToken(cfg.accumuloPassword));
    Authorizations auths = new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString());

    // create the table if it does not already exist
    TableOperations tableOpts = conn.tableOperations();
    try {
        tableOpts.create(table);
    } catch (Exception e) {
        // ignore: the table already exists
    }

    // build and return the batchwriter
    return conn.createBatchWriter(table, bwConfig);
}
Example #5
Source File: MapReduceStatePersisterTest.java From datawave with Apache License 2.0
@Before
public void setup() throws Exception {
    System.setProperty(NpeUtils.NPE_OU_PROPERTY, "iamnotaperson");
    System.setProperty("dw.metadatahelper.all.auths", "A,B,C,D");
    connection = instance.getConnector("root", new PasswordToken(""));

    if (connection.tableOperations().exists(TABLE_NAME))
        connection.tableOperations().delete(TABLE_NAME);
    if (connection.tableOperations().exists(INDEX_TABLE_NAME))
        connection.tableOperations().delete(INDEX_TABLE_NAME);

    DatawaveUser user = new DatawaveUser(SubjectIssuerDNPair.of(userDN, "CN=ca, OU=acme"), UserType.USER, Arrays.asList(auths), null, null, 0L);
    principal = new DatawavePrincipal(Collections.singletonList(user));

    connectionFactory = createMock(AccumuloConnectionFactory.class);
    ctx = createStrictMock(EJBContext.class);
    bean = new MapReduceStatePersisterBean();
    field(MapReduceStatePersisterBean.class, "connectionFactory").set(bean, connectionFactory);
    field(MapReduceStatePersisterBean.class, "ctx").set(bean, ctx);
    Logger.getLogger(MapReduceStatePersisterBean.class).setLevel(Level.OFF);
}
Example #6
Source File: DataStore.java From qonduit with Apache License 2.0
public DataStore(Configuration conf) throws QonduitException {
    try {
        final HashMap<String, String> apacheConf = new HashMap<>();
        Configuration.Accumulo accumuloConf = conf.getAccumulo();
        apacheConf.put("instance.name", accumuloConf.getInstanceName());
        apacheConf.put("instance.zookeeper.host", accumuloConf.getZookeepers());
        final ClientConfiguration aconf = ClientConfiguration.fromMap(apacheConf);
        final Instance instance = new ZooKeeperInstance(aconf);
        connector = instance.getConnector(accumuloConf.getUsername(), new PasswordToken(accumuloConf.getPassword()));
    } catch (Exception e) {
        throw new QonduitException(HttpResponseStatus.INTERNAL_SERVER_ERROR.code(), "Error creating DataStoreImpl",
                e.getMessage(), e);
    }
}
Example #7
Source File: GetMetricTableSplitPoints.java From timely with Apache License 2.0
public static void main(String[] args) throws Exception {
    try (ConfigurableApplicationContext ctx = new SpringApplicationBuilder(SpringBootstrap.class)
            .bannerMode(Mode.OFF).web(WebApplicationType.NONE).run(args)) {
        Configuration conf = ctx.getBean(Configuration.class);

        final Map<String, String> properties = new HashMap<>();
        Accumulo accumuloConf = conf.getAccumulo();
        properties.put("instance.name", accumuloConf.getInstanceName());
        properties.put("instance.zookeeper.host", accumuloConf.getZookeepers());
        final ClientConfiguration aconf = ClientConfiguration.fromMap(properties);
        final Instance instance = new ZooKeeperInstance(aconf);
        Connector con = instance.getConnector(accumuloConf.getUsername(), new PasswordToken(accumuloConf.getPassword()));
        Scanner s = con.createScanner(conf.getMetaTable(), con.securityOperations().getUserAuthorizations(con.whoami()));
        try {
            s.setRange(new Range(Meta.METRIC_PREFIX, true, Meta.TAG_PREFIX, false));
            for (Entry<Key, Value> e : s) {
                System.out.println(e.getKey().getRow().toString().substring(Meta.METRIC_PREFIX.length()));
            }
        } finally {
            s.close();
        }
    }
}
Example #8
Source File: TabletMetadataConsole.java From timely with Apache License 2.0
public static void main(String[] args) throws Exception {
    try (ConfigurableApplicationContext ctx = new SpringApplicationBuilder(SpringBootstrap.class)
            .bannerMode(Banner.Mode.OFF).web(WebApplicationType.NONE).run(args)) {
        Configuration conf = ctx.getBean(Configuration.class);
        HashMap<String, String> apacheConf = new HashMap<>();
        Accumulo accumuloConf = conf.getAccumulo();
        apacheConf.put("instance.name", accumuloConf.getInstanceName());
        apacheConf.put("instance.zookeeper.host", accumuloConf.getZookeepers());
        ClientConfiguration aconf = ClientConfiguration.fromMap(apacheConf);
        Instance instance = new ZooKeeperInstance(aconf);
        Connector con = instance.getConnector(accumuloConf.getUsername(), new PasswordToken(accumuloConf.getPassword()));

        TabletMetadataQuery query = new TabletMetadataQuery(con, conf.getMetricsTable());
        TabletMetadataView view = query.run();

        System.out.println(view.toText(TimeUnit.DAYS));
    }
}
Example #9
Source File: ExamplesIT.java From accumulo-examples with Apache License 2.0
@Before
public void setupTest() throws Exception {
    c = Accumulo.newClient().from(getClientProps()).build();
    String user = c.whoami();
    String instance = getClientInfo().getInstanceName();
    String keepers = getClientInfo().getZooKeepers();
    AuthenticationToken token = getAdminToken();
    if (token instanceof PasswordToken) {
        String passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
        writeClientPropsFile(getClientPropsFile(), instance, keepers, user, passwd);
    } else {
        Assert.fail("Unknown token type: " + token);
    }
    fs = getCluster().getFileSystem();
    dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
    origAuths = c.securityOperations().getUserAuthorizations(user);
    c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
}
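Example #9 above uses the newer Accumulo 2.x client builder rather than the older Instance/Connector API seen elsewhere on this page. A PasswordToken can also be passed directly when constructing such a client; here is a minimal sketch, with placeholder instance name, ZooKeeper hosts, and credentials:

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class PasswordTokenClientSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details -- substitute your own.
        try (AccumuloClient client = Accumulo.newClient()
                .to("myInstance", "zkhost1:2181")
                .as("myUser", new PasswordToken("myPassword"))
                .build()) {
            System.out.println("Connected as " + client.whoami());
        }
    }
}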
Example #10
Source File: FlinkEnvManager.java From OSTMap with Apache License 2.0
/**
 * Creates an output format to write data from a Flink DataSet to Accumulo.
 *
 * @return the configured Hadoop output format
 * @throws AccumuloSecurityException
 */
public HadoopOutputFormat getHadoopOF() throws AccumuloSecurityException, IOException {
    if (job == null) {
        job = Job.getInstance(new Configuration(), jobName);
    }
    AccumuloOutputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(accumuloInstanceName);
    clientConfig.withZkHosts(accumuloZookeeper);
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloOutputFormat.setDefaultTableName(job, outTable);
    AccumuloFileOutputFormat.setOutputPath(job, new Path("/tmp"));

    HadoopOutputFormat<Text, Mutation> hadoopOF = new HadoopOutputFormat<>(new AccumuloOutputFormat(), job);
    return hadoopOF;
}
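For context, here is a hypothetical driver showing how the format returned above might be used to write (table name, Mutation) pairs from a Flink batch job; the FlinkEnvManager instance named manager, the table name, and the cell values are all assumptions:

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.hadoop.io.Text;

public class FlinkWriteSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // Build a single example mutation; AccumuloOutputFormat routes each pair
        // to the table named by the Text key (or the configured default table).
        Mutation m = new Mutation("row1");
        m.put(new Text("cf"), new Text("cq"), new Value("value".getBytes()));
        DataSet<Tuple2<Text, Mutation>> mutations = env.fromElements(Tuple2.of(new Text("myTable"), m));
        mutations.output(manager.getHadoopOF()); // manager is a hypothetical FlinkEnvManager
        env.execute("write to accumulo");
    }
}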
Example #11
Source File: LanguageFrequencySink.java From OSTMap with Apache License 2.0
/**
 * Creates a BatchWriter to write data to Accumulo.
 *
 * @param table the table to write data into
 * @return a ready-to-use BatchWriter object
 * @throws AccumuloSecurityException
 * @throws AccumuloException
 * @throws TableNotFoundException
 */
private BatchWriter createBatchWriter(String table) throws AccumuloSecurityException, AccumuloException,
        TableNotFoundException, TableExistsException {
    final BatchWriterConfig bwConfig = new BatchWriterConfig();
    // buffer max 100kb (100 * 1024 = 102400)
    bwConfig.setMaxMemory(102400);
    // buffer max 10 seconds
    bwConfig.setMaxLatency(10, TimeUnit.SECONDS);
    // ensure persistence
    bwConfig.setDurability(Durability.SYNC);

    // build the accumulo connector
    Instance inst = new ZooKeeperInstance(cfg.accumuloInstanceName, cfg.accumuloZookeeper);
    conn = inst.getConnector(cfg.accumuloUser, new PasswordToken(cfg.accumuloPassword));
    Authorizations auths = new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString());

    // create the table if it does not already exist
    TableOperations tableOpts = conn.tableOperations();
    try {
        tableOpts.create(table);
    } catch (Exception e) {
        // ignore: the table already exists
    }

    // build and return the batchwriter
    return conn.createBatchWriter(table, bwConfig);
}
Example #12
Source File: AccumuloService.java From OSTMap with Apache License 2.0
/**
 * Builds an Accumulo connector.
 *
 * @return the ready-to-use connector
 * @throws AccumuloSecurityException
 * @throws AccumuloException
 */
public Connector getConnector() throws AccumuloSecurityException, AccumuloException {
    // build the accumulo connector
    Instance inst = new ZooKeeperInstance(accumuloInstanceName, accumuloZookeeper);
    Connector conn = inst.getConnector(accumuloUser, new PasswordToken(accumuloPassword));
    Authorizations auths = new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString());
    return conn;
}
Example #13
Source File: QueriesTableAgeOffIteratorTest.java From datawave with Apache License 2.0
@Test
public void testAgeOffIterator() throws Exception {
    InMemoryInstance instance = new InMemoryInstance();
    Connector connector = instance.getConnector("root", new PasswordToken(""));

    connector.tableOperations().create(TABLE_NAME);
    IteratorSetting iteratorCfg = new IteratorSetting(19, "ageoff", QueriesTableAgeOffIterator.class);
    connector.tableOperations().attachIterator(TABLE_NAME, iteratorCfg, EnumSet.allOf(IteratorScope.class));

    long now = System.currentTimeMillis();
    // Write in a couple of keys with varying timestamps
    BatchWriter writer = connector.createBatchWriter(TABLE_NAME,
            new BatchWriterConfig().setMaxLatency(30, TimeUnit.MILLISECONDS).setMaxMemory(1024L).setMaxWriteThreads(1));
    Mutation m1 = new Mutation("row1");
    m1.put("colf1", "colq1", now, "");
    writer.addMutation(m1);
    Mutation m2 = new Mutation("row2");
    m2.put("colf2", "colq2", (now + 100000), "");
    writer.addMutation(m2);
    writer.close();

    // Scan the entire table, we should only see keys whose timestamps are greater than or equal to now.
    // Mutation 1 should be expired by now, we should only see Mutation 2.
    boolean sawRow2 = false;
    Scanner scanner = connector.createScanner(TABLE_NAME, new Authorizations());
    for (Entry<Key,Value> entry : scanner) {
        if (entry.getKey().getRow().toString().equals("row1"))
            Assert.fail("We saw row1 when it should be expired.");
        if (entry.getKey().getRow().toString().equals("row2"))
            sawRow2 = true;
    }
    if (!sawRow2)
        Assert.fail("We did not see row2 and we should have");
}
Example #14
Source File: SentimentDataSink.java From OSTMap with Apache License 2.0
private BatchWriter createBatchWriter(String tableName) throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    final BatchWriterConfig batchWriterConfig = new BatchWriterConfig();
    batchWriterConfig.setMaxMemory(102400);
    batchWriterConfig.setMaxLatency(10, TimeUnit.SECONDS);
    batchWriterConfig.setDurability(Durability.SYNC);

    Instance instance = new ZooKeeperInstance(
            sinkConfiguration.accumuloInstanceName,
            sinkConfiguration.accumuloZookeeper);
    connector = instance.getConnector(
            sinkConfiguration.accumuloUser,
            new PasswordToken(sinkConfiguration.accumuloPassword));
    Authorizations authorizations = new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString());

    TableOperations tableOperations = connector.tableOperations();
    try {
        if (!tableOperations.exists(tableName)) {
            tableOperations.create(tableName);
            TreeSet<Text> textTreeSet = new TreeSet<>();
            for (int i = 0; i <= 255; i++) {
                byte[] bytes = {(byte) i};
                textTreeSet.add(new Text(bytes));
            }
            tableOperations.addSplits(tableName, textTreeSet);
        }
    } catch (Exception e) {
        log.error(e);
    }
    return connector.createBatchWriter(tableName, batchWriterConfig);
}
Example #15
Source File: RunningQueryTest.java From datawave with Apache License 2.0
@Test
public void testWithCompositeQueryLogic() throws Exception {
    // setup
    InMemoryInstance instance = new InMemoryInstance("test instance");
    Connector connector = instance.getConnector("root", new PasswordToken(""));
    // expected merged auths
    String[] auths = new String[2];
    auths[0] = "A";
    auths[1] = "C";
    List<BaseQueryLogic<?>> logics = new ArrayList<>();
    TestQueryLogic logic1 = new TestQueryLogic();
    HashSet<String> roles = new HashSet<>();
    roles.add("NONTESTROLE");
    logic1.setTableName("thisTable");
    logic1.setRoleManager(new DatawaveRoleManager(roles));
    CompositeQueryLogicTest.TestQueryLogic2 logic2 = new CompositeQueryLogicTest.TestQueryLogic2();
    HashSet<String> roles2 = new HashSet<>();
    roles2.add("NONTESTROLE");
    logic2.setTableName("thatTable");
    logic2.setRoleManager(new DatawaveRoleManager(roles2));
    logics.add(logic1);
    logics.add(logic2);
    CompositeQueryLogic compositeQueryLogic = new CompositeQueryLogic();
    compositeQueryLogic.setQueryLogics(logics);

    DatawaveUser user = new DatawaveUser(userDN, UserType.USER, Arrays.asList(auths), null, null, 0L);
    DatawavePrincipal principal = new DatawavePrincipal(Collections.singletonList(user));
    try {
        RunningQuery query = new RunningQuery(connector, connectionPriority, compositeQueryLogic, settings, null, principal,
                new QueryMetricFactoryImpl());
    } catch (NullPointerException npe) {
        Assert.fail("NullPointer encountered. This could be caused by configuration being null. Check logic.initialize()");
    }
}
Example #16
Source File: FlinkEnvManager.java From OSTMap with Apache License 2.0
/**
 * Makes Accumulo input accessible via the Flink DataSet API.
 *
 * @param env the Flink execution environment
 * @return a DataSet of Accumulo key/value pairs
 * @throws IOException
 * @throws AccumuloSecurityException
 */
public DataSet<Tuple2<Key,Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
    job = Job.getInstance(new Configuration(), jobName);
    AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString()));
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(accumuloInstanceName);
    clientConfig.withZkHosts(accumuloZookeeper);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloInputFormat.setInputTableName(job, inTable);
    return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
Example #17
Source File: RunningQueryTest.java From datawave with Apache License 2.0
@SuppressWarnings("unchecked") @Test public void testConstructorSetsConnection() throws Exception { DatawaveUser user = new DatawaveUser(userDN, UserType.USER, null, null, null, 0L); DatawavePrincipal principal = new DatawavePrincipal(Collections.singletonList(user)); // setup mock connector InMemoryInstance instance = new InMemoryInstance("test instance"); Connector connector = instance.getConnector("root", new PasswordToken("")); // setup mock logic, handles the setConnection method SampleGenericQueryConfiguration config = new SampleGenericQueryConfiguration(); expect(logic.initialize(anyObject(), anyObject(), anyObject())).andReturn(config); logic.setupQuery(config); TransformIterator iter = new TransformIterator(); expect(logic.getCollectQueryMetrics()).andReturn(Boolean.FALSE); expect(logic.getTransformIterator(settings)).andReturn(iter); replay(logic); RunningQuery query = new RunningQuery(connector, connectionPriority, logic, settings, methodAuths, principal, new QueryMetricFactoryImpl()); verify(logic); // extra tests to verify setConnection worked. Would rather mock and don't really like multiple asserts per test, but there is too much setup assertEquals(connector, query.getConnection()); assertEquals(iter, query.getTransformIterator()); }
Example #18
Source File: MetricsTableConfigHelperTest.java From datawave with Apache License 2.0
@Before
public void setUp() throws Exception {
    tops = new InMemoryInstance().getConnector("user", new PasswordToken("pass")).tableOperations();

    String tableName = MetricsConfiguration.getTable(conf);
    tops.create(tableName);

    configHelper = new MetricsTableConfigHelper();
    configHelper.setup(tableName, conf, logger);
}
Example #19
Source File: AccumuloCounterSource.java From datawave with Apache License 2.0
public AccumuloCounterSource(String instanceStr, String zookeepers, String username, String password, String table)
        throws AccumuloException, AccumuloSecurityException {
    ZooKeeperInstance instance = new ZooKeeperInstance(instanceStr, zookeepers);
    connector = instance.getConnector(username, new PasswordToken(password));
    queryTable = table;
    this.username = username;
}
Example #20
Source File: AccumuloHelper.java From datawave with Apache License 2.0
public void setup(Configuration config) throws IllegalArgumentException {
    username = ConfigurationHelper.isNull(config, USERNAME, String.class);
    byte[] pw = Base64.decodeBase64(ConfigurationHelper.isNull(config, PASSWORD, String.class).getBytes());
    password = new PasswordToken(pw);
    instanceName = ConfigurationHelper.isNull(config, INSTANCE_NAME, String.class);
    zooKeepers = ConfigurationHelper.isNull(config, ZOOKEEPERS, String.class);
}
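Example #20 above expects the password to arrive Base64-encoded in the Hadoop Configuration. For context, here is a sketch of the matching encoding step when populating that configuration; the property key "accumulo.password" is a hypothetical stand-in for the class's PASSWORD constant:

import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.conf.Configuration;

public class PasswordConfigSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // Base64-encode the raw password before storing it, mirroring the decodeBase64 call in setup().
        config.set("accumulo.password", new String(Base64.encodeBase64("secret".getBytes())));
    }
}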
Example #21
Source File: AccumuloRecordWriter.java From datawave with Apache License 2.0
public AccumuloRecordWriter(AccumuloConnectionFactory connectionFactory, Configuration conf)
        throws AccumuloException, AccumuloSecurityException, IOException {
    Level l = getLogLevel(conf);
    if (l != null) {
        log.setLevel(getLogLevel(conf));
    }
    this.simulate = getSimulationMode(conf);
    this.createTables = canCreateTables(conf);

    if (simulate) {
        log.info("Simulating output only. No writes to tables will occur");
    }

    this.bws = new HashMap<>();

    String tname = getDefaultTableName(conf);
    this.defaultTableName = (tname == null) ? null : new Text(tname);

    if (!simulate) {
        try {
            if (connectionFactory == null) {
                this.conn = getInstance(conf).getConnector(getUsername(conf), new PasswordToken(getPassword(conf)));
            } else {
                this.connFactory = connectionFactory;
                Map<String,String> trackingMap = connectionFactory.getTrackingMap(Thread.currentThread().getStackTrace());
                this.conn = connectionFactory.getConnection(Priority.ADMIN, trackingMap);
            }
            mtbw = conn.createMultiTableBatchWriter(getMaxMutationBufferSize(conf), getMaxLatency(conf), getMaxWriteThreads(conf));
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
    }
}
Example #22
Source File: CardinalityScanner.java From datawave with Apache License 2.0
public Set<CardinalityIntersectionRecord> scanCardinalities(List<String> fields, DateAggregationType dateAggregationType,
        DatatypeAggregationType datatypeAggregationType) throws Exception {
    Map<CardinalityIntersectionRecord,HyperLogLogPlus> cardinalityMap = new TreeMap<>();
    Scanner scanner = null;
    try {
        ZooKeeperInstance instance = new ZooKeeperInstance(config.getInstanceName(), config.getZookeepers());
        Connector connector = instance.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));
        Collection<Authorizations> authCollection = Collections.singleton(new Authorizations(config.getAuths().split(",")));
        if (!connector.tableOperations().exists(config.getTableName())) {
            throw new IllegalArgumentException("Table " + config.getTableName() + " does not exist");
        }
        scanner = ScannerHelper.createScanner(connector, config.getTableName(), authCollection);
        Range r = new Range(config.getBeginDate(), config.getEndDate() + "\0");
        scanner.setRange(r);

        Iterator<Map.Entry<Key,Value>> itr = scanner.iterator();
        while (itr.hasNext()) {
            Map.Entry<Key,Value> nextEntry = itr.next();
            Key key = nextEntry.getKey();
            String field = key.getColumnFamily().toString();
            if (fields != null && !fields.isEmpty() && !fields.contains(field)) {
                continue;
            } else {
                addEntry(cardinalityMap, nextEntry, dateAggregationType, datatypeAggregationType);
            }
        }
    } catch (Exception e) {
        log.error(e);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return cardinalityMap.keySet();
}
Example #23
Source File: BulkInputFormat.java From datawave with Apache License 2.0
/**
 * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}.
 *
 * @param conf
 *            the Hadoop configuration object
 * @throws IOException
 *             if the configuration is improperly configured
 */
protected static void validateOptions(Configuration conf) throws IOException {
    if (!conf.getBoolean(INPUT_INFO_HAS_BEEN_SET, false))
        throw new IOException("Input info has not been set.");
    if (!conf.getBoolean(INSTANCE_HAS_BEEN_SET, false))
        throw new IOException("Instance info has not been set.");
    /*
     * if (conf.get(RACKSTRATEGY) == null) { throw new IOException("Rack strategy must be set."); }
     */
    // validate that we can connect as configured
    try {
        Connector c = getInstance(conf).getConnector(getUsername(conf), new PasswordToken(getPassword(conf)));
        if (!c.securityOperations().authenticateUser(getUsername(conf), new PasswordToken(getPassword(conf))))
            throw new IOException("Unable to authenticate user");
        if (!c.securityOperations().hasTablePermission(getUsername(conf), getTablename(conf), TablePermission.READ))
            throw new IOException("Unable to access table");

        if (!usesLocalIterators(conf)) {
            // validate that any scan-time iterators can be loaded by the tablet servers
            for (AccumuloIterator iter : getIterators(conf)) {
                if (!c.tableOperations().testClassLoad(getTablename(conf), iter.getIteratorClass(), SortedKeyValueIterator.class.getName())
                        && !c.instanceOperations().testClassLoad(iter.getIteratorClass(), SortedKeyValueIterator.class.getName()))
                    throw new AccumuloException("Servers are unable to load " + iter.getIteratorClass() + " as a "
                            + SortedKeyValueIterator.class.getName());
            }
        }
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
        throw new IOException(e);
    }
}
Example #24
Source File: QueryTestTableHelper.java From datawave with Apache License 2.0
public QueryTestTableHelper(String instanceName, Logger log, RebuildingScannerTestHelper.TEARDOWN teardown,
        RebuildingScannerTestHelper.INTERRUPT interrupt)
        throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException {
    // create mock instance and connector
    InMemoryInstance i = new InMemoryInstance(instanceName);
    this.connector = RebuildingScannerTestHelper.getConnector(i, "root", new PasswordToken(""), teardown, interrupt);
    this.log = log;

    createTables();
}
Example #25
Source File: DiscoveryIteratorTest.java From datawave with Apache License 2.0
@Test
public void testHappyPath() throws Throwable {
    Connector con = new InMemoryInstance("DiscoveryIteratorTest").getConnector("root", new PasswordToken(""));
    con.tableOperations().create("index");
    writeSample(con.createBatchWriter("index",
            new BatchWriterConfig().setMaxLatency(0, TimeUnit.SECONDS).setMaxMemory(0).setMaxWriteThreads(1)));
    Scanner s = con.createScanner("index", new Authorizations("FOO"));
    s.addScanIterator(new IteratorSetting(50, DiscoveryIterator.class));
    s.setRange(new Range());

    Iterator<Map.Entry<Key,Value>> itr = s.iterator();
    assertTrue(itr.hasNext());
    Map.Entry<Key,Value> e = itr.next();
    assertFalse(itr.hasNext());

    Key key = e.getKey();
    assertEquals("term", key.getRow().toString());
    assertEquals("field", key.getColumnFamily().toString());
    // see DiscoveryIterator for why this has a max unsigned char tacked on the end
    assertEquals("20130101\uffff", key.getColumnQualifier().toString());

    Value value = e.getValue();
    assertTrue(value.getSize() > 0);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(value.get(), value.getSize());
    ArrayWritable valWrapper = new ArrayWritable(DiscoveredThing.class);
    valWrapper.readFields(in);
    Writable[] values = valWrapper.get();
    assertEquals(3, values.length);
    Set<String> types = Sets.newHashSet("t1", "t2", "t3");
    for (int i = 0; i < 3; ++i) {
        DiscoveredThing thing = (DiscoveredThing) values[i];
        assertEquals("term", thing.getTerm());
        assertEquals("field", thing.getField());
        assertTrue(types.remove(thing.getType()));
        assertEquals("20130101", thing.getDate());
        assertEquals("FOO", thing.getColumnVisibility());
        assertEquals(240L, thing.getCount());
    }
}
Example #26
Source File: MetricsDailySummaryReducer.java From datawave with Apache License 2.0
public static void configureJob(Job job, int numDays, String instance, String zookeepers, String userName, String password,
        String outputTable) throws AccumuloSecurityException {
    // Cap the number of reducers at 100, just in case we have a large day range (shouldn't really happen though)
    job.setNumReduceTasks(Math.min(numDays, 100));
    job.setReducerClass(MetricsDailySummaryReducer.class);
    job.setOutputFormatClass(AccumuloOutputFormat.class);

    AccumuloOutputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
    AccumuloOutputFormat.setConnectorInfo(job, userName, new PasswordToken(password));
    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setDefaultTableName(job, outputTable);
}
Example #27
Source File: TestCardinalityWithQuery.java From datawave with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
    instance = new InMemoryInstance();
    connector = instance.getConnector("root", new PasswordToken(""));

    SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
    Date date = format.parse("20190101");
    timestamp = date.getTime();

    connector.tableOperations().create(SHARD_TABLE_NAME);
    connector.tableOperations().create(SHARD_INDEX_TABLE_NAME);
    connector.tableOperations().create(METADATA_TABLE_NAME);
}
Example #28
Source File: DateIndexHelperTest.java From datawave with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
    // Set logging levels
    Logger.getRootLogger().setLevel(Level.OFF);

    TimeZone.setDefault(TimeZone.getTimeZone("GMT"));
    System.setProperty("file.encoding", "UTF8");

    // create mock instance and connector
    InMemoryInstance i = new InMemoryInstance(DateIndexHelperTest.class.getName());
    connector = i.getConnector("root", new PasswordToken(""));
    recordWriter = new MockAccumuloRecordWriter();
    TableOperations tops = connector.tableOperations();
    tops.create(TableName.DATE_INDEX);
    tops = connector.tableOperations();
    tops.create("FOO_TABLE"); // unused except for testing the cache key

    BatchWriterConfig bwCfg = new BatchWriterConfig().setMaxLatency(1, TimeUnit.SECONDS).setMaxMemory(1000L).setMaxWriteThreads(1);
    recordWriter.addWriter(new Text(TableName.DATE_INDEX), connector.createBatchWriter(TableName.DATE_INDEX, bwCfg));

    // initialize some mappings
    write("20100101", new int[] {1}, "test", "LOADED", "LOAD_DATE", "20100102", "A");
    write("20100102", new int[] {5}, "test", "LOADED", "LOAD_DATE", "20100102", "BB");
    write("20100103", new int[] {1, 3}, "test", "LOADED", "LOAD_DATE", "20100104", "CCCC");

    dumpTable(auths);
}
Example #29
Source File: MockMetadataHelper.java From datawave with Apache License 2.0
private static Connector getConnector() {
    try {
        return new InMemoryInstance().getConnector("root", new PasswordToken(""));
    } catch (AccumuloException | AccumuloSecurityException e) {
        throw new RuntimeException(e);
    }
}
Example #30
Source File: AccumuloConnectionFactoryBean.java From datawave with Apache License 2.0
private void setupMockAccumuloUser(ConnectionPoolConfiguration conf, AccumuloConnectionPool pool,
        HashMap<String,Pair<String,PasswordToken>> instances) throws Exception {
    Connector c = null;
    try {
        c = pool.borrowObject(new HashMap<>());

        Pair<String,PasswordToken> pair = instances.get(cache.getInstance().getInstanceID());
        String user = "root";
        PasswordToken password = new PasswordToken(new byte[0]);
        if (pair != null && user.equals(pair.getFirst()))
            password = pair.getSecond();
        SecurityOperations security = cache.getInstance().getConnector(user, password).securityOperations();

        Set<String> users = security.listLocalUsers();
        if (!users.contains(conf.getUsername())) {
            security.createLocalUser(conf.getUsername(), new PasswordToken(conf.getPassword()));
            security.changeUserAuthorizations(conf.getUsername(), c.securityOperations().getUserAuthorizations(conf.getUsername()));
        } else {
            PasswordToken newPassword = new PasswordToken(conf.getPassword());
            // If we're changing root's password, and trying to change then keep track of that. If we have multiple instances
            // that specify mismatching passwords, then throw an error.
            if (user.equals(conf.getUsername())) {
                if (pair != null && !newPassword.equals(pair.getSecond()))
                    throw new IllegalStateException(
                            "Invalid AccumuloConnectionFactoryBean configuration--multiple pools are configured with different root passwords!");
                instances.put(cache.getInstance().getInstanceID(), new Pair<>(conf.getUsername(), newPassword));
            }
            // match root's password on mock to the password on the actual Accumulo instance
            security.changeLocalUserPassword(conf.getUsername(), newPassword);
        }
    } finally {
        pool.returnObject(c);
    }
}