Java Code Examples for org.apache.cassandra.hadoop.ConfigHelper#setOutputInitialAddress()
The following examples show how to use org.apache.cassandra.hadoop.ConfigHelper#setOutputInitialAddress(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
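Before the project-specific examples, here is a minimal sketch of the typical call sequence when configuring a Hadoop job that writes to Cassandra through ColumnFamilyOutputFormat. The class name, host address, port, keyspace, and column family below are placeholders chosen for illustration, not values taken from the examples that follow; a single local Cassandra node listening on the default Thrift port 9160 is assumed.

import java.io.IOException;
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class OutputInitialAddressExample
{
    public static void main(String[] args) throws IOException
    {
        // Hypothetical job setup: write job output to Cassandra.
        Job job = new Job(new Configuration(), "cassandra-output-example");
        job.setOutputFormatClass(ColumnFamilyOutputFormat.class);

        // Keyspace and column family to write to (placeholder names).
        ConfigHelper.setOutputColumnFamily(job.getConfiguration(), "my_keyspace", "my_cf");

        // setOutputInitialAddress names the first Cassandra node the output format
        // contacts to discover the ring; a local node is assumed here.
        ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "127.0.0.1");
        ConfigHelper.setOutputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
    }
}

The initial address, RPC port, and partitioner are usually set together, as the examples below also show.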
Example 1
Source File: TestRingCache.java From stratio-cassandra with Apache License 2.0
private void setup(String server, int port) throws Exception
{
    /* Establish a thrift connection to the cassandra instance */
    TSocket socket = new TSocket(server, port);
    System.out.println(" connected to " + server + ":" + port + ".");
    TBinaryProtocol binaryProtocol = new TBinaryProtocol(new TFramedTransport(socket));
    Cassandra.Client cassandraClient = new Cassandra.Client(binaryProtocol);
    socket.open();
    thriftClient = cassandraClient;

    String seed = DatabaseDescriptor.getSeeds().iterator().next().getHostAddress();
    conf = new Configuration();
    ConfigHelper.setOutputPartitioner(conf, DatabaseDescriptor.getPartitioner().getClass().getName());
    ConfigHelper.setOutputInitialAddress(conf, seed);
    ConfigHelper.setOutputRpcPort(conf, Integer.toString(DatabaseDescriptor.getRpcPort()));
}
Example 2
Source File: CassandraParams.java From hdfs2cass with Apache License 2.0
private void configure(final JobConf conf)
{
    ConfigHelper.setOutputInitialAddress(conf, this.getSeedNodeHost());
    CrunchConfigHelper.setOutputColumnFamily(conf, this.getKeyspace(), this.getColumnFamily());
    ConfigHelper.setOutputPartitioner(conf, this.getPartitioner());

    if (this.getStreamThrottleMBits().isPresent()) {
        conf.set("mapreduce.output.bulkoutputformat.streamthrottlembits",
                 this.getStreamThrottleMBits().get().toString());
    }

    if (this.getCompressionClass().isPresent()) {
        ConfigHelper.setOutputCompressionClass(conf, this.getCompressionClass().get());
    }

    if (this.getRpcPort().isPresent()) {
        ConfigHelper.setOutputRpcPort(conf, String.valueOf(this.getRpcPort().get()));
    }

    conf.setJarByClass(BulkLoader.class);
}
Example 3
Source File: CqlNativeStorage.java From stratio-cassandra with Apache License 2.0
/** set store configuration settings */
public void setStoreLocation(String location, Job job) throws IOException
{
    conf = HadoopCompat.getConfiguration(job);
    setLocationFromUri(location);

    if (username != null && password != null)
        ConfigHelper.setOutputKeyspaceUserNameAndPassword(conf, username, password);
    if (splitSize > 0)
        ConfigHelper.setInputSplitSize(conf, splitSize);
    if (partitionerClass != null)
        ConfigHelper.setOutputPartitioner(conf, partitionerClass);
    if (rpcPort != null)
    {
        ConfigHelper.setOutputRpcPort(conf, rpcPort);
        ConfigHelper.setInputRpcPort(conf, rpcPort);
    }
    if (initHostAddress != null)
    {
        ConfigHelper.setOutputInitialAddress(conf, initHostAddress);
        ConfigHelper.setInputInitialAddress(conf, initHostAddress);
    }

    ConfigHelper.setOutputColumnFamily(conf, keyspace, column_family);
    CqlConfigHelper.setOutputCql(conf, outputQuery);

    setConnectionInformation();

    if (ConfigHelper.getOutputRpcPort(conf) == 0)
        throw new IOException("PIG_OUTPUT_RPC_PORT or PIG_RPC_PORT environment variable not set");
    if (ConfigHelper.getOutputInitialAddress(conf) == null)
        throw new IOException("PIG_OUTPUT_INITIAL_ADDRESS or PIG_INITIAL_ADDRESS environment variable not set");
    if (ConfigHelper.getOutputPartitioner(conf) == null)
        throw new IOException("PIG_OUTPUT_PARTITIONER or PIG_PARTITIONER environment variable not set");

    initSchema(storeSignature);
}
Example 4
Source File: WordCount.java From stratio-cassandra with Apache License 2.0
public int run(String[] args) throws Exception
{
    String outputReducerType = "filesystem";
    if (args != null && args[0].startsWith(OUTPUT_REDUCER_VAR))
    {
        String[] s = args[0].split("=");
        if (s != null && s.length == 2)
            outputReducerType = s[1];
    }
    logger.info("output reducer type: " + outputReducerType);

    // use a smaller page size that doesn't divide the row count evenly to exercise the paging logic better
    ConfigHelper.setRangeBatchSize(getConf(), 99);

    for (int i = 0; i < WordCountSetup.TEST_COUNT; i++)
    {
        String columnName = "text" + i;

        Job job = new Job(getConf(), "wordcount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);

        if (outputReducerType.equalsIgnoreCase("filesystem"))
        {
            job.setCombinerClass(ReducerToFilesystem.class);
            job.setReducerClass(ReducerToFilesystem.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH_PREFIX + i));
        }
        else
        {
            job.setReducerClass(ReducerToCassandra.class);

            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            job.setOutputKeyClass(ByteBuffer.class);
            job.setOutputValueClass(List.class);

            job.setOutputFormatClass(ColumnFamilyOutputFormat.class);

            ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE, OUTPUT_COLUMN_FAMILY);
            job.getConfiguration().set(CONF_COLUMN_NAME, "sum");
        }

        job.setInputFormatClass(ColumnFamilyInputFormat.class);

        ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE, COLUMN_FAMILY);
        SlicePredicate predicate = new SlicePredicate().setColumn_names(Arrays.asList(ByteBufferUtil.bytes(columnName)));
        ConfigHelper.setInputSlicePredicate(job.getConfiguration(), predicate);

        if (i == 4)
        {
            IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("int4"), IndexOperator.EQ, ByteBufferUtil.bytes(0));
            ConfigHelper.setInputRange(job.getConfiguration(), Arrays.asList(expr));
        }

        if (i == 5)
        {
            // this will cause the predicate to be ignored in favor of scanning everything as a wide row
            ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE, COLUMN_FAMILY, true);
        }

        ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");

        job.waitForCompletion(true);
    }
    return 0;
}