Java Code Examples for kafka.javaapi.producer.Producer#send()
The following examples show how to use kafka.javaapi.producer.Producer#send(). Each example comes from an open-source project; the source file, project, and license are noted above each listing.
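All of the examples below follow the same basic pattern: build a ProducerConfig from a Properties object, create a Producer, and pass one or more KeyedMessage instances to send(). As a primer, here is a minimal, self-contained sketch of that pattern against the legacy (pre-0.8.2) producer API. The class name MinimalSendSketch, the broker address localhost:9092, the topic "test", and the key/value strings are placeholder assumptions, not taken from any example on this page.

import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MinimalSendSketch {

  public static void main(String[] args) {
    // Minimal configuration for the legacy producer API; all values here
    // are placeholder assumptions, not taken from the examples below.
    Properties props = new Properties();
    props.put("metadata.broker.list", "localhost:9092");             // broker(s) to bootstrap from
    props.put("serializer.class", "kafka.serializer.StringEncoder"); // message value encoder
    props.put("request.required.acks", "1");                         // wait for the leader's ack

    Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));
    try {
      // KeyedMessage carries (topic, optional key, value); send() also
      // accepts a List<KeyedMessage<K, V>> for batching.
      producer.send(new KeyedMessage<String, String>("test", "key-0", "hello kafka"));
    } finally {
      producer.close();
    }
  }
}

Note that most of the examples below vary only in configuration (sync vs. async, compression, acks) and in how they build the message payload.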
Example 1
Source File: SimpleKafkaPublisher.java From twill with Apache License 2.0
@Override
public ListenableFuture<Integer> send() {
  try {
    int size = messages.size();
    Producer<Integer, ByteBuffer> kafkaProducer = producer.get();
    if (kafkaProducer == null) {
      return Futures.immediateFailedFuture(new IllegalStateException("No kafka producer available."));
    }
    kafkaProducer.send(messages);
    return Futures.immediateFuture(size);
  } catch (Exception e) {
    return Futures.immediateFailedFuture(e);
  } finally {
    messages.clear();
  }
}
Example 2
Source File: StageToKafkaDriver.java From geowave with Apache License 2.0
@Override
protected void processFile(
    final URL file,
    final String typeName,
    final GeoWaveAvroFormatPlugin<?, ?> plugin,
    final StageKafkaData<?> runData) {
  try {
    final Producer<String, Object> producer =
        (Producer<String, Object>) runData.getProducer(typeName, plugin);
    try (final CloseableIterator<?> avroRecords = plugin.toAvroObjects(file)) {
      while (avroRecords.hasNext()) {
        final Object avroRecord = avroRecords.next();
        final KeyedMessage<String, Object> data = new KeyedMessage<>(typeName, avroRecord);
        producer.send(data);
      }
    }
  } catch (final Exception e) {
    LOGGER.info(
        "Unable to send file [" + file.getPath() + "] to Kafka topic: " + e.getMessage(),
        e);
  }
}
Example 3
Source File: KafkaConnPoolUtilsTest.java From yuzhouwan with Apache License 2.0
@Ignore
@Test
public void getConnTest() throws Exception {
  PropUtils p = PropUtils.getInstance();
  int kafkaConnPoolSize = Integer.parseInt(p.getProperty("kafka.conn.pool.size"));
  Producer<String, byte[]> conn = KafkaConnPoolUtils.getInstance().getConn();
  String topic = p.getProperty("kafka.topic");
  for (int i = 0, max = 1000000000; i < max; i++) {
    System.out.println(String.format("Sending %s/%s ...", i, max));
    Thread.sleep(1000);
    conn.send(new KeyedMessage<>(topic,
        ("{\"appId\":1,\"attemptId\":\"2\",\"callId\":\"" + i + "\",\"description\":\"yuzhouwan\"}").getBytes()));
  }
  for (int i = 1; i < 2 * kafkaConnPoolSize; i++) {
    KafkaConnPoolUtils.getInstance().getConn();
  }
}
Example 4
Source File: NativeProducer.java From spring-kafka-demo with Apache License 2.0
public static void main(String[] args) {
  String topic = "test";
  long events = 100;
  Random rand = new Random();

  Properties props = new Properties();
  props.put("metadata.broker.list", "localhost:9092");
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  props.put("request.required.acks", "1");

  ProducerConfig config = new ProducerConfig(props);
  Producer<String, String> producer = new Producer<String, String>(config);

  for (long nEvents = 0; nEvents < events; nEvents++) {
    String msg = "NativeMessage-" + rand.nextInt();
    KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, nEvents + "", msg);
    producer.send(data);
  }
  producer.close();
}
Example 5
Source File: KafkaEventPublisherClient.java From product-cep with Apache License 2.0
public static void publish(String url, String topic, String testCaseFolderName, String dataFileName) {
  log.info("Starting Kafka EventPublisher Client");

  Properties props = new Properties();
  props.put("metadata.broker.list", url);
  props.put("producer.type", "sync");
  props.put("serializer.class", "kafka.serializer.StringEncoder");

  ProducerConfig config = new ProducerConfig(props);
  Producer<String, Object> producer = new Producer<String, Object>(config);
  try {
    List<String> messagesList = readMsg(getTestDataFileLocation(testCaseFolderName, dataFileName));
    for (String message : messagesList) {
      log.info(String.format("Sending message: %s", message));
      KeyedMessage<String, Object> data = new KeyedMessage<String, Object>(topic, message);
      producer.send(data);
      Thread.sleep(100);
    }
    Thread.sleep(1000);
  } catch (Throwable t) {
    log.error("Error when sending the messages", t);
  } finally {
    producer.close();
  }
}
Example 6
Source File: KafkaUtilsTest.java From storm-kafka-0.8-plus with Apache License 2.0
private void createTopicAndSendMessage(String key, String value) {
  Properties p = new Properties();
  p.setProperty("metadata.broker.list", broker.getBrokerConnectionString());
  p.setProperty("serializer.class", "kafka.serializer.StringEncoder");
  ProducerConfig producerConfig = new ProducerConfig(p);
  Producer<String, String> producer = new Producer<String, String>(producerConfig);
  producer.send(new KeyedMessage<String, String>(config.topic, key, value));
}
Example 7
Source File: StringMessageHandler.java From mod-kafka with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public void send(Producer producer, String topic, String partition, JsonObject message) {
  producer.send(new KeyedMessage<String, String>(
      topic, partition, message.getString(PAYLOAD)));
}
Example 8
Source File: AlertsSearcher.java From opensoc-streaming with Apache License 2.0
private void doSenderWork(SearchHit hit) {
  String kafkaBrokerHostName = configProps.getProperty("kafkaBrokerHostName", "localhost");
  String kafkaBrokerHostPort = configProps.getProperty("kafkaBrokerHostPort", "9092");
  String kafkaTopicName = configProps.getProperty("kafkaTopicName", "test");
  logger.debug("kafkaBrokerHostName: " + kafkaBrokerHostName);
  logger.debug("kafkaBrokerHostPort: " + kafkaBrokerHostPort);
  logger.debug("kafkaTopicName: " + kafkaTopicName);

  String sourceData = hit.getSourceAsString();
  logger.debug("Source Data: " + sourceData);

  Properties props = new Properties();
  props.put("metadata.broker.list", kafkaBrokerHostName + ":" + kafkaBrokerHostPort);
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  // props.put("partitioner.class", "example.producer.SimplePartitioner");
  props.put("request.required.acks", "1");

  ProducerConfig config = new ProducerConfig(props);
  Producer<String, String> producer = new Producer<String, String>(config);
  KeyedMessage<String, String> data = new KeyedMessage<String, String>(kafkaTopicName, "", sourceData);
  producer.send(data);
}
Example 9
Source File: KafkaAvroWriter.java From hiped2 with Apache License 2.0
/**
 * The MapReduce driver - setup and launch the job.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {
  Cli cli = Cli.builder().setArgs(args).addOptions(Options.values()).build();
  int result = cli.runCmd();
  if (result != 0) {
    return result;
  }

  File inputFile = new File(cli.getArgValueAsString(Options.STOCKSFILE));
  String brokerList = cli.getArgValueAsString(Options.BROKER_LIST);
  String kTopic = cli.getArgValueAsString(Options.TOPIC);

  Properties props = new Properties();
  props.put("metadata.broker.list", brokerList);
  props.put("serializer.class", kafka.serializer.DefaultEncoder.class.getName());

  ProducerConfig config = new ProducerConfig(props);
  Producer<Integer, byte[]> producer = new Producer<Integer, byte[]>(config);

  for (Stock stock : AvroStockUtils.fromCsvFile(inputFile)) {
    KeyedMessage<Integer, byte[]> msg = new KeyedMessage<Integer, byte[]>(kTopic, toBytes(stock));
    System.out.println("Sending " + msg + " to kafka @ topic " + kTopic);
    producer.send(msg);
  }

  producer.close();
  System.out.println("done!");
  return 0;
}
Example 10
Source File: TopicReporter.java From metrics-kafka with Apache License 2.0
private void send(Producer producer, String header, String topic, String message) {
  final Long time = TimeUnit.MILLISECONDS.toSeconds(clock.time() - startTime);
  try {
    producer.send(new KeyedMessage(topic, format("%s\n%d,%s", header, time, message).getBytes("UTF-8")));
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException(e);
  }
}
Example 11
Source File: Kafka.java From product-cep with Apache License 2.0
public static void main(String args[]) {
  log.info("Command line arguments passed: " + Arrays.deepToString(args));
  log.info("Starting Kafka Client");

  String url = args[0];
  String topic = args[1];
  String filePath = args[2];
  String sampleNumber = args[3];

  Properties props = new Properties();
  props.put("metadata.broker.list", url);
  props.put("serializer.class", "kafka.serializer.StringEncoder");

  ProducerConfig config = new ProducerConfig(props);
  Producer<String, Object> producer = new Producer<String, Object>(config);
  try {
    filePath = KafkaUtil.getEventFilePath(sampleNumber, topic, filePath);
    readMsg(filePath);
    for (String message : messagesList) {
      System.out.println("Sending message:");
      System.out.println(message);
      KeyedMessage<String, Object> data = new KeyedMessage<String, Object>(topic, message);
      producer.send(data);
    }
    Thread.sleep(500);
  } catch (Throwable t) {
    log.error("Error when sending the messages", t);
  } finally {
    producer.close();
  }
}
Example 12
Source File: ScribeConsumerClusterTestHelper.java From Scribengin with GNU Affero General Public License v3.0
public void createKafkaData(int startNum) {
  // Write numOfMessages to Kafka
  Properties producerProps = new Properties();
  producerProps.put("metadata.broker.list", "localhost:9092");
  producerProps.put("serializer.class", "kafka.serializer.StringEncoder");
  producerProps.put("request.required.acks", "1");

  Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(producerProps));
  for (int i = startNum; i < startNum + numOfMessages; i++) {
    KeyedMessage<String, String> data =
        new KeyedMessage<String, String>(TOPIC, "Neverwinter" + Integer.toString(i));
    producer.send(data);
  }
  producer.close();
}
Example 13
Source File: ProducerExample.java From pulsar with Apache License 2.0
private static void publishMessage(Arguments arguments) {
  // (2) Create producer
  Properties properties2 = new Properties();
  properties2.put(BROKER_URL, arguments.serviceUrl);
  properties2.put(PRODUCER_TYPE, "sync");
  properties2.put(SERIALIZER_CLASS, TestEncoder.class.getName());
  properties2.put(KEY_SERIALIZER_CLASS, StringEncoder.class.getName());
  properties2.put(PARTITIONER_CLASS, TestPartitioner.class.getName());
  properties2.put(COMPRESSION_CODEC, "gzip"); // compression: ZLIB
  properties2.put(QUEUE_ENQUEUE_TIMEOUT_MS, "-1"); // block queue if full => -1 = true
  properties2.put(QUEUE_BUFFERING_MAX_MESSAGES, "6000"); // queue max message
  properties2.put(QUEUE_BUFFERING_MAX_MS, "100"); // batch delay
  properties2.put(BATCH_NUM_MESSAGES, "500"); // batch msg
  properties2.put(CLIENT_ID, "test");
  ProducerConfig config = new ProducerConfig(properties2);
  Producer<String, Tweet> producer = new Producer<>(config);

  String name = "user";
  String msg = arguments.messageValue;
  for (int i = 0; i < arguments.totalMessages; i++) {
    String sendMessage = msg + i;
    Tweet tweet = new Tweet(name, sendMessage);
    KeyedMessage<String, Tweet> message = new KeyedMessage<>(arguments.topicName, name, tweet);
    producer.send(message);
  }
  producer.close();
  log.info("Successfully published messages {}", arguments.totalMessages);
}
Example 14
Source File: SendMessageKafka.java From storm-kafka-examples with Apache License 2.0
public static void main(String[] args) {
  Properties props = new Properties();
  props.put("zookeeper.connect", "wxb-1:2181,wxb-2:2181,wxb-3:2181");
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  props.put("producer.type", "async");
  props.put("compression.codec", "1");
  props.put("metadata.broker.list", "wxb-1:6667,wxb-2:6667,wxb-3:6667");

  ProducerConfig config = new ProducerConfig(props);
  Producer<String, String> producer = new Producer<String, String>(config);
  DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  Random r = new Random();
  for (int i = 0; i < 1000; i++) {
    int id = r.nextInt(10000000);
    int memberid = r.nextInt(100000);
    int totalprice = r.nextInt(1000) + 100;
    int preferential = r.nextInt(100);
    int sendpay = r.nextInt(3);

    StringBuffer data = new StringBuffer();
    data.append(String.valueOf(id)).append("\t")
        .append(String.valueOf(memberid)).append("\t")
        .append(String.valueOf(totalprice)).append("\t")
        .append(String.valueOf(preferential)).append("\t")
        .append(String.valueOf(sendpay)).append("\t")
        .append(df.format(new Date()));
    System.out.println(data.toString());
    producer.send(new KeyedMessage<String, String>("order", data.toString()));
  }
  producer.close();
  System.out.println("send over ------------------");
}
Example 15
Source File: ProducerDemo.java From KafkaExample with Apache License 2.0
public static void sendOne(Producer<String, String> producer, String topic) throws InterruptedException {
  boolean sleepFlag = false;
  KeyedMessage<String, String> message1 = new KeyedMessage<String, String>(topic, "0", "test 0");
  producer.send(message1);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message2 = new KeyedMessage<String, String>(topic, "1", "test 1");
  producer.send(message2);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message3 = new KeyedMessage<String, String>(topic, "2", "test 2");
  producer.send(message3);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message4 = new KeyedMessage<String, String>(topic, "3", "test 3");
  producer.send(message4);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message5 = new KeyedMessage<String, String>(topic, "4", "test 4");
  producer.send(message5);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message6 = new KeyedMessage<String, String>(topic, "5", "test 5");
  producer.send(message6);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message7 = new KeyedMessage<String, String>(topic, "6", "test 6");
  producer.send(message7);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message8 = new KeyedMessage<String, String>(topic, "7", "test 7");
  producer.send(message8);
  if (sleepFlag) Thread.sleep(5000);
  KeyedMessage<String, String> message9 = new KeyedMessage<String, String>(topic, "8", "test 8");
  producer.send(message9);
  if (sleepFlag) Thread.sleep(5000);
  producer.close();
}
Example 16
Source File: ProcessStreamingData.java From spark-streaming-direct-kafka with Apache License 2.0
public void publishMessagesToKafka(Producer producer, byte[] message) {
  try {
    List<KeyedMessage<String, byte[]>> keyedMessageList = Lists.newArrayListWithCapacity(1);
    String topic = config.getDefaultProducerKafkaTopicName();
    keyedMessageList.add(new KeyedMessage<>(topic, message));
    producer.send(keyedMessageList);
  } catch (Exception e) {
    logger.error("Error occurred while publishing to error kafka queue {}", e);
  }
}
Example 17
Source File: Ingress.java From warp10-platform with Apache License 2.0
private void sendDataMessage(KafkaDataMessage msg) throws IOException {
  AtomicLong dms = this.dataMessagesSize.get();
  List<KeyedMessage<byte[], byte[]>> msglist = this.dataMessages.get();

  if (null != msg) {
    //
    // Build key
    //
    byte[] bytes = new byte[16];
    GTSHelper.fillGTSIds(bytes, 0, msg.getClassId(), msg.getLabelsId());

    //ByteBuffer bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.BIG_ENDIAN);
    //bb.putLong(encoder.getClassId());
    //bb.putLong(encoder.getLabelsId());

    TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());

    byte[] msgbytes = null;

    try {
      msgbytes = serializer.serialize(msg);
    } catch (TException te) {
      throw new IOException(te);
    }

    //
    // Encrypt value if the AES key is defined
    //
    if (null != this.aesDataKey) {
      msgbytes = CryptoUtils.wrap(this.aesDataKey, msgbytes);
    }

    //
    // Compute MAC if the SipHash key is defined
    //
    if (null != this.siphashDataKey) {
      msgbytes = CryptoUtils.addMAC(this.siphashDataKey, msgbytes);
    }

    //KeyedMessage<byte[], byte[]> message = new KeyedMessage<byte[], byte[]>(this.dataTopic, bb.array(), msgbytes);
    KeyedMessage<byte[], byte[]> message = new KeyedMessage<byte[], byte[]>(this.dataTopic, bytes, msgbytes);
    msglist.add(message);
    //this.dataMessagesSize.get().addAndGet(bb.array().length + msgbytes.length);
    dms.addAndGet(bytes.length + msgbytes.length);

    Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_DATA_MESSAGES, Sensision.EMPTY_LABELS, 1);
  }

  if (msglist.size() > 0 && (null == msg || dms.get() > DATA_MESSAGES_THRESHOLD)) {
    Producer<byte[], byte[]> producer = getDataProducer();
    //this.dataProducer.send(msglist);
    try {
      //
      // How long it takes to send messages to Kafka
      //
      long nano = System.nanoTime();
      producer.send(msglist);
      nano = System.nanoTime() - nano;
      Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_DATA_PRODUCER_SEND, Sensision.EMPTY_LABELS, nano);
    } catch (Throwable t) {
      throw t;
    } finally {
      recycleDataProducer(producer);
    }
    Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_DATA_SEND, Sensision.EMPTY_LABELS, 1);
    msglist.clear();
    dms.set(0L);
  }
}
Example 18
Source File: DataFlowFromCsvMain.java From Decision with Apache License 2.0
public static void main(String[] args) throws IOException, NumberFormatException, InterruptedException {
  if (args.length < 4) {
    log.info("Usage: \n param 1 - path to file \n param 2 - stream name to send the data \n param 3 - time in ms to wait to send each data \n param 4 - broker list");
  } else {
    Producer<String, String> producer = new Producer<String, String>(createProducerConfig(args[3]));
    Gson gson = new Gson();

    Reader in = new FileReader(args[0]);
    CSVParser parser = CSVFormat.DEFAULT.parse(in);
    List<String> columnNames = new ArrayList<>();
    for (CSVRecord csvRecord : parser.getRecords()) {
      if (columnNames.size() == 0) {
        Iterator<String> iterator = csvRecord.iterator();
        while (iterator.hasNext()) {
          columnNames.add(iterator.next());
        }
      } else {
        StratioStreamingMessage message = new StratioStreamingMessage();
        message.setOperation(STREAM_OPERATIONS.MANIPULATION.INSERT.toLowerCase());
        message.setStreamName(args[1]);
        message.setTimestamp(System.currentTimeMillis());
        message.setSession_id(String.valueOf(System.currentTimeMillis()));
        message.setRequest_id(String.valueOf(System.currentTimeMillis()));
        message.setRequest("dummy request");

        List<ColumnNameTypeValue> sensorData = new ArrayList<>();
        for (int i = 0; i < columnNames.size(); i++) {
          // Workaround
          Object value = null;
          try {
            value = Double.valueOf(csvRecord.get(i));
          } catch (NumberFormatException e) {
            value = csvRecord.get(i);
          }
          sensorData.add(new ColumnNameTypeValue(columnNames.get(i), null, value));
        }
        message.setColumns(sensorData);

        String json = gson.toJson(message);
        log.info("Sending data: {}", json);
        producer.send(new KeyedMessage<String, String>(InternalTopic.TOPIC_DATA.getTopicName(),
            STREAM_OPERATIONS.MANIPULATION.INSERT, json));
        log.info("Sleeping {} ms...", args[2]);
        Thread.sleep(Long.valueOf(args[2]));
      }
    }
    log.info("Program completed.");
  }
}
Example 19
Source File: Ingress.java From warp10-platform with Apache License 2.0
/**
 * Push a metadata message onto the buffered list of Kafka messages
 * and flush the list to Kafka if it has reached a threshold.
 *
 * @param key Key of the message to queue
 * @param value Value of the message to queue
 */
private void pushMetadataMessage(byte[] key, byte[] value) throws IOException {
  AtomicLong mms = this.metadataMessagesSize.get();
  List<KeyedMessage<byte[], byte[]>> msglist = this.metadataMessages.get();

  if (null != key && null != value) {
    //
    // Add key as a prefix of value
    //
    byte[] kv = Arrays.copyOf(key, key.length + value.length);
    System.arraycopy(value, 0, kv, key.length, value.length);
    value = kv;

    //
    // Encrypt value if the AES key is defined
    //
    if (null != this.AES_KAFKA_META) {
      value = CryptoUtils.wrap(this.AES_KAFKA_META, value);
    }

    //
    // Compute MAC if the SipHash key is defined
    //
    if (null != this.SIPHASH_KAFKA_META) {
      value = CryptoUtils.addMAC(this.SIPHASH_KAFKA_META, value);
    }

    KeyedMessage<byte[], byte[]> message = new KeyedMessage<byte[], byte[]>(this.metaTopic, Arrays.copyOf(key, key.length), value);
    msglist.add(message);
    mms.addAndGet(key.length + value.length);

    Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_META_MESSAGES, Sensision.EMPTY_LABELS, 1);
  }

  if (msglist.size() > 0 && (null == key || null == value || mms.get() > METADATA_MESSAGES_THRESHOLD)) {
    Producer<byte[], byte[]> producer = this.metaProducerPool.getProducer();
    try {
      //
      // How long it takes to send messages to Kafka
      //
      long nano = System.nanoTime();
      producer.send(msglist);
      nano = System.nanoTime() - nano;
      Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_METADATA_PRODUCER_SEND, Sensision.EMPTY_LABELS, nano);
    } catch (Throwable t) {
      //
      // We need to remove the IDs of Metadata in 'msglist' from the cache so they get a chance to be
      // pushed later
      //
      for (KeyedMessage<byte[], byte[]> msg : msglist) {
        synchronized (this.metadataCache) {
          this.metadataCache.remove(new BigInteger(msg.key()));
        }
      }
      throw t;
    } finally {
      this.metaProducerPool.recycleProducer(producer);
    }
    Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_META_SEND, Sensision.EMPTY_LABELS, 1);
    msglist.clear();
    mms.set(0L);

    // Update sensision metric with size of metadata cache
    Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_METADATA_CACHED, Sensision.EMPTY_LABELS, this.metadataCache.size());
  }
}
Example 20
Source File: TwitterProducer.java From lsiem with Apache License 2.0
private void start(Context context) {
  /** Producer properties **/
  Properties props = new Properties();
  props.put("metadata.broker.list", context.getString(TwitterSourceConstant.BROKER_LIST));
  props.put("serializer.class", context.getString(TwitterSourceConstant.SERIALIZER));
  props.put("request.required.acks", context.getString(TwitterSourceConstant.REQUIRED_ACKS));
  ProducerConfig config = new ProducerConfig(props);
  final Producer<String, String> producer = new Producer<String, String>(config);

  /** Twitter properties **/
  consumerKey = context.getString(TwitterSourceConstant.CONSUMER_KEY_KEY);
  consumerSecret = context.getString(TwitterSourceConstant.CONSUMER_SECRET_KEY);
  accessToken = context.getString(TwitterSourceConstant.ACCESS_TOKEN_KEY);
  accessTokenSecret = context.getString(TwitterSourceConstant.ACCESS_TOKEN_SECRET_KEY);

  ConfigurationBuilder cb = new ConfigurationBuilder();
  cb.setOAuthConsumerKey(consumerKey);
  cb.setOAuthConsumerSecret(consumerSecret);
  cb.setOAuthAccessToken(accessToken);
  cb.setOAuthAccessTokenSecret(accessTokenSecret);
  cb.setJSONStoreEnabled(true);
  cb.setIncludeEntitiesEnabled(true);

  twitterStream = new TwitterStreamFactory(cb.build()).getInstance();
  final Map<String, String> headers = new HashMap<String, String>();

  /** Twitter listener **/
  StatusListener listener = new StatusListener() {
    // The onStatus method is executed every time a new tweet comes in.
    public void onStatus(Status status) {
      // The EventBuilder is used to build an event using the raw JSON of a tweet
      logger.info(status.getUser().getScreenName() + ": " + status.getText());
      //delete uncomment sign
      KeyedMessage<String, String> data = new KeyedMessage<String, String>(
          context.getString(TwitterSourceConstant.KAFKA_TOPIC), DataObjectFactory.getRawJSON(status));
      producer.send(data);
    }

    public void onDeletionNotice(StatusDeletionNotice statusDeletionNotice) {}

    public void onTrackLimitationNotice(int numberOfLimitedStatuses) {}

    public void onScrubGeo(long userId, long upToStatusId) {}

    public void onException(Exception ex) {
      logger.info("Shutting down Twitter sample stream...");
      //twitterStream.shutdown();
    }

    public void onStallWarning(StallWarning warning) {}
  };

  /** Bind the listener **/
  twitterStream.addListener(listener);

  /** GOGOGO **/
  twitterStream.sample();
}