Java Code Examples for org.apache.storm.StormSubmitter#submitTopologyWithProgressBar()
The following examples show how to use org.apache.storm.StormSubmitter#submitTopologyWithProgressBar().
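For orientation before the examples, here is a minimal sketch of the call itself. SubmitHelper is a hypothetical wrapper (not taken from any of the projects below); the three checked exceptions are the ones Examples 15 and 18 catch around this call. A caller would pass a topology built with TopologyBuilder.createTopology() or StreamBuilder.build(), as the examples do.

    import org.apache.storm.Config;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.generated.AlreadyAliveException;
    import org.apache.storm.generated.AuthorizationException;
    import org.apache.storm.generated.InvalidTopologyException;
    import org.apache.storm.generated.StormTopology;

    public final class SubmitHelper {

        // Submits an already-built topology, wrapping the checked exceptions in a RuntimeException.
        public static void submit(String name, StormTopology topology) {
            Config conf = new Config();
            conf.setNumWorkers(2);
            try {
                // Uploads the topology jar (showing a console progress bar) and submits the topology.
                StormSubmitter.submitTopologyWithProgressBar(name, conf, topology);
            } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
                throw new RuntimeException("Failed to submit topology " + name, e);
            }
        }
    }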
Example 1
Source File: StatefulWindowingTopology.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomIntegerSpout());
    builder.setBolt("sumbolt", new WindowSumBolt().withWindow(new Count(5), new Count(3))
            .withMessageIdField("msgid"), 1).shuffleGrouping("spout");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("sumbolt");
    Config conf = new Config();
    conf.setDebug(false);
    //conf.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider");
    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 2
Source File: MultipleLoggerTopology.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = MultipleLoggerTopology.class.getName();
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(2);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 3
Source File: TridentWordCount.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    String topoName = "wordCounter";
    if (args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology());
    try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
        for (int i = 0; i < 10; i++) {
            System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
            Thread.sleep(1000);
        }
    }
}
Example 4
Source File: TridentMapExample.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    String topoName = "wordCounter";
    if (args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology());
    try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
        for (int i = 0; i < 10; i++) {
            System.out.println("DRPC RESULT: " + drpc.execute("words", "CAT THE DOG JUMPED"));
            Thread.sleep(1000);
        }
    }
}
Example 5
Source File: SlidingWindowTopology.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(Count.of(30), Count.of(10)), 1)
           .shuffleGrouping("integer");
    builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(Count.of(3)), 1)
           .shuffleGrouping("slidingsum");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 6
Source File: WordCountTopologyNode.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentence(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(true);
    String topoName = "word-count";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 7
Source File: TypedTupleExample.java From storm-net-adapter with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    StreamBuilder builder = new StreamBuilder();
    /*
     * The spout emits sequences of (Integer, Long, Long). TupleValueMapper can be used to extract fields
     * from the values and produce a stream of typed tuples (Tuple3<Integer, Long, Long> in this case).
     */
    Stream<Tuple3<Integer, Long, Long>> stream =
            builder.newStream(new RandomIntegerSpout(), TupleValueMappers.of(0, 1, 2));
    PairStream<Long, Integer> pairs = stream.mapToPair(t -> Pair.of(t._2 / 10000, t._1));
    pairs.window(TumblingWindows.of(Count.of(10))).groupByKey().print();
    String topoName = "test";
    if (args.length > 0) {
        topoName = args[0];
    }
    Config config = new Config();
    config.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
Example 8
Source File: InOrderDeliveryTest.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new InOrderSpout(), 8);
    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));
    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    String name = "in-order-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
    Map<String, Object> clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Iface client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    // Print metrics every 30 seconds for 25 minutes (50 iterations), then kill the topology
    for (int i = 0; i < 50; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
}
Example 9
Source File: BranchExample.java From storm-net-adapter with Apache License 2.0 | 5 votes |
@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    StreamBuilder builder = new StreamBuilder();
    Stream<Integer>[] evenAndOdd = builder
        /*
         * Create a stream of random numbers from a spout that
         * emits random integers by extracting the tuple value at index 0.
         */
        .newStream(new RandomIntegerSpout(), new ValueMapper<Integer>(0))
        /*
         * Split the stream of numbers into streams of
         * even and odd numbers. The first stream contains even
         * and the second contains odd numbers.
         */
        .branch(x -> (x % 2) == 0,
                x -> (x % 2) == 1);
    evenAndOdd[0].forEach(x -> LOG.info("EVEN> " + x));
    evenAndOdd[1].forEach(x -> LOG.info("ODD > " + x));
    Config config = new Config();
    String topoName = "branchExample";
    if (args.length > 0) {
        topoName = args[0];
    }
    config.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
Example 10
Source File: PersistentWindowingTopology.java From storm-net-adapter with Apache License 2.0 | 5 votes |
/**
 * Create and deploy the topology.
 *
 * @param args args
 * @throws Exception exception
 */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    // generate random numbers
    builder.setSpout("spout", new RandomIntegerSpout());

    // emits sliding window and global averages
    builder.setBolt("avgbolt", new AvgBolt()
            .withWindow(new Duration(10, TimeUnit.SECONDS), new Duration(2, TimeUnit.SECONDS))
            // persist the window in state
            .withPersistence()
            // max number of events to be cached in memory
            .withMaxEventsInMemory(25000), 1)
           .shuffleGrouping("spout");

    // print the values to stdout
    builder.setBolt("printer", (x, y) -> System.out.println(x.getValue(0)), 1).shuffleGrouping("avgbolt");

    Config conf = new Config();
    conf.setDebug(false);

    // checkpoint the state every 5 seconds
    conf.put(Config.TOPOLOGY_STATE_CHECKPOINT_INTERVAL, 5000);

    // use redis for state persistence
    conf.put(Config.TOPOLOGY_STATE_PROVIDER, "org.apache.storm.redis.state.RedisKeyValueStateProvider");

    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 11
Source File: StatefulTopology.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomIntegerSpout());
    builder.setBolt("partialsum", new StatefulSumBolt("partial"), 1).shuffleGrouping("spout");
    builder.setBolt("printer", new PrinterBolt(), 2).shuffleGrouping("partialsum");
    builder.setBolt("total", new StatefulSumBolt("total"), 1).shuffleGrouping("printer");
    Config conf = new Config();
    conf.setDebug(false);
    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 12
Source File: FastWordCountTopology.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FastRandomSentenceSpout(), 4);
    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    String name = "wc-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }
    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
    Map<String, Object> clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Iface client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    // Sleep for 5 mins, printing metrics every 30 seconds
    for (int i = 0; i < 10; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
}
Example 13
Source File: JoinExample.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    StreamBuilder builder = new StreamBuilder();

    // a stream of (number, square) pairs
    PairStream<Integer, Integer> squares = builder
        .newStream(new NumberSpout(x -> x * x), new PairValueMapper<>(0, 1));

    // a stream of (number, cube) pairs
    PairStream<Integer, Integer> cubes = builder
        .newStream(new NumberSpout(x -> x * x * x), new PairValueMapper<>(0, 1));

    // create a windowed stream of five seconds duration
    squares.window(TumblingWindows.of(Duration.seconds(5)))
        /*
         * Join the squares and the cubes stream within the window.
         * The values in the squares stream having the same key as that
         * of the cubes stream within the window will be joined together.
         */
        .join(cubes)
        /*
         * The results should be of the form (number, (square, cube)).
         */
        .print();

    Config config = new Config();
    String topoName = JoinExample.class.getName();
    if (args.length > 0) {
        topoName = args[0];
    }
    config.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
Example 14
Source File: GroupByKeyAndWindowExample.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    StreamBuilder builder = new StreamBuilder();

    // a stream of stock quotes
    builder.newStream(new StockQuotes(), new PairValueMapper<String, Double>(0, 1))
        /*
         * The elements having the same key within the window will be grouped
         * together and the corresponding values will be merged.
         *
         * The result is a PairStream<String, Iterable<Double>> with
         * 'stock symbol' as the key and 'stock prices' for that symbol within the window as the value.
         */
        .groupByKeyAndWindow(SlidingWindows.of(Count.of(6), Count.of(3)))
        .print();

    // a stream of stock quotes
    builder.newStream(new StockQuotes(), new PairValueMapper<String, Double>(0, 1))
        /*
         * The elements having the same key within the window will be grouped
         * together and their values will be reduced using the given reduce function.
         *
         * Here the result is a PairStream<String, Double> with
         * 'stock symbol' as the key and the maximum price for that symbol within the window as the value.
         */
        .reduceByKeyAndWindow((x, y) -> x > y ? x : y, SlidingWindows.of(Count.of(6), Count.of(3)))
        .print();

    Config config = new Config();
    String topoName = GroupByKeyAndWindowExample.class.getName();
    if (args.length > 0) {
        topoName = args[0];
    }
    config.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
Example 15
Source File: BlobStoreAPIWordCountTopology.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public void buildAndLaunchWordCountTopology(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("filter", new FilterWords(), 6).shuffleGrouping("split");
    Config conf = new Config();
    conf.setDebug(true);
    try {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } catch (InvalidTopologyException | AuthorizationException | AlreadyAliveException exp) {
        throw new RuntimeException(exp);
    }
}
Example 16
Source File: RTJoinExampleTopology.java From streamline with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    // if (!NimbusClient.isLocalOverride()) {
    //     throw new IllegalStateException("This example only works in local mode. "
    //         + "Run with storm local not storm jar");
    // }
    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("genderSpout", genderSpout);
    builder.setSpout("ageSpout", ageSpout);

    // join of 'age' and 'gender' records on the 'id' field
    RealtimeJoinBolt joiner = new RealtimeJoinBolt(StreamKind.SOURCE)
        .select("genderSpout:id,ageSpout:id,gender,age")
        .from("genderSpout", 5, false)
        .outerJoin("ageSpout", Duration.ofSeconds(5), false,
                   Cmp.equal("genderSpout:id", "ageSpout:id"))
        .withOutputStream("jstream");

    builder.setBolt("joiner", joiner)
           .fieldsGrouping("genderSpout", new Fields("id"))
           .fieldsGrouping("ageSpout", new Fields("id"));

    builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("joiner", "jstream");

    Config conf = new Config();
    StormSubmitter.submitTopologyWithProgressBar("join-example", conf, builder.createTopology());

    generateGenderData(genderSpout);
    generateAgeData(ageSpout);
}
Example 17
Source File: WindowedWordCount.java From storm-net-adapter with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    StreamBuilder builder = new StreamBuilder();

    // A stream of random sentences
    builder.newStream(new RandomSentenceSpout(), new ValueMapper<String>(0), 2)
        /*
         * a two seconds tumbling window
         */
        .window(TumblingWindows.of(Duration.seconds(2)))
        /*
         * split the sentences to words
         */
        .flatMap(s -> Arrays.asList(s.split(" ")))
        /*
         * create a stream of (word, 1) pairs
         */
        .mapToPair(w -> Pair.of(w, 1))
        /*
         * compute the word counts in the last two second window
         */
        .countByKey()
        /*
         * emit the count for the words that occurred
         * at least five times in the last two seconds
         */
        .filter(x -> x.getSecond() >= 5)
        /*
         * print the results to stdout
         */
        .print();

    Config config = new Config();
    String topoName = "test";
    if (args.length > 0) {
        topoName = args[0];
    }
    config.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(topoName, config, builder.build());
}
Example 18
Source File: PirkTopology.java From incubator-retired-pirk with Apache License 2.0 | 5 votes |
public static void runPirkTopology() throws PIRException {
    // Set up Kafka parameters
    logger.info("Configuring Kafka.");
    String zkRoot = "/" + kafkaTopic + "_pirk_storm";
    BrokerHosts zkHosts = new ZkHosts(brokerZk);
    SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, kafkaTopic, zkRoot, kafkaClientId);
    kafkaConfig.ignoreZkOffsets = forceFromStart;

    // Create conf
    logger.info("Retrieving Query and generating Storm conf.");
    Config conf = createStormConf();
    Query query = StormUtils.getQuery(useHdfs, hdfsUri, queryFile);
    conf.put(StormConstants.N_SQUARED_KEY, query.getNSquared().toString());
    conf.put(StormConstants.QUERY_INFO_KEY, query.getQueryInfo().toMap());

    // Configure this for different types of input data on Kafka.
    kafkaConfig.scheme = new SchemeAsMultiScheme(new PirkHashScheme(conf));

    // Create topology
    StormTopology topology = getPirkTopology(kafkaConfig);

    // Run topology
    logger.info("Submitting Pirk topology to Storm...");
    try {
        StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, topology);
    } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
        throw new PIRException(e);
    }
}
Example 19
Source File: ResourceAwareExampleTopology.java From storm-net-adapter with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    // A topology can set resources in terms of CPU and memory for each component.
    // These can be chained (like with setting the CPU requirement).
    SpoutDeclarer spout = builder.setSpout("word", new TestWordSpout(), 10).setCPULoad(20);
    // Or done separately, like with setting the onheap and offheap memory requirement.
    spout.setMemoryLoad(64, 16);
    // On heap memory is used to help calculate the heap of the java process for the worker;
    // off heap memory is for things like JNI memory allocated off heap, or when using the
    // ShellBolt or ShellSpout. In this case the 16 MB of off heap is just as an example
    // as we are not using it.

    // Sometimes a Bolt or Spout will have some memory that is shared between the instances.
    // These are typically caches, but could be anything like a static database that is memory
    // mapped into the processes. These can be declared separately and added to the bolts and
    // spouts that use them. Or if only one uses it they can be created inline with the add.
    SharedOnHeap exclaimCache = new SharedOnHeap(100, "exclaim-cache");
    SharedOffHeapWithinNode notImplementedButJustAnExample =
        new SharedOffHeapWithinNode(500, "not-implemented-node-level-cache");

    // If CPU or memory is not set, the values stored in topology.component.resources.onheap.memory.mb,
    // topology.component.resources.offheap.memory.mb and topology.component.cpu.pcore.percent
    // will be used instead.
    builder
        .setBolt("exclaim1", new ExclamationBolt(), 3)
        .shuffleGrouping("word")
        .addSharedMemory(exclaimCache);

    builder
        .setBolt("exclaim2", new ExclamationBolt(), 2)
        .shuffleGrouping("exclaim1")
        .setMemoryLoad(100)
        .addSharedMemory(exclaimCache)
        .addSharedMemory(notImplementedButJustAnExample);

    Config conf = new Config();
    conf.setDebug(true);

    // Under RAS the number of workers is determined by the scheduler and the settings in the conf are ignored.
    //conf.setNumWorkers(3);

    // Instead the scheduler lets you set the maximum heap size for any worker.
    conf.setTopologyWorkerMaxHeapSize(1024.0);
    // The scheduler generally will try to pack executors into workers until the max heap size is met, but
    // this can vary depending on the specific scheduling strategy selected. The reason for this is to try
    // and balance the maximum pause time GC might take (which is larger for larger heaps) against better
    // performance because of not needing to serialize/deserialize tuples.

    // The priority of a topology describes the importance of the topology in decreasing importance
    // starting from 0 (i.e. 0 is the highest priority and importance decreases as the priority number increases).
    // Recommended range is 0-29, but no hard limit is set.
    // If there are not enough resources in a cluster, the priority in combination with how far over its
    // guarantees a user is will decide which topologies are run and which ones are not.
    conf.setTopologyPriority(29);

    // Set to use the default resource aware strategy when using the MultitenantResourceAwareBridgeScheduler.
    conf.setTopologyStrategy(
        "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy");

    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }

    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
Example 20
Source File: WordCountTopologyCsharp.java From storm-net-adapter with Apache License 2.0 | 3 votes |
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("generator", new Generator(), 1);
    builder.setBolt("splitter", new Splitter(), 5).fieldsGrouping("generator", new Fields("word"));
    builder.setBolt("counter", new Counter(), 8).fieldsGrouping("splitter", new Fields("word", "count"));

    Config conf = new Config();
    //conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("WordCount", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}