Java Code Examples for backtype.storm.topology.BoltDeclarer#shuffleGrouping()
The following examples show how to use
backtype.storm.topology.BoltDeclarer#shuffleGrouping() .
All of them are taken from open-source projects; the source file, project, and license are noted above each example.
The single-argument form, shuffleGrouping(componentId), subscribes a bolt to another component's default stream and distributes that stream's tuples randomly and evenly across the bolt's tasks; the two-argument overload, shuffleGrouping(componentId, streamId), does the same for a specific named stream.
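Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: setBolt() returns a BoltDeclarer, and calling shuffleGrouping() on that declarer wires the bolt to an upstream component. The WordSpout and PrinterBolt classes are hypothetical stand-ins written for illustration; they are not taken from any of the projects below.

import java.util.Map;
import java.util.Random;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.BoltDeclarer;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

public class ShuffleGroupingSketch {

    // A trivial spout that emits a random word roughly every 100 ms.
    public static class WordSpout extends BaseRichSpout {
        private SpoutOutputCollector collector;
        private final String[] words = {"alpha", "beta", "gamma"};
        private final Random random = new Random();

        @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.collector = collector;
        }

        @Override
        public void nextTuple() {
            Utils.sleep(100);
            collector.emit(new Values(words[random.nextInt(words.length)]));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }

    // A trivial terminal bolt that prints each word it receives.
    public static class PrinterBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            System.out.println(input.getStringByField("word"));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // Terminal bolt: declares no output streams.
        }
    }

    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("wordSpout", new WordSpout(), 1);

        // setBolt() returns a BoltDeclarer; shuffleGrouping("wordSpout") subscribes
        // the bolt to the spout's default stream and distributes tuples randomly
        // and evenly across the bolt's three tasks.
        BoltDeclarer declarer = builder.setBolt("printerBolt", new PrinterBolt(), 3);
        declarer.shuffleGrouping("wordSpout");

        // Run briefly in-process; a real deployment would use StormSubmitter instead.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("shuffle-grouping-sketch", new Config(), builder.createTopology());
        Utils.sleep(10000);
        cluster.shutdown();
    }
}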
Example 1
Source File: HdfsAuthLogMonitoringMain.java From eagle with Apache License 2.0
public static void main(String[] args) throws Exception {
    System.setProperty("config.resource", "/application.conf");
    Config config = ConfigFactory.load();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    SecurityLogParserBolt bolt = new SecurityLogParserBolt();
    TopologyBuilder builder = new TopologyBuilder();

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", bolt, numOfParserTasks);
    boltDeclarer.shuffleGrouping("ingest");

    KafkaBolt kafkaBolt = new KafkaBolt();
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", kafkaBolt, numOfSinkTasks);
    kafkaBoltDeclarer.shuffleGrouping("parserBolt");

    StormTopology topology = builder.createTopology();
    TopologySubmitter.submit(topology, config);
}
Example 2
Source File: PerformanceTestTopology.java From jstorm with Apache License 2.0
public static void SetRemoteTopology()
        throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        streamName = className[className.length - 1];
    }

    TopologyBuilder builder = new TopologyBuilder();
    int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

    builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint);
    BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint);
    // localFirstGrouping is only for jstorm
    // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    boltDeclarer.shuffleGrouping("spout");
    // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
}
Example 3
Source File: PerformanceTestTopology.java From jstorm with Apache License 2.0
public static void SetRemoteTopology() throws Exception {
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        streamName = className[className.length - 1];
    }

    TopologyBuilder builder = new TopologyBuilder();
    int spout_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);

    builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint);
    BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint);
    // localFirstGrouping is only for jstorm
    // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    boltDeclarer.shuffleGrouping("spout");
    // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);

    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
}
Example 4
Source File: JStormApplication.java From PoseidonX with Apache License 2.0
private void setDefaultBoltGrouping(BoltDeclarer bolt, String strname) throws StreamingException {
    IRichOperator operator = getOperatorByOutputStreamName(strname);
    if (operator == null) {
        StreamingException exception = new StreamingException(ErrorCode.PLATFORM_INVALID_TOPOLOGY);
        LOG.error("Can't find operator by stream name : {} .", strname, exception);
        throw exception;
    }
    bolt.shuffleGrouping(operator.getOperatorId(), strname);
}
Example 5
Source File: BatchMetaTopology.java From jstorm with Apache License 2.0
public static TopologyBuilder SetBuilder() {
    BatchTopologyBuilder batchTopologyBuilder = new BatchTopologyBuilder(topologyName);

    MetaSpoutConfig metaSpoutConfig = getMetaSpoutConfig(conf);

    BoltDeclarer rebalanceDeclarer = batchTopologyBuilder.setBolt(
            BatchMetaRebalance.BOLT_NAME, new BatchMetaRebalance(), 1);

    IBatchSpout batchSpout = new BatchMetaSpout(metaSpoutConfig);
    int spoutParal = JStormUtils.parseInt(conf.get("topology.spout.parallel"), 1);
    BoltDeclarer spoutDeclarer = batchTopologyBuilder.setSpout(
            BatchMetaSpout.SPOUT_NAME, batchSpout, spoutParal);
    spoutDeclarer.allGrouping(BatchMetaRebalance.BOLT_NAME,
            BatchMetaRebalance.REBALANCE_STREAM_ID);

    int boltParallel = JStormUtils.parseInt(conf.get("topology.bolt.parallel"), 1);

    BoltDeclarer transformDeclarer = batchTopologyBuilder.setBolt(
            TransformBolt.BOLT_NAME, new TransformBolt(), boltParallel);
    transformDeclarer.shuffleGrouping(BatchMetaSpout.SPOUT_NAME);

    BoltDeclarer countDeclarer = batchTopologyBuilder.setBolt(
            CountBolt.COUNT_BOLT_NAME, new CountBolt(), boltParallel);
    countDeclarer.shuffleGrouping(TransformBolt.BOLT_NAME);

    BoltDeclarer sumDeclarer = batchTopologyBuilder.setBolt(
            CountBolt.SUM_BOLT_NAME, new CountBolt(), boltParallel);
    sumDeclarer.shuffleGrouping(TransformBolt.BOLT_NAME);

    BoltDeclarer dbDeclarer = batchTopologyBuilder.setBolt(DBBolt.BOLT_NAME, new DBBolt(), 1);
    dbDeclarer.shuffleGrouping(CountBolt.COUNT_BOLT_NAME)
            .shuffleGrouping(CountBolt.SUM_BOLT_NAME);

    return batchTopologyBuilder.getTopologyBuilder();
}
Example 6
Source File: MRHistoryJobApplication.java From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    // 1. trigger prepare conf
    MRHistoryJobConfig appConfig = MRHistoryJobConfig.newInstance(config);
    com.typesafe.config.Config jhfAppConf = appConfig.getConfig();

    // 2. prepare JobHistoryContentFilter
    final JobHistoryContentFilterBuilder builder =
            JobHistoryContentFilterBuilder.newBuilder().acceptJobFile().acceptJobConfFile();
    String[] confKeyPatternsSplit = jhfAppConf.getString("MRConfigureKeys.jobConfigKey").split(",");
    List<String> confKeyPatterns = new ArrayList<>(confKeyPatternsSplit.length);
    for (String confKeyPattern : confKeyPatternsSplit) {
        confKeyPatterns.add(confKeyPattern.trim());
    }
    confKeyPatterns.add(Constants.JobConfiguration.CASCADING_JOB);
    confKeyPatterns.add(Constants.JobConfiguration.HIVE_JOB);
    confKeyPatterns.add(Constants.JobConfiguration.PIG_JOB);
    confKeyPatterns.add(Constants.JobConfiguration.SCOOBI_JOB);

    String jobNameKey = jhfAppConf.getString("MRConfigureKeys.jobNameKey");
    builder.setJobNameKey(jobNameKey);
    for (String key : confKeyPatterns) {
        builder.includeJobKeyPatterns(Pattern.compile(key));
    }
    JobHistoryContentFilter filter = builder.build();

    // 3. prepare topology
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String spoutName = "mrHistoryJobSpout";
    int tasks = jhfAppConf.getInt("stormConfig.mrHistoryJobSpoutTasks");
    JobHistorySpout jobHistorySpout = new JobHistorySpout(filter, appConfig);
    topologyBuilder.setSpout(spoutName, jobHistorySpout, tasks).setNumTasks(tasks);

    StormStreamSink jobSinkBolt = environment.getStreamSink("MAP_REDUCE_JOB_STREAM", config);
    String jobSinkBoltName = "JobKafkaSink";
    BoltDeclarer jobKafkaBoltDeclarer = topologyBuilder
            .setBolt(jobSinkBoltName, jobSinkBolt, jhfAppConf.getInt("stormConfig.jobKafkaSinkTasks"))
            .setNumTasks(jhfAppConf.getInt("stormConfig.jobKafkaSinkTasks"));
    String spoutToJobSinkName = spoutName + "_to_" + jobSinkBoltName;
    jobKafkaBoltDeclarer.shuffleGrouping(spoutName, spoutToJobSinkName);

    StormStreamSink taskAttemptSinkBolt = environment.getStreamSink("MAP_REDUCE_TASK_ATTEMPT_STREAM", config);
    String taskAttemptSinkBoltName = "TaskAttemptKafkaSink";
    BoltDeclarer taskAttemptKafkaBoltDeclarer = topologyBuilder
            .setBolt(taskAttemptSinkBoltName, taskAttemptSinkBolt,
                    jhfAppConf.getInt("stormConfig.taskAttemptKafkaSinkTasks"))
            .setNumTasks(jhfAppConf.getInt("stormConfig.taskAttemptKafkaSinkTasks"));
    String spoutToTaskAttemptSinkName = spoutName + "_to_" + taskAttemptSinkBoltName;
    taskAttemptKafkaBoltDeclarer.shuffleGrouping(spoutName, spoutToTaskAttemptSinkName);

    List<StreamPublisher> streamPublishers = new ArrayList<>();
    //streamPublishers.add(new JobStreamPublisher(spoutToJobSinkName));
    streamPublishers.add(new TaskAttemptStreamPublisher(spoutToTaskAttemptSinkName));
    streamPublishers.add(new JobRpcAnalysisStreamPublisher(spoutToJobSinkName));
    jobHistorySpout.setStreamPublishers(streamPublishers);

    return topologyBuilder.createTopology();
}
Example 7
Source File: HdfsAuditLogApplication.java From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfSensitivityJoinTasks = config.getInt(SENSITIVITY_JOIN_TASK_NUM);
    int numOfIPZoneJoinTasks = config.getInt(IPZONE_JOIN_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);

    builder.setSpout("ingest", spout, numOfSpoutTasks).setNumTasks(numOfSpoutTasks);

    // ---------------------
    // ingest -> parserBolt
    // ---------------------
    BaseRichBolt parserBolt = getParserBolt(config);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", parserBolt, numOfParserTasks)
            .setNumTasks(numOfParserTasks);
    boltDeclarer.shuffleGrouping("ingest");

    // ------------------------------
    // parserBolt -> sensitivityJoin
    // ------------------------------
    HdfsSensitivityDataEnrichBolt sensitivityDataJoinBolt = new HdfsSensitivityDataEnrichBolt(config);
    BoltDeclarer sensitivityDataJoinBoltDeclarer = builder
            .setBolt("sensitivityJoin", sensitivityDataJoinBolt, numOfSensitivityJoinTasks)
            .setNumTasks(numOfSensitivityJoinTasks);
    // sensitivityDataJoinBoltDeclarer.fieldsGrouping("parserBolt", new Fields("f1"));
    sensitivityDataJoinBoltDeclarer.shuffleGrouping("parserBolt");

    // ------------------------------
    // sensitivityJoin -> ipZoneJoin
    // ------------------------------
    IPZoneDataEnrichBolt ipZoneDataJoinBolt = new IPZoneDataEnrichBolt(config);
    BoltDeclarer ipZoneDataJoinBoltDeclarer = builder
            .setBolt("ipZoneJoin", ipZoneDataJoinBolt, numOfIPZoneJoinTasks)
            .setNumTasks(numOfIPZoneJoinTasks);
    // ipZoneDataJoinBoltDeclarer.fieldsGrouping("sensitivityJoin", new Fields("user"));
    ipZoneDataJoinBoltDeclarer.shuffleGrouping("sensitivityJoin");

    // ------------------------
    // ipZoneJoin -> kafkaSink
    // ------------------------
    StormStreamSink sinkBolt = environment.getStreamSink("HDFS_AUDIT_LOG_ENRICHED_STREAM", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks)
            .setNumTasks(numOfSinkTasks);
    kafkaBoltDeclarer.shuffleGrouping("ipZoneJoin");

    if (config.hasPath(TRAFFIC_MONITOR_ENABLED) && config.getBoolean(TRAFFIC_MONITOR_ENABLED)) {
        builder.setSpout("trafficSpout", environment.getStreamSource("HADOOP_JMX_RESOURCE_STREAM", config), 1)
                .setNumTasks(1);
        builder.setBolt("trafficParserBolt", new TrafficParserBolt(config), 1)
                .setNumTasks(1)
                .shuffleGrouping("trafficSpout");
        builder.setBolt("trafficSinkBolt", environment.getStreamSink("HDFS_AUDIT_LOG_TRAFFIC_STREAM", config), 1)
                .setNumTasks(1)
                .shuffleGrouping("trafficParserBolt");
    }
    return builder.createTopology();
}
Example 8
Source File: AbstractHdfsAuditLogApplication.java From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpoutProvider provider = new KafkaSpoutProvider();
    IRichSpout spout = provider.getSpout(config);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfParserTasks = config.getInt(PARSER_TASK_NUM);
    int numOfSensitivityJoinTasks = config.getInt(SENSITIVITY_JOIN_TASK_NUM);
    int numOfIPZoneJoinTasks = config.getInt(IPZONE_JOIN_TASK_NUM);
    int numOfSinkTasks = config.getInt(SINK_TASK_NUM);
    int numOfTrafficMonitorTasks = config.hasPath(TRAFFIC_MONITOR_TASK_NUM)
            ? config.getInt(TRAFFIC_MONITOR_TASK_NUM) : numOfParserTasks;

    builder.setSpout("ingest", spout, numOfSpoutTasks).setNumTasks(numOfSpoutTasks);

    // ---------------------
    // ingest -> parserBolt
    // ---------------------
    BaseRichBolt parserBolt = getParserBolt(config);
    BoltDeclarer boltDeclarer = builder.setBolt("parserBolt", parserBolt, numOfParserTasks)
            .setNumTasks(numOfParserTasks);
    boltDeclarer.shuffleGrouping("ingest");
    // Boolean useDefaultPartition = !config.hasPath("eagleProps.useDefaultPartition") || config.getBoolean("eagleProps.useDefaultPartition");
    // if (useDefaultPartition) {
    //     boltDeclarer.fieldsGrouping("ingest", new Fields(StringScheme.STRING_SCHEME_KEY));
    // } else {
    //     boltDeclarer.customGrouping("ingest", new CustomPartitionGrouping(createStrategy(config)));
    // }

    // ------------------------------
    // parserBolt -> sensitivityJoin
    // ------------------------------
    HdfsSensitivityDataEnrichBolt sensitivityDataJoinBolt = new HdfsSensitivityDataEnrichBolt(config);
    BoltDeclarer sensitivityDataJoinBoltDeclarer = builder
            .setBolt("sensitivityJoin", sensitivityDataJoinBolt, numOfSensitivityJoinTasks)
            .setNumTasks(numOfSensitivityJoinTasks);
    // sensitivityDataJoinBoltDeclarer.fieldsGrouping("parserBolt", new Fields("f1"));
    sensitivityDataJoinBoltDeclarer.shuffleGrouping("parserBolt");

    if (config.hasPath(TRAFFIC_MONITOR_ENABLED) && config.getBoolean(TRAFFIC_MONITOR_ENABLED)) {
        HadoopLogAccumulatorBolt auditLogAccumulator = new HadoopLogAccumulatorBolt(config);
        BoltDeclarer auditLogAccumulatorDeclarer = builder
                .setBolt("logAccumulator", auditLogAccumulator, numOfTrafficMonitorTasks);
        auditLogAccumulatorDeclarer.setNumTasks(numOfTrafficMonitorTasks).shuffleGrouping("parserBolt");
    }

    // ------------------------------
    // sensitivityJoin -> ipZoneJoin
    // ------------------------------
    IPZoneDataEnrichBolt ipZoneDataJoinBolt = new IPZoneDataEnrichBolt(config);
    BoltDeclarer ipZoneDataJoinBoltDeclarer = builder
            .setBolt("ipZoneJoin", ipZoneDataJoinBolt, numOfIPZoneJoinTasks)
            .setNumTasks(numOfIPZoneJoinTasks);
    // ipZoneDataJoinBoltDeclarer.fieldsGrouping("sensitivityJoin", new Fields("user"));
    ipZoneDataJoinBoltDeclarer.shuffleGrouping("sensitivityJoin");

    // ------------------------
    // ipZoneJoin -> kafkaSink
    // ------------------------
    StormStreamSink sinkBolt = environment.getStreamSink("hdfs_audit_log_stream", config);
    BoltDeclarer kafkaBoltDeclarer = builder.setBolt("kafkaSink", sinkBolt, numOfSinkTasks)
            .setNumTasks(numOfSinkTasks);
    kafkaBoltDeclarer.shuffleGrouping("ipZoneJoin");

    return builder.createTopology();
}
Example 9
Source File: TopologyRunner.java From opensoc-streaming with Apache License 2.0
private boolean initializeErrorIndexBolt(String component_name) {
    try {
        Class loaded_class = Class.forName(config.getString("bolt.error.indexing.adapter"));
        IndexAdapter adapter = (IndexAdapter) loaded_class.newInstance();

        String dateFormat = "yyyy.MM";
        if (config.containsKey("bolt.alerts.indexing.timestamp")) {
            dateFormat = config.getString("bolt.alerts.indexing.timestamp");
        }

        TelemetryIndexingBolt indexing_bolt = new TelemetryIndexingBolt()
                .withIndexIP(config.getString("es.ip"))
                .withIndexPort(config.getInt("es.port"))
                .withClusterName(config.getString("es.clustername"))
                .withIndexName(config.getString("bolt.error.indexing.indexname"))
                .withDocumentName(config.getString("bolt.error.indexing.documentname"))
                .withIndexTimestamp(dateFormat)
                .withBulk(config.getInt("bolt.error.indexing.bulk"))
                .withIndexAdapter(adapter)
                .withMetricConfiguration(config);

        BoltDeclarer declarer = builder
                .setBolt(component_name, indexing_bolt,
                        config.getInt("bolt.error.indexing.parallelism.hint"))
                .setNumTasks(config.getInt("bolt.error.indexing.num.tasks"));

        // Subscribe to the "error" stream of every component that can emit errors.
        for (String component : errorComponents) {
            declarer.shuffleGrouping(component, "error");
        }

        return true;
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    }
}