org.apache.storm.generated.StormTopology Java Examples
The following examples show how to use
org.apache.storm.generated.StormTopology.
Each example is taken from an open source project; the source file and license are noted above each snippet.
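As the examples below illustrate, a StormTopology is usually obtained from a TopologyBuilder (or a Trident TridentTopology) and then handed to a LocalCluster or StormSubmitter. For orientation, here is a minimal sketch of that pattern; MySpout and MyBolt are hypothetical stand-ins for your own components.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.topology.TopologyBuilder;

public class MinimalTopology {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // MySpout and MyBolt are hypothetical placeholders for your own components
        builder.setSpout("spout", new MySpout(), 1);
        builder.setBolt("bolt", new MyBolt(), 1).shuffleGrouping("spout");

        // createTopology() yields the Thrift-generated StormTopology used throughout this page
        StormTopology topology = builder.createTopology();

        // run in-process; on a real cluster you would call StormSubmitter.submitTopology instead
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("minimal-topology", new Config(), topology);
    }
}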
Example #1
Source File: EcoTest.java From incubator-heron with Apache License 2.0
@Test
public void testSubmit_StormTopologyType_BehavesAsExpected() throws Exception {
    FileInputStream mockStream = PowerMockito.mock(FileInputStream.class);
    FileInputStream mockPropsStream = PowerMockito.mock(FileInputStream.class);
    final String topologyName = "the name";
    EcoTopologyDefinition topologyDefinition = new EcoTopologyDefinition();
    topologyDefinition.setName(topologyName);

    when(mockEcoParser.parseFromInputStream(eq(mockStream), eq(mockPropsStream), eq(false)))
            .thenReturn(topologyDefinition);

    subject.submit(mockStream, mockPropsStream, false);

    verify(mockEcoParser).parseFromInputStream(same(mockStream), same(mockPropsStream), eq(false));
    verify(mockEcoSubmitter).submitStormTopology(any(String.class), any(Config.class), any(StormTopology.class));
}
Example #2
Source File: TridentMinMaxOfDevicesTopology.java From storm-net-adapter with Apache License 2.0
/**
 * Creates a topology with device-id and count (which are whole numbers) as tuple fields in a stream,
 * and finally generates result streams based on min and max of the device-id and count values.
 */
public static StormTopology buildDevicesTopology() {
    String deviceID = "device-id";
    String count = "count";
    Fields allFields = new Fields(deviceID, count);

    RandomNumberGeneratorSpout spout = new RandomNumberGeneratorSpout(allFields, 10, 1000);

    TridentTopology topology = new TridentTopology();
    Stream devicesStream = topology.newStream("devicegen-spout", spout)
            .each(allFields, new Debug("##### devices"));

    devicesStream.minBy(deviceID)
            .each(allFields, new Debug("#### device with min id"));

    devicesStream.maxBy(count)
            .each(allFields, new Debug("#### device with max count"));

    return topology.build();
}
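For a quick local run of the topology above, a minimal runner in the style of the other examples on this page might look like this (a sketch; the topology name and sleep interval are arbitrary):

public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);

    // run the devices topology on an in-process cluster for a minute, then shut down
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("devices-min-max", conf, buildDevicesTopology());
    Thread.sleep(60000L);
    cluster.killTopology("devices-min-max");
    cluster.shutdown();
}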
Example #3
Source File: MesosCommonTest.java From storm with Apache License 2.0
@Test
public void testGetFullTopologyConfig() throws Exception {
    Map nimbusConf = new HashMap<>();
    nimbusConf.put("TEST_NIMBUS_CONFIG", 1);
    nimbusConf.put("TEST_TOPOLOGY_OVERRIDE", 2);

    Map topologyConf = new HashMap<>();
    TestUtils.initializeStormTopologyConfig(topologyConf);
    topologyConf.put("TEST_TOPOLIGY_CONFIG", 3);
    topologyConf.put("TEST_TOPOLOGY_OVERRIDE", 4);
    TopologyDetails info = new TopologyDetails("t1", topologyConf, new StormTopology(), 2, topologyOwner);

    Map result = MesosCommon.getFullTopologyConfig(nimbusConf, info);

    Map expectedResult = new HashMap<>();
    TestUtils.initializeStormTopologyConfig(expectedResult);
    expectedResult.put("TEST_NIMBUS_CONFIG", 1);
    expectedResult.put("TEST_TOPOLIGY_CONFIG", 3);
    expectedResult.put("TEST_TOPOLOGY_OVERRIDE", 4);
    assertEquals(result, expectedResult);
}
Example #4
Source File: StormTopologyUtil.java From atlas with Apache License 2.0
public static Map<String, Set<String>> getAdjacencyMap(StormTopology topology, boolean removeSystemComponent) {
    Map<String, Set<String>> adjacencyMap = new HashMap<>();

    for (Map.Entry<String, Bolt> entry : topology.get_bolts().entrySet()) {
        String boltName = entry.getKey();
        Map<GlobalStreamId, Grouping> inputs = entry.getValue().get_common().get_inputs();

        for (Map.Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
            String inputComponentId = input.getKey().get_componentId();
            Set<String> components = adjacencyMap.containsKey(inputComponentId)
                    ? adjacencyMap.get(inputComponentId) : new HashSet<String>();

            components.add(boltName);
            components = removeSystemComponent ? removeSystemComponents(components) : components;

            if (!removeSystemComponent || !isSystemComponent(inputComponentId)) {
                adjacencyMap.put(inputComponentId, components);
            }
        }
    }

    return adjacencyMap;
}
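To see what this helper returns, you can print the adjacency map of any built topology; a minimal sketch, assuming a TopologyBuilder wired up as in the other examples on this page:

StormTopology topology = builder.createTopology(); // builder configured elsewhere
Map<String, Set<String>> adjacency = StormTopologyUtil.getAdjacencyMap(topology, true);
for (Map.Entry<String, Set<String>> edge : adjacency.entrySet()) {
    // each entry maps a component id to the bolts consuming its output
    System.out.println(edge.getKey() + " -> " + edge.getValue());
}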
Example #5
Source File: StormAtlasHook.java From atlas with Apache License 2.0
private void addTopologyOutputs(StormTopology stormTopology, String topologyOwner, Map stormConf,
                                AtlasEntity topology, AtlasEntityExtInfo entityExtInfo) {
    List<AtlasEntity> outputs = new ArrayList<>();
    Map<String, Bolt> bolts = stormTopology.get_bolts();
    Set<String> boltNames = StormTopologyUtil.getTerminalUserBoltNames(stormTopology);

    for (String boltName : boltNames) {
        Serializable instance = Utils.javaDeserialize(bolts.get(boltName).get_bolt_object().get_serialized_java(), Serializable.class);
        String dsType = instance.getClass().getSimpleName();
        AtlasEntity dsEntity = addDataSet(dsType, topologyOwner, instance, stormConf, entityExtInfo);

        if (dsEntity != null) {
            outputs.add(dsEntity);
        }
    }

    topology.setRelationshipAttribute("outputs", AtlasTypeUtil.getAtlasRelatedObjectIds(outputs, RELATIONSHIP_PROCESS_DATASET_OUTPUTS));
}
Example #6
Source File: BeanDefinitionTest.java From breeze with Apache License 2.0
@Test
public void brokenWithUnboundBolt() throws Exception {
    beansXml = "<breeze:topology id='t1'>"
            + "<breeze:spout id='s1' beanType='eu.icolumbo.breeze.TestBean' signature='ping()' outputFields='feed'/>"
            + "<breeze:bolt id='b1' beanType='eu.icolumbo.breeze.TestBean' signature='echo(other)'/>"
            + "</breeze:topology>";
    refresh();
    try {
        getBean(StormTopology.class);
        fail("no exception");
    } catch (BeanCreationException e) {
        Throwable cause = e.getCause();
        assertNotNull("cause", cause);
        String expected = "Can't resolve all input fields for: [[bolt 'b1']]";
        assertEquals(expected, cause.getMessage());
    }
}
Example #7
Source File: ParserBoltTest.java From logparser with Apache License 2.0
@Test
public void runRest() throws InterruptedException, NoSuchMethodException {
    TopologyBuilder builder = new TopologyBuilder();
    // ----------
    builder.setSpout("Spout", new TestApacheLogsSpout());
    // ----------
    HttpdLoglineParserBolt parserBolt = new HttpdLoglineParserBolt(TestCase.getLogFormat(), INPUT_FIELD_NAME, OUTPUT_FIELD_NAME);
    builder.setBolt("Parser", parserBolt, 1).shuffleGrouping("Spout");
    // ----------
    builder.setBolt("Printer", new ValidateOutput(), 1).shuffleGrouping("Parser");
    // ----------
    StormTopology topology = builder.createTopology();

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Unit test", new HashMap<String, String>(), topology);

    Thread.sleep(10000L); // Run for 10 seconds

    cluster.killTopology("Unit test");
    cluster.shutdown();
}
Example #8
Source File: AbstractStormSuite.java From elasticsearch-hadoop with Apache License 2.0
public static void run(final String name, final StormTopology topo, final Counter hasCompleted) throws Exception {
    Thread th = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                start(name, topo);
                hasCompleted.waitForZero(TimeValue.timeValueSeconds(20));
            } finally {
                stop(name);
            }
        }
    }, "test-storm-runner");
    th.setDaemon(true);

    copyPropertiesIntoCfg(cfg);
    th.start();
}
Example #9
Source File: StormAtlasHook.java From atlas with Apache License 2.0
@Override
public void notify(TopologyInfo topologyInfo, Map stormConf, StormTopology stormTopology)
        throws IllegalAccessException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> StormAtlasHook.notify({}, {}, {})", topologyInfo, stormConf, stormTopology);
    }

    try {
        activatePluginClassLoader();
        stormHook.notify(topologyInfo, stormConf, stormTopology);
    } finally {
        deactivatePluginClassLoader();
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("<== StormAtlasHook.notify({}, {}, {})", topologyInfo, stormConf, stormTopology);
    }
}
Example #10
Source File: TridentWindowingInmemoryStoreTopology.java From storm-net-adapter with Apache License 2.0
public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig) throws Exception {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();

    Stream stream = topology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple input) {
                    LOG.info("Received tuple: [{}]", input);
                }
            });

    return topology.build();
}
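The storm-starter version of this example typically drives buildTopology with an in-memory store and a count-based window. A sketch along those lines, under the assumption that InMemoryWindowsStoreFactory and SlidingCountWindow from the org.apache.storm.trident.windowing packages are available in your Storm version:

// windowed word count over an in-memory store: 1000-tuple window sliding every 100 tuples
WindowsStoreFactory mapState = new InMemoryWindowsStoreFactory();
StormTopology topology = buildTopology(mapState, SlidingCountWindow.of(1000, 100));

Config conf = new Config();
conf.setMaxSpoutPending(20);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("wordCounterWithWindowing", conf, topology);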
Example #11
Source File: TridentHBaseWindowingStoreTopology.java From storm-net-adapter with Apache License 2.0
public static StormTopology buildTopology(WindowsStoreFactory windowsStore) throws Exception {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();

    Stream stream = topology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(TumblingCountWindow.of(1000), windowsStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple input) {
                    LOG.info("Received tuple: [{}]", input);
                }
            });

    return topology.build();
}
Example #12
Source File: FullPullerTopology.java From DBus with Apache License 2.0
public static void main(String[] args) throws Exception {
    int result = parseCommandArgs(args);
    if (result != 0) {
        return;
    }

    FullPullPropertiesHolder.initialize(FullPullConstants.FULL_SPLITTER_TYPE, zkConnect,
            FullPullConstants.TOPOLOGY_ROOT + "/" + FullPullConstants.FULL_SPLITTING_PROPS_ROOT);
    FullPullPropertiesHolder.initialize(FullPullConstants.FULL_PULLER_TYPE, zkConnect,
            FullPullConstants.TOPOLOGY_ROOT + "/" + FullPullConstants.FULL_PULLING_PROPS_ROOT);

    commonConfSplit = FullPullPropertiesHolder.getCommonConf(FullPullConstants.FULL_SPLITTER_TYPE, fullSplitterTopologyId);
    commonConfPull = FullPullPropertiesHolder.getCommonConf(FullPullConstants.FULL_PULLER_TYPE, fullPullerTopologyId);
    splittingBoltParallel = Integer.valueOf(commonConfSplit.getProperty(FullPullConstants.SPLITTING_BOLT_PARALLEL));
    pullingBoltParallel = Integer.valueOf(commonConfPull.getProperty(FullPullConstants.PULLING_BOLT_PARALLEL));

    // build the topology
    FullPullerTopology topology = new FullPullerTopology();
    StormTopology topo = topology.buildTopology(type);
    topology.start(topo, runAsLocal);
}
Example #13
Source File: FullPullerTopology.java From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(FullPullConstants.FULL_SPLITTER_TOPOLOGY_ID, fullSplitterTopologyId);
    conf.put(FullPullConstants.FULL_PULLER_TOPOLOGY_ID, fullPullerTopologyId);
    conf.put(FullPullConstants.DS_NAME, topologyId);
    conf.put(FullPullConstants.ZKCONNECT, zkConnect);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, commonConfSplit.getProperty(FullPullConstants.TOPOLOGY_WORKER_CHILDOPTS));

    // set the message timeout so that every split can finish pulling its data within that window
    conf.setMessageTimeoutSecs(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setNumWorkers(Integer.parseInt(commonConfSplit.getProperty(FullPullConstants.STORM_NUM_WORKERS)));
    conf.setDebug(true);

    if (runAsLocal) {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
Example #14
Source File: DBusLogProcessorTopology.java From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(com.creditease.dbus.commons.Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.setMessageTimeoutSecs(Integer.parseInt(properties.getProperty(Constants.LOG_MESSAGE_TIMEOUT)));
    //conf.setMaxSpoutPending(30);
    conf.setDebug(true);
    conf.setNumWorkers(Integer.parseInt(properties.getProperty(Constants.LOG_NUMWORKERS)));

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
Example #15
Source File: SinkTopology.java From DBus with Apache License 2.0
private StormTopology buildTopology() throws Exception {
    loadSinkerConf();
    Integer spoutSize = Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_KAFKA_READ_SPOUT_PARALLEL));
    Integer boltSize = Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_WRITE_BOUT_PARALLEL));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("SinkerKafkaReadSpout", new SinkerKafkaReadSpout(), spoutSize);
    builder.setBolt("SinkerWriteBolt", new SinkerWriteBolt(), boltSize)
            .fieldsGrouping("SinkerKafkaReadSpout", "dataStream", new Fields("ns"))
            .allGrouping("SinkerKafkaReadSpout", "ctrlStream");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(1);

    return builder.createTopology();
}
Example #16
Source File: TridentWordCount.java From storm-net-adapter with Apache License 2.0
public static StormTopology buildTopology() {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(16);

    topology.newDRPCStream("words")
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .project(new Fields("word", "count"));

    return topology.build();
}
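Because the topology registers a DRPC stream named "words", the counts can be queried once it is deployed alongside a DRPC server. A sketch of such a query, assuming org.apache.storm.utils.DRPCClient and a server at the hypothetical host drpc-host on the default port 3772:

Config conf = new Config();
// host and port below are assumptions about your DRPC server location
DRPCClient client = new DRPCClient(conf, "drpc-host", 3772);
// the argument string is split into words and looked up against the persistent counts
String counts = client.execute("words", "cat the dog jumped");
System.out.println(counts);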
Example #17
Source File: SinkTopology.java From DBus with Apache License 2.0
private void start(StormTopology topology, boolean runAsLocal) throws Exception {
    Config conf = new Config();
    conf.put(Constants.ZOOKEEPER_SERVERS, zkConnect);
    conf.put(Constants.TOPOLOGY_ID, topologyId);
    conf.put(Constants.SINK_TYPE, sinkType);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, sinkerConf.getProperty(SinkerConstants.TOPOLOGY_WORKER_CHILDOPTS));
    conf.setMessageTimeoutSecs(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_MESSAGE_TIMEOUT)));
    conf.setMaxSpoutPending(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_MAX_SPOUT_PENDING)));
    conf.setDebug(true);
    conf.setNumWorkers(Integer.parseInt(sinkerConf.getProperty(SinkerConstants.STORM_NUM_WORKERS)));

    if (runAsLocal) {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, conf, topology);
    }
}
Example #18
Source File: FluxTopologyComponent.java From metron with Apache License 2.0
private void startTopology(String topologyName, File topologyLoc, File templateFile, Properties properties)
        throws IOException, ClassNotFoundException, NoSuchMethodException, InvocationTargetException,
        InstantiationException, IllegalAccessException, TException, NoSuchFieldException {
    TopologyDef topologyDef = loadYaml(topologyName, topologyLoc, templateFile, properties);
    Config conf = FluxBuilder.buildConfig(topologyDef);
    ExecutionContext context = new ExecutionContext(topologyDef, conf);
    StormTopology topology = FluxBuilder.buildTopology(context);
    assertNotNull(topology);
    topology.validate();

    try {
        stormCluster.submitTopology(topologyName, conf, topology);
    } catch (Exception nne) {
        try {
            Thread.sleep(2000);
        } catch (InterruptedException e) {
        }
        stormCluster.submitTopology(topologyName, conf, topology);
    }
}
Example #19
Source File: StatisticTopology.java From storm-statistic with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    /**
     * Set up the DAG (directed acyclic graph) of spouts and bolts
     */
    KafkaSpout kafkaSpout = createKafkaSpout();
    builder.setSpout("id_kafka_spout", kafkaSpout);
    builder.setBolt("id_convertIp_bolt", new ConvertIPBolt()).shuffleGrouping("id_kafka_spout");    // specify the upstream component via the chosen stream grouping
    builder.setBolt("id_statistic_bolt", new StatisticBolt()).shuffleGrouping("id_convertIp_bolt"); // specify the upstream component via the chosen stream grouping

    // build the topology with the builder
    StormTopology topology = builder.createTopology();
    String topologyName = KafkaStormTopology.class.getSimpleName();  // name of the topology
    Config config = new Config();  // Config extends HashMap but wraps some basic configuration

    // start the topology: use LocalCluster locally, StormSubmitter on a cluster
    if (args == null || args.length < 1) {  // no arguments: local mode; with arguments: cluster mode
        LocalCluster localCluster = new LocalCluster();  // local development mode creates a LocalCluster
        localCluster.submitTopology(topologyName, config, topology);
    } else {
        StormSubmitter.submitTopology(topologyName, config, topology);
    }
}
Example #20
Source File: StormSubmitter.java From twister2 with Apache License 2.0
/**
 * Submits a topology to run on the cluster. A topology runs forever or until
 * explicitly killed.
 *
 * @param name the name of the storm.
 * @param stormConfig the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 */
@SuppressWarnings("rawtypes")
public static void submitTopology(String name, Map stormConfig, StormTopology topology) {
    edu.iu.dsc.tws.api.config.Config config = ResourceAllocator.loadConfig(new HashMap<>());

    // build JobConfig
    JobConfig jobConfig = new JobConfig();
    jobConfig.putAll(stormConfig);

    Gson gson = new Gson();
    String tg = gson.toJson(topology.getT2ComputeGraph());
    jobConfig.put("storm-topology", topology.getT2ComputeGraph());

    Twister2Job.Twister2JobBuilder jobBuilder = Twister2Job.newBuilder();
    jobBuilder.setJobName(name);
    jobBuilder.setWorkerClass(Twister2StormWorker.class.getName());
    jobBuilder.setConfig(jobConfig);
    jobBuilder.addComputeResource(1, 512, 1);

    // now submit the job
    Twister2Submitter.submitJob(jobBuilder.build(), config);
}
Example #21
Source File: GeneralTopologyContext.java From incubator-heron with Apache License 2.0
/**
 * Gets the Thrift object representing the topology.
 *
 * @return the Thrift definition representing the topology
 */
@SuppressWarnings("deprecation")
public StormTopology getRawTopology() {
    StormTopology stormTopology = new StormTopology();

    Map<String, SpoutSpec> spouts = new HashMap<>();
    for (TopologyAPI.Spout spout : this.delegate.getRawTopology().getSpoutsList()) {
        spouts.put(spout.getComp().getName(), new SpoutSpec(spout));
    }

    Map<String, Bolt> bolts = new HashMap<>();
    for (TopologyAPI.Bolt bolt : this.delegate.getRawTopology().getBoltsList()) {
        bolts.put(bolt.getComp().getName(), new Bolt(bolt));
    }

    stormTopology.set_spouts(spouts);
    stormTopology.set_bolts(bolts);
    return stormTopology;
}
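The returned Thrift object can then be inspected through its generated accessors, which mirror the set_spouts/set_bolts calls above; a small sketch, with a hypothetical context variable holding the GeneralTopologyContext:

StormTopology raw = context.getRawTopology(); // context is a GeneralTopologyContext
for (String spoutId : raw.get_spouts().keySet()) {
    System.out.println("spout: " + spoutId);
}
for (String boltId : raw.get_bolts().keySet()) {
    System.out.println("bolt: " + boltId);
}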
Example #22
Source File: StormAtlasHook.java From incubator-atlas with Apache License 2.0
private void addTopologyOutputs(Referenceable topologyReferenceable, StormTopology stormTopology,
                                String topologyOwner, Map stormConf, List<Referenceable> dependentEntities) throws Exception {
    final ArrayList<Referenceable> outputDataSets = new ArrayList<>();

    Map<String, Bolt> bolts = stormTopology.get_bolts();
    Set<String> terminalBoltNames = StormTopologyUtil.getTerminalUserBoltNames(stormTopology);
    for (String terminalBoltName : terminalBoltNames) {
        Serializable instance = Utils.javaDeserialize(bolts.get(terminalBoltName)
                .get_bolt_object().get_serialized_java(), Serializable.class);

        String dataSetType = instance.getClass().getSimpleName();
        final Referenceable datasetRef = createDataSet(dataSetType, topologyOwner, instance, stormConf, dependentEntities);
        if (datasetRef != null) {
            outputDataSets.add(datasetRef);
        }
    }

    topologyReferenceable.set("outputs", outputDataSets);
}
Example #23
Source File: KafkaStormIntegrationTest.java From incubator-retired-pirk with Apache License 2.0
private TestJob createPirkTestJob(final Config config) {
    final SpoutConfig kafkaConfig = setUpTestKafkaSpout(config);
    return new TestJob() {
        StormTopology topology = PirkTopology.getPirkTopology(kafkaConfig);

        @Override
        public void run(ILocalCluster iLocalCluster) throws Exception {
            iLocalCluster.submitTopology("pirk_integration_test", config, topology);
            logger.info("Pausing for setup.");
            // Thread.sleep(4000);
            // KafkaProducer producer = new KafkaProducer<String,String>(createKafkaProducerConfig());
            // loadTestData(producer);
            // Thread.sleep(10000);
            while (OutputBolt.latch.getCount() == testCountDown) {
                Thread.sleep(1000);
            }
            testCountDown -= 1;
            logger.info("Finished...");
        }
    };
}
Example #24
Source File: LocalCluster.java From incubator-heron with Apache License 2.0
@Override
@SuppressWarnings({"rawtypes", "unchecked"})
public void submitTopology(String topoName, Map config, StormTopology stormTopology)
        throws AlreadyAliveException, InvalidTopologyException {
    assertNotAlive();

    this.topologyName = topoName;
    this.conf = config;
    this.topology = stormTopology;

    simulator.submitTopology(topoName, ConfigUtils.translateConfig(config), stormTopology.getStormTopology());
}
Example #25
Source File: StormTopologyUtil.java From incubator-atlas with Apache License 2.0
public static Map<String, Set<String>> getAdjacencyMap(StormTopology topology, boolean removeSystemComponent)
        throws Exception {
    Map<String, Set<String>> adjacencyMap = new HashMap<>();

    for (Map.Entry<String, Bolt> entry : topology.get_bolts().entrySet()) {
        String boltName = entry.getKey();
        Map<GlobalStreamId, Grouping> inputs = entry.getValue().get_common().get_inputs();

        for (Map.Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
            String inputComponentId = input.getKey().get_componentId();
            Set<String> components = adjacencyMap.containsKey(inputComponentId)
                    ? adjacencyMap.get(inputComponentId) : new HashSet<String>();

            components.add(boltName);
            components = removeSystemComponent ? removeSystemComponents(components) : components;

            if (!removeSystemComponent || !isSystemComponent(inputComponentId)) {
                adjacencyMap.put(inputComponentId, components);
            }
        }
    }

    return adjacencyMap;
}
Example #26
Source File: StormTopologyUtil.java From incubator-atlas with Apache License 2.0
public static Set<String> getTerminalUserBoltNames(StormTopology topology) {
    Set<String> terminalBolts = new HashSet<>();
    Set<String> inputs = new HashSet<>();
    for (Map.Entry<String, Bolt> entry : topology.get_bolts().entrySet()) {
        String name = entry.getKey();
        Set<GlobalStreamId> inputsForBolt = entry.getValue().get_common().get_inputs().keySet();
        if (!isSystemComponent(name)) {
            for (GlobalStreamId streamId : inputsForBolt) {
                inputs.add(streamId.get_componentId());
            }
        }
    }

    for (String boltName : topology.get_bolts().keySet()) {
        if (!isSystemComponent(boltName) && !inputs.contains(boltName)) {
            terminalBolts.add(boltName);
        }
    }

    return terminalBolts;
}
Example #27
Source File: TridentMinMaxOfDevicesTopology.java From storm-net-adapter with Apache License 2.0
/**
 * Creates a topology which demonstrates min/max operations on tuples of a stream which contains vehicle and driver fields
 * with values {@link TridentMinMaxOfDevicesTopology.Vehicle} and {@link TridentMinMaxOfDevicesTopology.Driver} respectively.
 */
public static StormTopology buildVehiclesTopology() {
    Fields driverField = new Fields(Driver.FIELD_NAME);
    Fields vehicleField = new Fields(Vehicle.FIELD_NAME);
    Fields allFields = new Fields(Vehicle.FIELD_NAME, Driver.FIELD_NAME);

    FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    Stream vehiclesStream = topology.newStream("spout1", spout)
            .each(allFields, new Debug("##### vehicles"));

    Stream slowVehiclesStream = vehiclesStream
            .min(new SpeedComparator())
            .each(vehicleField, new Debug("#### slowest vehicle"));

    Stream slowDriversStream = slowVehiclesStream
            .project(driverField)
            .each(driverField, new Debug("##### slowest driver"));

    vehiclesStream
            .max(new SpeedComparator())
            .each(vehicleField, new Debug("#### fastest vehicle"))
            .project(driverField)
            .each(driverField, new Debug("##### fastest driver"));

    vehiclesStream
            .max(new EfficiencyComparator())
            .each(vehicleField, new Debug("#### efficient vehicle"));

    return topology.build();
}
Example #28
Source File: StormAtlasHook.java From incubator-atlas with Apache License 2.0
private void addGraphConnections(StormTopology stormTopology, Map<String, Referenceable> nodeEntities) throws Exception {
    // adds connections between spouts and bolts
    Map<String, Set<String>> adjacencyMap = StormTopologyUtil.getAdjacencyMap(stormTopology, true);

    for (Map.Entry<String, Set<String>> entry : adjacencyMap.entrySet()) {
        String nodeName = entry.getKey();
        Set<String> adjacencyList = adjacencyMap.get(nodeName);
        if (adjacencyList == null || adjacencyList.isEmpty()) {
            continue;
        }

        // add outgoing links
        Referenceable node = nodeEntities.get(nodeName);
        ArrayList<String> outputs = new ArrayList<>(adjacencyList.size());
        outputs.addAll(adjacencyList);
        node.set("outputs", outputs);

        // add incoming links
        for (String adjacentNodeName : adjacencyList) {
            Referenceable adjacentNode = nodeEntities.get(adjacentNodeName);

            @SuppressWarnings("unchecked")
            ArrayList<String> inputs = (ArrayList<String>) adjacentNode.get("inputs");
            if (inputs == null) {
                inputs = new ArrayList<>();
            }
            inputs.add(nodeName);
            adjacentNode.set("inputs", inputs);
        }
    }
}
Example #29
Source File: EcoSubmitterTest.java From incubator-heron with Apache License 2.0
@Test
public void submitStormTopology_AllGood_BehavesAsExpected() throws Exception {
    Config config = new Config();
    StormTopology topology = new StormTopology();
    PowerMockito.spy(StormSubmitter.class);
    PowerMockito.doNothing().when(StormSubmitter.class, "submitTopology",
            any(String.class), any(Config.class), any(StormTopology.class));

    subject.submitStormTopology("name", config, topology);

    PowerMockito.verifyStatic(times(1));
    StormSubmitter.submitTopology(anyString(), any(Config.class), any(StormTopology.class));
}
Example #30
Source File: StormAtlasHookIT.java From incubator-atlas with Apache License 2.0
public void testAddEntities() throws Exception {
    StormTopology stormTopology = StormTestUtil.createTestTopology();
    StormTestUtil.submitTopology(stormCluster, TOPOLOGY_NAME, stormTopology);
    LOG.info("Submitted topology {}", TOPOLOGY_NAME);

    // todo: test if topology metadata is registered in atlas
    String guid = getTopologyGUID();
    Assert.assertNotNull(guid);
    LOG.info("GUID is {}", guid);

    Referenceable topologyReferenceable = atlasClient.getEntity(guid);
    Assert.assertNotNull(topologyReferenceable);
}