Java Code Examples for backtype.storm.task.TopologyContext#getThisTaskIndex()
The following examples show how to use backtype.storm.task.TopologyContext#getThisTaskIndex(). Each example is taken from an open-source project; the source file, originating project, and license are listed above the code.
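A pattern common to most of the examples below is to pair getThisTaskIndex() with context.getComponentTasks(context.getThisComponentId()).size() so that each parallel task of a component claims a disjoint slice of the available work (files, partitions, DRPC servers, and so on). The sketch below illustrates that round-robin assignment in isolation; it is not taken from any of the projects listed here, and the class name, the NUM_PARTITIONS constant, and the partition bookkeeping are hypothetical placeholders.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;

// Minimal sketch (hypothetical class): each task claims every numTasks-th
// partition starting at its own task index, so the partitions are split
// evenly across the parallel instances of this component.
public class PartitionAwareSpout extends BaseRichSpout {

    private static final int NUM_PARTITIONS = 8; // hypothetical partition count
    private List<Integer> myPartitions;

    @SuppressWarnings("rawtypes")
    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        int taskIndex = context.getThisTaskIndex();                                    // 0-based index of this task within its component
        int numTasks = context.getComponentTasks(context.getThisComponentId()).size(); // total parallel tasks of this component
        myPartitions = new ArrayList<Integer>();
        for (int p = taskIndex; p < NUM_PARTITIONS; p += numTasks) {
            myPartitions.add(p); // this task will only ever read these partitions
        }
    }

    @Override
    public void nextTuple() {
        // poll only the partitions in myPartitions and emit their records
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // declare output fields here; omitted in this sketch
    }
}

With a parallelism of 3, for example, task 0 would own partitions 0, 3 and 6, task 1 would own 1, 4 and 7, and task 2 would own 2 and 5; the same striding appears in PartitionCoordinator, ImageFetcher, and FileFrameFetcher below.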
Example 1
Source File: CorrelationSpout.java From eagle with Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("open method invoked");
    }
    this.conf = conf;
    this.context = context;
    this.collector = collector;
    this.taskIndex = context.getThisTaskIndex();

    // initialize an empty SpoutSpec
    cachedSpoutSpec = new SpoutSpec(topologyId, new HashMap<>(), new HashMap<>(), new HashMap<>());

    changeNotifyService.registerListener(this);
    changeNotifyService.init(config, MetadataType.SPOUT);

    // register KafkaSpout metric
    kafkaSpoutMetric = new KafkaSpoutMetric();
    context.registerMetric("kafkaSpout", kafkaSpoutMetric, 60);

    this.serializer = Serializers.newPartitionedEventSerializer(this);
}
Example 2
Source File: BatchMetaSpout.java From jstorm with Apache License 2.0
@Override
public void prepare(Map stormConf, TopologyContext context) {
    this.conf = stormConf;
    taskName = context.getThisComponentId() + "_" + context.getThisTaskId();
    taskIndex = context.getThisTaskIndex();
    taskParallel = context.getComponentTasks(context.getThisComponentId()).size();

    try {
        initMetaClient();
    } catch (Exception e) {
        LOG.info("Failed to init Meta Client,", e);
        throw new RuntimeException(e);
    }

    LOG.info(taskName + " successfully do prepare ");
}
Example 3
Source File: RefreshingImageFetcher.java From StormCV with Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
public void prepare(Map stormConf, TopologyContext context) throws Exception {
    frameQueue = new LinkedBlockingQueue<Frame>();
    if (stormConf.containsKey(StormCVConfig.STORMCV_FRAME_ENCODING)) {
        imageType = (String) stormConf.get(StormCVConfig.STORMCV_FRAME_ENCODING);
    }

    int nrTasks = context.getComponentTasks(context.getThisComponentId()).size();
    int taskIndex = context.getThisTaskIndex();

    // change the list based on the number of tasks working on it
    if (this.locations != null && this.locations.size() > 0) {
        int batchSize = (int) Math.floor(locations.size() / nrTasks) + 1;
        int start = batchSize * taskIndex;
        locations = locations.subList(start, Math.min(start + batchSize, locations.size()));
    }

    readers = new ArrayList<ImageReader>();
}
Example 4
Source File: StreamFrameFetcher.java From StormCV with Apache License 2.0
@SuppressWarnings({ "rawtypes" })
@Override
public void prepare(Map conf, TopologyContext context) throws Exception {
    this.id = context.getThisComponentId();
    int nrTasks = context.getComponentTasks(id).size();
    int taskIndex = context.getThisTaskIndex();

    if (conf.containsKey(StormCVConfig.STORMCV_FRAME_ENCODING)) {
        imageType = (String) conf.get(StormCVConfig.STORMCV_FRAME_ENCODING);
    }

    // change the list based on the number of tasks working on it
    if (this.locations != null && this.locations.size() > 0) {
        int batchSize = (int) Math.floor(locations.size() / nrTasks);
        int start = batchSize * taskIndex;
        locations = locations.subList(start, Math.min(start + batchSize, locations.size()));
    }
}
Example 5
Source File: InOrderTestSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.task = context.getThisTaskIndex();
    this.metricClient = new MetricClient(context);
    this.emitCounter = metricClient.registerCounter(InOrderTestMetricsDef.METRIC_SPOUT_EMIT);
    this.emitCounter.setOp(AsmMetric.MetricOp.LOG & AsmMetric.MetricOp.REPORT);
    LOG.info("open. task = " + task);
}
Example 6
Source File: OpaquePartitionedTransactionalSpoutExecutor.java From jstorm with Apache License 2.0
public Emitter(Map conf, TopologyContext context) {
    _emitter = _spout.getEmitter(conf, context);
    _index = context.getThisTaskIndex();
    _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
    _state = TransactionalState.newUserState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration());

    List<String> existingPartitions = _state.list("");
    for (String p : existingPartitions) {
        int partition = Integer.parseInt(p);
        if ((partition - _index) % _numTasks == 0) {
            _partitionStates.put(partition, new RotatingTransactionalState(_state, p));
        }
    }
}
Example 7
Source File: DRPCSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    _clients = new ArrayList<>();
    if (_local_drpc_id == null) {
        _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
        _futures = new LinkedList<>();

        int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
        int index = context.getThisTaskIndex();

        int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
        List<String> servers = NetWorkUtils.host2Ip((List<String>) conf.get(Config.DRPC_SERVERS));
        if (servers == null || servers.isEmpty()) {
            throw new RuntimeException("No DRPC servers configured for topology");
        }

        if (numTasks < servers.size()) {
            for (String s : servers) {
                _futures.add(_backround.submit(new Adder(s, port, conf)));
            }
        } else {
            int i = index % servers.size();
            _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
        }
    }
}
Example 8
Source File: PartitionCoordinator.java From jstorm with Apache License 2.0
private void createPartitionConsumers(Map conf, TopologyContext context) {
    partitionConsumerMap = new HashMap<Integer, PartitionConsumer>();
    int taskSize = context.getComponentTasks(context.getThisComponentId()).size();
    for (int i = context.getThisTaskIndex(); i < config.numPartitions; i += taskSize) {
        PartitionConsumer partitionConsumer = new PartitionConsumer(conf, config, i, zkState);
        partitionConsumers.add(partitionConsumer);
        partitionConsumerMap.put(i, partitionConsumer);
    }
}
Example 9
Source File: AbstractDataEnrichBolt.java From eagle with Apache License 2.0
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    // start external data retrieval
    try {
        ExternalDataJoiner joiner = new ExternalDataJoiner(lcm, config, context.getThisComponentId() + "." + context.getThisTaskIndex());
        joiner.start();
    } catch (Exception ex) {
        LOG.error("Fail bringing up quartz scheduler.", ex);
        throw new IllegalStateException(ex);
    }
}
Example 10
Source File: ImageFetcher.java From StormCV with Apache License 2.0
@SuppressWarnings({ "rawtypes" })
@Override
public void prepare(Map stormConf, TopologyContext context) throws Exception {
    this.connectorHolder = new ConnectorHolder(stormConf);
    if (stormConf.containsKey(StormCVConfig.STORMCV_FRAME_ENCODING)) {
        imageType = (String) stormConf.get(StormCVConfig.STORMCV_FRAME_ENCODING);
    }
    int nrTasks = context.getComponentTasks(context.getThisComponentId()).size();

    List<String> original = new ArrayList<String>();
    original.addAll(locations);
    locations.clear();
    for (String dir : original) {
        locations.addAll(expand(dir));
    }

    // change the list based on the number of tasks working on it
    List<String> filesToFetch = new ArrayList<String>();
    int i = context.getThisTaskIndex();
    while (i < locations.size()) {
        filesToFetch.add(locations.get(i));
        i += nrTasks;
    }
    this.locations = filesToFetch;
}
Example 11
Source File: FileFrameFetcher.java From StormCV with Apache License 2.0
@SuppressWarnings({ "rawtypes" })
@Override
public void prepare(Map conf, TopologyContext context) throws Exception {
    this.connectorHolder = new ConnectorHolder(conf);
    if (conf.containsKey(StormCVConfig.STORMCV_FRAME_ENCODING)) {
        imageType = (String) conf.get(StormCVConfig.STORMCV_FRAME_ENCODING);
    }

    List<String> original = new ArrayList<String>();
    original.addAll(locations);
    locations.clear();
    for (String dir : original) {
        locations.addAll(expand(dir));
    }

    int nrTasks = context.getComponentTasks(context.getThisComponentId()).size();

    // change the list based on the number of tasks working on it
    List<String> filesToFetch = new ArrayList<String>();
    int i = context.getThisTaskIndex();
    while (i < locations.size()) {
        filesToFetch.add(locations.get(i));
        i += nrTasks;
    }
    this.locations = filesToFetch;
}
Example 12
Source File: PartialMatcher.java From StormCV with Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
protected void prepareOpenCVOp(Map conf, TopologyContext context) throws Exception {
    this.connectorHolder = new ConnectorHolder(conf);
    matcher = DescriptorMatcher.create(matcherType);
    prototypes = new HashMap<Integer, String>();

    int nrTasks = context.getComponentTasks(context.getThisComponentId()).size();
    int taskIndex = context.getThisTaskIndex();

    List<String> original = new ArrayList<String>();
    original.addAll(protoLocations);
    protoLocations.clear();
    for (String dir : original) {
        protoLocations.addAll(expand(dir));
    }

    FileConnector fc = null;
    List<Mat> training = new ArrayList<Mat>();
    for (int i = taskIndex; i < protoLocations.size(); i += nrTasks) {
        String imgFile = protoLocations.get(i);
        fc = connectorHolder.getConnector(imgFile);
        fc.moveTo(imgFile);
        File imageFile = fc.getAsFile();
        BufferedImage img = ImageIO.read(imageFile);
        if (img == null) continue;
        Mat proto = calculateDescriptors(img);
        prototypes.put(training.size(), imgFile.substring(imgFile.lastIndexOf('/') + 1));
        training.add(proto);
        logger.info(this.getClass().getName() + "[" + taskIndex + "] " + imgFile + " loaded and prepared for matching");
        if (!(fc instanceof LocalFileConnector)) imageFile.delete();
    }
    matcher.add(training);
    matcher.train();
}
Example 13
Source File: InOrderDeliveryTest.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    _base = context.getThisTaskIndex();
}
Example 14
Source File: PartitionedTridentSpoutExecutor.java From jstorm with Apache License 2.0
public Emitter(String txStateId, Map conf, TopologyContext context) {
    _emitter = _spout.getEmitter(conf, context);
    _state = TransactionalState.newUserState(conf, txStateId);
    _index = context.getThisTaskIndex();
    _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
}
Example 15
Source File: OpaquePartitionedTridentSpoutExecutor.java From jstorm with Apache License 2.0
public Emitter(String txStateId, Map conf, TopologyContext context) {
    _emitter = _spout.getEmitter(conf, context);
    _index = context.getThisTaskIndex();
    _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
    _state = TransactionalState.newUserState(conf, txStateId);
}
Example 16
Source File: FeederBatchSpout.java From jstorm with Apache License 2.0
@Override
public Emitter getEmitter(String txStateId, Map conf, TopologyContext context) {
    return new FeederEmitter(context.getThisTaskIndex());
}
Example 17
Source File: KafkaSpoutWrapper.java From eagle with Apache License 2.0
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public void open(Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
    String topologyInstanceId = context.getStormId();
    ////// !!!! begin copying code from storm.kafka.KafkaSpout to here
    _collector = collector;

    Map stateConf = new HashMap(conf);
    List<String> zkServers = _spoutConfig.zkServers;
    if (zkServers == null) {
        zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    }
    Integer zkPort = _spoutConfig.zkPort;
    if (zkPort == null) {
        zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
    }
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, _spoutConfig.zkRoot);
    _state = new ZkState(stateConf);

    _connections = new DynamicPartitionConnections(_spoutConfig, KafkaUtils.makeBrokerReader(conf, _spoutConfig));

    // using TransactionalState like this is a hack
    int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
    if (_spoutConfig.hosts instanceof StaticHosts) {
        _coordinator = new StaticCoordinator(_connections, conf, _spoutConfig, _state, context.getThisTaskIndex(), totalTasks, topologyInstanceId);
    } else {
        _coordinator = new ZkCoordinator(_connections, conf, _spoutConfig, _state, context.getThisTaskIndex(), totalTasks, topologyInstanceId);
    }
    ////// !!!! end copying code from storm.kafka.KafkaSpout to here

    // add new topic to metric
    KafkaSpoutMetric.KafkaSpoutMetricContext metricContext = new KafkaSpoutMetric.KafkaSpoutMetricContext();
    metricContext.connections = _connections;
    metricContext.coordinator = _coordinator;
    metricContext.spoutConfig = _spoutConfig;
    kafkaSpoutMetric.addTopic(_spoutConfig.topic, metricContext);

    this.collectorWrapper = (SpoutOutputCollectorWrapper) collector;
}
Example 18
Source File: PartitionedTransactionalSpoutExecutor.java From jstorm with Apache License 2.0
public Emitter(Map conf, TopologyContext context) {
    _emitter = _spout.getEmitter(conf, context);
    _state = TransactionalState.newUserState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration());
    _index = context.getThisTaskIndex();
    _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
}