org.apache.spark.streaming.receiver.Receiver Java Examples

The following examples show how to use org.apache.spark.streaming.receiver.Receiver. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: ZkCoordinator.java    From kafka-spark-consumer with Apache License 2.0 (6 votes)
/**
 * Builds a coordinator for a single partition, capturing its collaborators
 * and fetching broker metadata once up front.
 *
 * @param connections    Kafka partition connection manager, stored for later use
 * @param config         consumer configuration; also drives the refresh interval
 * @param state          ZooKeeper-backed state, passed to the brokers reader
 * @param partitionId    id of the partition this coordinator is responsible for
 * @param receiver       Spark receiver that this coordinator feeds
 * @param restart        restart flag, stored as-is
 * @param messageHandler handler applied to consumed messages, stored as-is
 */
public ZkCoordinator(
  DynamicPartitionConnections connections,
  KafkaConfig config,
  ZkState state,
  int partitionId,
  Receiver<MessageAndMetadata> receiver,
  boolean restart,
  KafkaMessageHandler messageHandler) {
    // The class keeps the configuration under two field aliases.
    _config = config;
    _kafkaconfig = config;
    _connections = connections;
    _partitionOwner = partitionId;
    _receiver = receiver;
    _restart = restart;
    _messageHandler = messageHandler;
    // Refresh frequency is configured in seconds; convert to milliseconds.
    _refreshFreqMs = config._refreshFreqSecs * 1000;
    // Resolve broker metadata once at construction time.
    _reader = new DynamicBrokersReader(config, state);
    _brokerInfo = _reader.getBrokerInfo();
}
 
Example #2
Source File: KafkaSparkConsumer.java    From kafka-spark-consumer with Apache License 2.0 (5 votes)
/**
 * Creates a consumer that will feed Kafka messages into the given Spark
 * receiver. This is pure dependency capture: no connections are opened and
 * no state is read here.
 *
 * @param config         Kafka consumer configuration
 * @param zkState        ZooKeeper-backed state store
 * @param receiver       Spark receiver that consumed messages are pushed to
 * @param messageHandler handler applied to each consumed message
 */
public KafkaSparkConsumer(
        KafkaConfig config,
        ZkState zkState,
        Receiver<MessageAndMetadata<E>> receiver,
        KafkaMessageHandler messageHandler) {
    _messageHandler = messageHandler;
    _receiver = receiver;
    _state = zkState;
    _kafkaconfig = config;
}
 
Example #3
Source File: PartitionManager.java    From kafka-spark-consumer with Apache License 2.0 (4 votes)
/**
 * Manages consumption of a single Kafka partition: captures collaborators,
 * registers a consumer for the partition, recovers the last committed offset
 * from ZooKeeper (falling back to configuration when none is stored), and
 * initializes the emitted/enqueued offset cursors.
 *
 * @param connections    connection manager used to register this partition's consumer
 * @param state          ZooKeeper-backed state store for committed offsets
 * @param kafkaconfig    consumer configuration (also supplies _stateConf)
 * @param partitionId    the Kafka partition (host + partition number) to manage
 * @param receiver       Spark receiver that this manager feeds
 * @param restart        restart flag, stored as-is
 * @param messageHandler handler applied to consumed messages
 * @throws RuntimeException/Error rethrown if reading the offset node from ZK fails
 */
public PartitionManager(
        DynamicPartitionConnections connections,
        ZkState state,
        KafkaConfig kafkaconfig,
        Partition partitionId,
        Receiver<MessageAndMetadata> receiver,
        boolean restart,
        KafkaMessageHandler messageHandler) {
    _partition = partitionId;
    _connections = connections;
    _kafkaconfig = kafkaconfig;
    _stateConf = _kafkaconfig._stateConf;
    _consumerId = (String) _stateConf.get(Config.KAFKA_CONSUMER_ID);
    _state = state;
    _topic = (String) _stateConf.get(Config.KAFKA_TOPIC);
    // Register with the connection pool to obtain a consumer bound to this
    // partition's leader host.
    _consumer = connections.register(partitionId.host, partitionId.partition, _topic);
    _receiver = receiver;
    _restart = restart;
    _handler = messageHandler;

    // Recover the last processed offset from ZooKeeper, if one was stored.
    Long processOffset = null;
    String processPath = zkPath("offsets");

    try {
        // NOTE(review): decoded with Long.valueOf(new String(...)) below,
        // which uses the platform default charset — presumably ASCII digits,
        // but confirm against the writer side.
        byte[] pOffset = _state.readBytes(processPath);
        LOG.info("Read processed information from: {}", processPath);
        if (pOffset != null) {
            processOffset = Long.valueOf(new String(pOffset));
            LOG.info("Processed offset for Partition : {} is {}",_partition.partition, processOffset);
        }
    } catch (Throwable e) {
        // Read/parse failures are logged and rethrown — they do NOT fall
        // through to the config-based default below.
        LOG.warn("Error reading and/or parsing at ZkNode", e);
        throw e;
    }
    // No offset stored in ZK (node missing/empty) — determine the starting
    // offset from configuration instead.
    if (processOffset == null) {
        _lastComittedOffset =
                KafkaUtils.getOffset(
                        _consumer, _topic, _partition.partition, kafkaconfig._forceFromStart);
        LOG.info("No partition information found, using configuration to determine offset");
    } else {
        // Resume one past the last offset recorded as processed.
        _lastComittedOffset = processOffset + 1;
    }

  LOG.info("Starting Receiver  {} : {} from offset {}", _partition.host, _partition.partition, _lastComittedOffset);
  // Both cursors start at the committed offset; they advance as messages
  // are emitted/enqueued.
  _emittedToOffset = _lastComittedOffset;
  _lastEnquedOffset = _lastComittedOffset;
  setZkCoordinator();
}