org.apache.storm.hdfs.bolt.sync.CountSyncPolicy Java Examples
The following examples show how to use
org.apache.storm.hdfs.bolt.sync.CountSyncPolicy.
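Before the project examples, here is a minimal sketch of the CountSyncPolicy contract: the HDFS bolt calls mark(Tuple, long) once per written tuple, syncs the stream whenever mark() returns true, and then calls reset(). The demo class name below is hypothetical, and the sketch assumes the standard storm-hdfs SyncPolicy interface (boolean mark(Tuple tuple, long offset) / void reset()); passing null for the tuple mirrors what the ClonedSyncPolicyCreatorTest example below does.

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;
import org.apache.storm.hdfs.bolt.sync.SyncPolicy;

public class CountSyncPolicyDemo {
    public static void main(String[] args) {
        // Request a sync after every 3 tuples.
        SyncPolicy policy = new CountSyncPolicy(3);

        for (long offset = 1; offset <= 6; offset++) {
            // CountSyncPolicy ignores the tuple argument, so null is fine for a demo.
            boolean shouldSync = policy.mark(null, offset);
            System.out.println("tuple " + offset + " -> sync? " + shouldSync);
            if (shouldSync) {
                policy.reset(); // the bolt resets the policy after each sync
            }
        }
        // Prints: false, false, true, false, false, true
    }
}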
Example #1
Source File: SourceHandlerTest.java From metron with Apache License 2.0
@Test
public void testRotateOutputFile() throws IOException {
    SourceHandler handler = new SourceHandler(
        rotActions,
        new FileSizeRotationPolicy(10000, Units.MB), // Don't actually care about the rotation
        new CountSyncPolicy(1),
        testFormat,
        callback
    );

    handler.rotateOutputFile();

    // Function should ensure rotation actions and callback are called.
    verify(rotAction1).execute(any(), any());
    verify(rotAction2).execute(any(), any());
    verify(callback).removeKey();
}
Example #2
Source File: MovingAvgLocalTopologyRunner.java From hadoop-arch-book with Apache License 2.0
/**
 * Create a bolt which will persist ticks to HDFS.
 */
private static HdfsBolt createHdfsBolt() {
    // Use "|" instead of "," for field delimiter:
    RecordFormat format = new DelimitedRecordFormat()
        .withFieldDelimiter("|");

    // Sync the filesystem after every 100 tuples:
    SyncPolicy syncPolicy = new CountSyncPolicy(100);

    // Rotate files when they reach 5MB:
    FileRotationPolicy rotationPolicy =
        new FileSizeRotationPolicy(5.0f, Units.MB);

    // Write records to <user>/stock-ticks/ directory in HDFS:
    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
        .withPath("stock-ticks/");

    HdfsBolt hdfsBolt = new HdfsBolt()
        .withFsUrl("hdfs://localhost:8020")
        .withFileNameFormat(fileNameFormat)
        .withRecordFormat(format)
        .withRotationPolicy(rotationPolicy)
        .withSyncPolicy(syncPolicy);

    return hdfsBolt;
}
Example #3
Source File: HdfsWriter.java From metron with Apache License 2.0
@Override
public void init(Map stormConfig, WriterConfiguration configurations) {
    this.stormConfig = stormConfig;
    this.stellarProcessor = new StellarProcessor();
    if (syncPolicy != null) {
        // If the user has specified the sync policy, we don't want to override their wishes.
        LOG.debug("Using user specified sync policy {}", () -> syncPolicy.getClass().getSimpleName());
        syncPolicyCreator = new ClonedSyncPolicyCreator(syncPolicy);
    } else {
        // If the user has not, then we want to have the sync policy depend on the batch size.
        LOG.debug("No user specified sync policy, using CountSyncPolicy based on batch size");
        syncPolicyCreator = (source, config) ->
            new CountSyncPolicy(config == null ? 1 : config.getBatchSize(source));
    }
}
Example #4
Source File: ClonedSyncPolicyCreatorTest.java From metron with Apache License 2.0
@Test
public void testClonedPolicy() {
    CountSyncPolicy basePolicy = new CountSyncPolicy(5);
    ClonedSyncPolicyCreator creator = new ClonedSyncPolicyCreator(basePolicy);
    // Ensure the cloned policy continues to work and adheres to the contract: mark on the 5th call.
    SyncPolicy clonedPolicy = creator.create("blah", null);
    for (int i = 0; i < 4; ++i) {
        assertFalse(clonedPolicy.mark(null, i));
    }
    assertTrue(clonedPolicy.mark(null, 5));
    // Re-clone the policy and ensure it adheres to the original contract.
    clonedPolicy = creator.create("blah", null);
    assertFalse(clonedPolicy.mark(null, 0));
}
Example #5
Source File: HdfsWriterTest.java From metron with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testSingleFileIfNoStreamClosed() throws Exception {
    String function = "FORMAT('test-%s/%s', test.key, test.key)";
    WriterConfiguration config = buildWriterConfiguration(function);
    HdfsWriter writer = new HdfsWriter().withFileNameFormat(testFormat);
    writer.init(new HashMap<String, String>(), config);
    writer.initFileNameFormat(createTopologyContext());

    JSONObject message = new JSONObject();
    message.put("test.key", "test.value");
    List<BulkMessage<JSONObject>> messages = new ArrayList<BulkMessage<JSONObject>>() {{
        add(new BulkMessage("message1", message));
    }};

    CountSyncPolicy basePolicy = new CountSyncPolicy(5);
    ClonedSyncPolicyCreator creator = new ClonedSyncPolicyCreator(basePolicy);

    writer.write(SENSOR_NAME, config, messages);
    writer.write(SENSOR_NAME, config, messages);
    writer.close();

    File outputFolder = new File(folder.getAbsolutePath() + "/test-test.value/test.value/");

    // The message should show up twice, once per write call.
    ArrayList<String> expected = new ArrayList<>();
    expected.add(message.toJSONString());
    expected.add(message.toJSONString());

    // Assert both messages are in the same file, because the stream stayed open.
    assertEquals(1, outputFolder.listFiles().length);
    for (File file : outputFolder.listFiles()) {
        List<String> lines = Files.readAllLines(file.toPath());
        // Both lines end up in the single file.
        assertEquals(2, lines.size());
        assertEquals(expected, lines);
    }
}
Example #6
Source File: WARCHdfsBolt.java From storm-crawler with Apache License 2.0
public WARCHdfsBolt() {
    super();
    FileSizeRotationPolicy rotpol = new FileSizeRotationPolicy(1.0f, Units.GB);
    withRotationPolicy(rotpol);
    // Dummy sync policy
    withSyncPolicy(new CountSyncPolicy(10));
    // Default to the local filesystem
    withFsUrl("file:///");
}
Example #7
Source File: HdfsTopology.java From storm-kafka-examples with Apache License 2.0
public static void main(String[] args) {
    try {
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // the ZooKeeper ensemble Kafka is registered with
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // create /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // HDFS bolt
        // Use "|" instead of "," for field delimiter
        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter("|");
        // Sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
        // Rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);
        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");
        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(), boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt, boltNum).shuffleGrouping("counter");

        Config config = new Config();
        config.setDebug(true);

        if (args != null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
            Thread.sleep(500000);
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example #8
Source File: TopologyRunner.java From opensoc-streaming with Apache License 2.0
private boolean initializeHDFSBolt(String topology_name, String name) {
    try {
        String messageUpstreamComponent = messageComponents
                .get(messageComponents.size() - 1);

        System.out.println("[OpenSOC] ------" + name + " is initializing from "
                + messageUpstreamComponent);

        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter(
                        config.getString("bolt.hdfs.field.delimiter").toString())
                .withFields(new Fields("message"));

        // Sync the file system after every x number of tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(Integer.valueOf(config
                .getString("bolt.hdfs.batch.size").toString()));

        // Rotate files when they reach a certain size
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(
                Float.valueOf(config.getString("bolt.hdfs.file.rotation.size.in.mb").toString()),
                Units.MB);

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath(config.getString("bolt.hdfs.wip.file.path").toString());

        // Post-rotate action
        MoveFileAction moveFileAction = (new MoveFileAction())
                .toDestination(config.getString("bolt.hdfs.finished.file.path").toString());

        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl(config.getString("bolt.hdfs.file.system.url").toString())
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy)
                .addRotationAction(moveFileAction);

        if (config.getString("bolt.hdfs.compression.codec.class") != null) {
            hdfsBolt.withCompressionCodec(config.getString(
                    "bolt.hdfs.compression.codec.class").toString());
        }

        builder.setBolt(name, hdfsBolt, config.getInt("bolt.hdfs.parallelism.hint"))
                .shuffleGrouping(messageUpstreamComponent, "message")
                .setNumTasks(config.getInt("bolt.hdfs.num.tasks"));
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(0);
    }

    return true;
}
Example #9
Source File: SequenceFileTopology.java From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);

    SentenceSpout spout = new SentenceSpout();

    // Sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // Rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/source/")
            .withExtension(".seq");

    // Create sequence format instance.
    DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");

    SequenceFileBolt bolt = new SequenceFileBolt()
            .withFsUrl(args[0])
            .withFileNameFormat(fileNameFormat)
            .withSequenceFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .withCompressionType(SequenceFile.CompressionType.RECORD)
            .withCompressionCodec("deflate")
            .addRotationAction(new MoveFileAction().toDestination("/dest/"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> SequenceFileBolt
    builder.setBolt(BOLT_ID, bolt, 4)
            .shuffleGrouping(SENTENCE_SPOUT_ID);

    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    }
}
Example #10
Source File: HdfsFileTopology.java From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);

    SentenceSpout spout = new SentenceSpout();

    // Sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // Rotate files every minute
    FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/foo/")
            .withExtension(".txt");

    // Use "|" instead of "," for field delimiter
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter("|");

    Yaml yaml = new Yaml();
    InputStream in = new FileInputStream(args[1]);
    Map<String, Object> yamlConf = (Map<String, Object>) yaml.load(in);
    in.close();
    config.put("hdfs.config", yamlConf);

    HdfsBolt bolt = new HdfsBolt()
            .withConfigKey("hdfs.config")
            .withFsUrl(args[0])
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .addRotationAction(new MoveFileAction().toDestination("/dest2/"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> HdfsBolt
    builder.setBolt(BOLT_ID, bolt, 4)
            .shuffleGrouping(SENTENCE_SPOUT_ID);

    if (args.length == 2) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 3) {
        // args[0] is the filesystem URL above, so the topology name is the third argument.
        StormSubmitter.submitTopology(args[2], config, builder.createTopology());
    } else {
        System.out.println("Usage: HdfsFileTopology <fs url> <yaml config file> [topology name]");
    }
}