Java Code Examples for org.apache.crunch.Pipeline#readTextFile()
The following examples show how to use org.apache.crunch.Pipeline#readTextFile(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
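Before the full examples, here is a minimal sketch of readTextFile() in isolation. It is an illustration, not taken from any of the projects below: it uses the in-memory MemPipeline so it runs without a Hadoop cluster, and the path inputs/lines.txt is a hypothetical placeholder.

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mem.MemPipeline;

public class ReadTextFileSketch {
    public static void main(String[] args) {
        // MemPipeline is a singleton, in-memory Pipeline implementation.
        Pipeline pipeline = MemPipeline.getInstance();

        // readTextFile() references each line of the file as an element
        // of a PCollection<String>.
        // "inputs/lines.txt" is a placeholder path for this sketch.
        PCollection<String> lines = pipeline.readTextFile("inputs/lines.txt");

        // materialize() triggers evaluation and exposes the lines as an Iterable.
        for (String line : lines.materialize()) {
            System.out.println(line);
        }

        // Signal that no more reads or writes will be issued.
        pipeline.done();
    }
}

The MapReduce-backed MRPipeline used in Examples 2 and 3 exposes the same readTextFile() method, since both implement the Pipeline interface; only the execution engine differs.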
Example 1
Source File: MemPipelineUnitTest.java From tutorials with MIT License
@Test @Ignore("Requires Hadoop binaries") public void givenCollection_whenWriteCalled_fileWrittenSuccessfully() throws IOException { PCollection<String> inputStrings = MemPipeline.collectionOf("Hello", "Apache", "Crunch", Calendar.getInstance() .toString()); final String outputFilePath = createOutputPath(); Target target = To.textFile(outputFilePath); inputStrings.write(target); Pipeline pipeline = MemPipeline.getInstance(); PCollection<String> lines = pipeline.readTextFile(outputFilePath); assertIterableEquals(inputStrings.materialize(), lines.materialize()); }
Example 2
Source File: JoinFilterExampleCrunch.java From hadoop-arch-book with Apache License 2.0
public int run(String[] args) throws Exception {
    String fooInputPath = args[0];
    String barInputPath = args[1];
    String outputPath = args[2];
    int fooValMax = Integer.parseInt(args[3]);
    int joinValMax = Integer.parseInt(args[4]);
    int numberOfReducers = Integer.parseInt(args[5]);

    Pipeline pipeline = new MRPipeline(JoinFilterExampleCrunch.class, getConf()); //<1>

    PCollection<String> fooLines = pipeline.readTextFile(fooInputPath); //<2>
    PCollection<String> barLines = pipeline.readTextFile(barInputPath);

    PTable<Long, Pair<Long, Integer>> fooTable = fooLines.parallelDo( //<3>
        new FooIndicatorFn(),
        Avros.tableOf(Avros.longs(), Avros.pairs(Avros.longs(), Avros.ints())));

    fooTable = fooTable.filter(new FooFilter(fooValMax)); //<4>

    PTable<Long, Integer> barTable = barLines.parallelDo(
        new BarIndicatorFn(),
        Avros.tableOf(Avros.longs(), Avros.ints()));

    DefaultJoinStrategy<Long, Pair<Long, Integer>, Integer> joinStrategy = //<5>
        new DefaultJoinStrategy<Long, Pair<Long, Integer>, Integer>(numberOfReducers);

    PTable<Long, Pair<Pair<Long, Integer>, Integer>> joinedTable = joinStrategy //<6>
        .join(fooTable, barTable, JoinType.INNER_JOIN);

    PTable<Long, Pair<Pair<Long, Integer>, Integer>> filteredTable =
        joinedTable.filter(new JoinFilter(joinValMax));

    filteredTable.write(At.textFile(outputPath), WriteMode.OVERWRITE); //<7>

    PipelineResult result = pipeline.done();
    return result.succeeded() ? 0 : 1;
}
Example 3
Source File: WordCount.java From tutorials with MIT License
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("Usage: hadoop jar crunch-1.0.0-SNAPSHOT-job.jar"
            + " [generic options] input output");
        System.err.println();
        GenericOptionsParser.printGenericCommandUsage(System.err);
        return 1;
    }
    String inputPath = args[0];
    String outputPath = args[1];

    // Create an object to coordinate pipeline creation and execution.
    Pipeline pipeline = new MRPipeline(WordCount.class, getConf());

    // Reference a given text file as a collection of Strings.
    PCollection<String> lines = pipeline.readTextFile(inputPath);

    // Define a function that splits each line in a PCollection of Strings into
    // a PCollection made up of the individual words in the file.
    // The second argument sets the serialization format.
    PCollection<String> words = lines.parallelDo(new Tokenizer(), Writables.strings());

    // Take the collection of words and remove known stop words.
    PCollection<String> noStopWords = words.filter(new StopWordFilter());

    // The count method applies a series of Crunch primitives and returns
    // a map of the unique words in the input PCollection to their counts.
    PTable<String, Long> counts = noStopWords.count();

    // Instruct the pipeline to write the resulting counts to a text file.
    pipeline.writeTextFile(counts, outputPath);

    // Execute the pipeline as a MapReduce.
    PipelineResult result = pipeline.done();

    return result.succeeded() ? 0 : 1;
}
Example 4
Source File: MemPipelineUnitTest.java From tutorials with MIT License
@Test
public void givenPipeLine_whenTextFileRead_thenExpectedNumberOfRecordsRead() {
    // Read the test fixture through the in-memory pipeline.
    Pipeline pipeline = MemPipeline.getInstance();
    PCollection<String> lines = pipeline.readTextFile(INPUT_FILE_PATH);

    // The fixture file contains 21 lines.
    assertEquals(21, lines.asCollection().getValue().size());
}
Example 5
Source File: MemPipelineUnitTest.java From tutorials with MIT License
@Test @Ignore("Requires Hadoop binaries") public void givenPipeLine_whenWriteTextFileCalled_fileWrittenSuccessfully() throws IOException { Pipeline pipeline = MemPipeline.getInstance(); PCollection<String> inputStrings = MemPipeline.collectionOf("Hello", "Apache", "Crunch", Calendar.getInstance() .toString()); final String outputFilePath = createOutputPath(); pipeline.writeTextFile(inputStrings, outputFilePath); PCollection<String> lines = pipeline.readTextFile(outputFilePath); assertIterableEquals(inputStrings.materialize(), lines.materialize()); }