Java Code Examples for org.apache.hadoop.io.file.tfile.TFile.Writer#close()
The following examples show how to use org.apache.hadoop.io.file.tfile.TFile.Writer#close(). Each example is taken from an open source project; the source file and project are named in the header above it.
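Before the project examples, here is a minimal, self-contained sketch of the lifecycle these tests exercise. This is illustrative only: the output path, block size, codec, and key/value bytes are assumptions, not values taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileWriterCloseSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.tfile"); // hypothetical output path
    FileSystem fs = path.getFileSystem(conf);
    FSDataOutputStream out = fs.create(path);
    try {
      // 64 KB minimum block size, no compression, memcmp key ordering.
      TFile.Writer writer =
          new TFile.Writer(out, 64 * 1024, TFile.COMPRESSION_NONE, "memcmp", conf);
      try {
        writer.append("key".getBytes(), "value".getBytes());
      } finally {
        writer.close(); // flushes the last block, writes the index and footer
      }
    } finally {
      out.close(); // Writer#close() does not close the underlying stream
    }
  }
}

Since TFile.Writer implements java.io.Closeable, a try-with-resources statement is an equivalent alternative to the explicit try/finally blocks used here and in the examples below.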
Example 1
Source File: TestTFileSplit.java (project: hadoop-gpu, Apache License 2.0)

void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
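Note the close ordering, which every example in this list follows: writer.close() finalizes the TFile itself but does not close the FSDataOutputStream it was handed, so out.close() must be called separately afterward.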
Example 2
Source File: TestTFile.java (project: hadoop, Apache License 2.0)

void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Example 3
Source File: TestTFileSplit.java (project: hadoop, Apache License 2.0)

void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Example 4
Source File: TestTFile.java (project: hadoop-gpu, Apache License 2.0)

void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Example 5
Source File: TestTFile.java (project: big-c, Apache License 2.0)

void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Example 6
Source File: TestTFileSplit.java (project: RDFS, Apache License 2.0)

void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Example 7
Source File: TestTFileSplit.java (project: big-c, Apache License 2.0)

void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Example 8
Source File: TestTFile.java (project: RDFS, Apache License 2.0)

void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Example 9
Source File: TestTFile.java (project: hadoop, Apache License 2.0)

private void writeRecords(Writer writer) throws IOException {
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
Example 10
Source File: TestTFile.java (project: hadoop-gpu, Apache License 2.0)

public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 11
Source File: TestTFile.java (project: hadoop-gpu, Apache License 2.0)

private void writeRecords(Writer writer) throws IOException {
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
Example 12
Source File: TestTFile.java (project: RDFS, Apache License 2.0)

public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 13
Source File: TestTFile.java (project: RDFS, Apache License 2.0)

private void writeRecords(Writer writer) throws IOException {
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
Example 14
Source File: TestTFile.java (project: big-c, Apache License 2.0)

public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 15
Source File: TestTFile.java (project: big-c, Apache License 2.0)

private void writeRecords(Writer writer) throws IOException {
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
Example 16
Source File: TestTFile.java (project: hadoop, Apache License 2.0)

public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();

  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 17
Source File: TestTFileSeek.java (project: RDFS, Apache License 2.0)

private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }

  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
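Unlike the earlier examples, this benchmark nests the writer and the stream in separate try/finally blocks, so writer.close() and fout.close() both run even if append() throws partway through the loop.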
Example 18
Source File: TestTFileSeek.java (project: hadoop-gpu, Apache License 2.0)

private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }

  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
Example 19
Source File: TestTFileSeek.java (project: big-c, Apache License 2.0)

private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }

  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
Example 20
Source File: TestTFileSeek.java (project: hadoop, Apache License 2.0)

private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }

  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}