Java Code Examples for org.apache.hadoop.io.file.tfile.TFile.Writer#append()
The following examples show how to use
org.apache.hadoop.io.file.tfile.TFile.Writer#append() .
Follow the link above each example to go to the original project or source file.
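Before the project-specific examples, the snippet below is a minimal, self-contained sketch of the pattern they all share: open an FSDataOutputStream, wrap it in a TFile.Writer, append key/value pairs, and close both. The class name, output path, block size, and record contents are illustrative assumptions, not taken from any of the examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileAppendSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.tfile"); // hypothetical output path
    FileSystem fs = path.getFileSystem(conf);
    FSDataOutputStream out = fs.create(path);
    try {
      // 64 KB minimum block size, gzip compression, raw memcmp key ordering.
      TFile.Writer writer =
          new TFile.Writer(out, 64 * 1024, "gz", "memcmp", conf);
      try {
        // With a comparator set, keys must be appended in non-decreasing order.
        writer.append("key1".getBytes(), "value1".getBytes());
        writer.append("key2".getBytes(), "value2".getBytes());
      } finally {
        writer.close();
      }
    } finally {
      out.close();
    }
  }
}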
Example 1
Source File: TestTFileSplit.java From RDFS with Apache License 2.0 | 6 votes |
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Example 2
Source File: TestTFileSplit.java From big-c with Apache License 2.0 | 6 votes |
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Example 3
Source File: TestTFile.java From big-c with Apache License 2.0 | 5 votes |
private void writeEmptyRecords(Writer writer, int n) throws IOException {
  byte[] key = new byte[0];
  byte[] value = new byte[0];
  for (int i = 0; i < n; i++) {
    writer.append(key, value);
  }
}
Example 4
Source File: TestTFileByteArrays.java From RDFS with Apache License 2.0 | 5 votes |
static long writeRecords(Writer writer, int count) throws IOException {
  long rawDataSize = 0;
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
    rawDataSize += WritableUtils.getVIntSize(key.length) + key.length
        + WritableUtils.getVIntSize(value.length) + value.length;
  }
  return rawDataSize;
}
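Here, rawDataSize tracks the uncompressed size of the appended records as the test expects TFile to account for them: each key and value contributes a variable-length-integer length prefix (hence WritableUtils.getVIntSize) plus its payload bytes.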
Example 5
Source File: TestTFile.java From RDFS with Apache License 2.0 | 5 votes |
private int writeLargeRecords(Writer writer, int start, int n)
    throws IOException {
  byte[] value = new byte[largeVal];
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), value);
    writer.append(key.getBytes(), value);
  }
  return (start + n);
}
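Note that each key is appended twice. This is deliberate: a sorted TFile only requires keys to be non-decreasing, so duplicate keys are legal and append() accepts them.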
Example 6
Source File: TestTFileUnsortedByteArrays.java From RDFS with Apache License 2.0 | 5 votes |
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
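Because the comparator argument is null, the TFile is unsorted, which is why keys can be appended out of order (keyZ, keyM, keyN, keyA); with a comparator set, an out-of-order append() would fail.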
Example 7
Source File: TestTFileUnsortedByteArrays.java From big-c with Apache License 2.0 | 5 votes |
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
Example 8
Source File: TestTFileByteArrays.java From big-c with Apache License 2.0 | 5 votes |
static long writeRecords(Writer writer, int count) throws IOException {
  long rawDataSize = 0;
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
    rawDataSize += WritableUtils.getVIntSize(key.length) + key.length
        + WritableUtils.getVIntSize(value.length) + value.length;
  }
  return rawDataSize;
}
Example 9
Source File: TestTFile.java From hadoop-gpu with Apache License 2.0 | 5 votes |
private int writeSomeRecords(Writer writer, int start, int n)
    throws IOException {
  String value = "value";
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), (value + key).getBytes());
    writer.append(key.getBytes(), (value + key).getBytes());
  }
  return (start + n);
}
Example 10
Source File: TestTFile.java From big-c with Apache License 2.0 | 5 votes |
private int writeSomeRecords(Writer writer, int start, int n)
    throws IOException {
  String value = "value";
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), (value + key).getBytes());
    writer.append(key.getBytes(), (value + key).getBytes());
  }
  return (start + n);
}
Example 11
Source File: TestTFileUnsortedByteArrays.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
Example 12
Source File: TestTFileByteArrays.java From hadoop-gpu with Apache License 2.0 | 5 votes |
static long writeRecords(Writer writer, int count) throws IOException {
  long rawDataSize = 0;
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
    rawDataSize += WritableUtils.getVIntSize(key.length) + key.length
        + WritableUtils.getVIntSize(value.length) + value.length;
  }
  return rawDataSize;
}
Example 13
Source File: TestTFileUnsortedByteArrays.java From hadoop-gpu with Apache License 2.0 | 5 votes |
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
Example 14
Source File: TestTFile.java From hadoop with Apache License 2.0 | 5 votes |
private void writeEmptyRecords(Writer writer, int n) throws IOException {
  byte[] key = new byte[0];
  byte[] value = new byte[0];
  for (int i = 0; i < n; i++) {
    writer.append(key, value);
  }
}
Example 15
Source File: TestTFile.java From hadoop with Apache License 2.0 | 5 votes |
private int writeLargeRecords(Writer writer, int start, int n)
    throws IOException {
  byte[] value = new byte[largeVal];
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), value);
    writer.append(key.getBytes(), value);
  }
  return (start + n);
}
Example 16
Source File: TestTFile.java From hadoop-gpu with Apache License 2.0 | 5 votes |
private void writeEmptyRecords(Writer writer, int n) throws IOException {
  byte[] key = new byte[0];
  byte[] value = new byte[0];
  for (int i = 0; i < n; i++) {
    writer.append(key, value);
  }
}
Example 17
Source File: TestTFileSeek.java From hadoop with Apache License 2.0 | 4 votes |
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
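This benchmark uses the six-argument overload append(byte[] key, int koff, int klen, byte[] value, int voff, int vlen), which writes a slice of each array. That matters with BytesWritable, whose backing array (get()) can be larger than its logical length (getSize()), so passing explicit offsets and lengths avoids writing stale trailing bytes.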
Example 18
Source File: TestTFileSeek.java From big-c with Apache License 2.0 | 4 votes |
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
Example 19
Source File: TestTFileSeek.java From RDFS with Apache License 2.0 | 4 votes |
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
Example 20
Source File: TestTFileSeek.java From hadoop-gpu with Apache License 2.0 | 4 votes |
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf(
      "time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}