Java Code Examples for org.apache.hadoop.io.DataOutputBuffer#getLength()
The following examples show how to use
org.apache.hadoop.io.DataOutputBuffer#getLength().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: CryptoStreamsTestBase.java From big-c with Apache License 2.0 | 6 votes |
@Before public void setUp() throws IOException { // Generate data final int seed = new Random().nextInt(); final DataOutputBuffer dataBuf = new DataOutputBuffer(); final RandomDatum.Generator generator = new RandomDatum.Generator(seed); for(int i = 0; i < count; ++i) { generator.next(); final RandomDatum key = generator.getKey(); final RandomDatum value = generator.getValue(); key.write(dataBuf); value.write(dataBuf); } LOG.info("Generated " + count + " records"); data = dataBuf.getData(); dataLen = dataBuf.getLength(); }
Example 2
Source File: StreamXmlRecordReader.java From RDFS with Apache License 2.0 | 6 votes |
public synchronized boolean next(WritableComparable key, Writable value) throws IOException { numNext++; if (pos_ >= end_) { return false; } DataOutputBuffer buf = new DataOutputBuffer(); if (!readUntilMatchBegin()) { return false; } if (!readUntilMatchEnd(buf)) { return false; } // There is only one elem..key/value splitting is not done here. byte[] record = new byte[buf.getLength()]; System.arraycopy(buf.getData(), 0, record, 0, record.length); numRecStats(record, 0, record.length); ((Text) key).set(record); ((Text) value).set(""); return true; }
Example 3
Source File: TestIndexedSort.java From RDFS with Apache License 2.0 | 6 votes |
public WritableSortable(int j) throws IOException { seed = r.nextLong(); r.setSeed(seed); Text t = new Text(); StringBuffer sb = new StringBuffer(); indices = new int[j]; offsets = new int[j]; check = new String[j]; DataOutputBuffer dob = new DataOutputBuffer(); for (int i = 0; i < j; ++i) { indices[i] = i; offsets[i] = dob.getLength(); genRandom(t, r.nextInt(15) + 1, sb); t.write(dob); check[i] = t.toString(); } eob = dob.getLength(); bytes = dob.getData(); comparator = WritableComparator.get(Text.class); }
Example 4
Source File: StreamXmlRecordReader.java From RDFS with Apache License 2.0 | 6 votes |
public synchronized boolean next(Text key, Text value) throws IOException { numNext++; if (pos_ >= end_) { return false; } DataOutputBuffer buf = new DataOutputBuffer(); if (!readUntilMatchBegin()) { return false; } if (!readUntilMatchEnd(buf)) { return false; } // There is only one elem..key/value splitting is not done here. byte[] record = new byte[buf.getLength()]; System.arraycopy(buf.getData(), 0, record, 0, record.length); numRecStats(record, 0, record.length); key.set(record); value.set(""); return true; }
Example 5
Source File: QueryWritableTest.java From incubator-retired-blur with Apache License 2.0 | 6 votes |
/** Round-trips a TermQuery through QueryWritable serialization and checks equality. */
@Test
public void testTermQuery() throws IOException {
  TermQuery original = new TermQuery(new Term("field", "value"));
  QueryWritable writable = new QueryWritable();
  writable.setQuery(original);

  // Serialize the query.
  DataOutputBuffer out = new DataOutputBuffer();
  writable.write(out);

  // Deserialize from the raw bytes (only the first getLength() bytes are valid).
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  QueryWritable roundTripped = new QueryWritable();
  roundTripped.readFields(in);

  assertEquals(original, roundTripped.getQuery());
}
Example 6
Source File: StreamXmlRecordReader.java From big-c with Apache License 2.0 | 6 votes |
public synchronized boolean next(Text key, Text value) throws IOException { numNext++; if (pos_ >= end_) { return false; } DataOutputBuffer buf = new DataOutputBuffer(); if (!readUntilMatchBegin()) { return false; } if (pos_ >= end_ || !readUntilMatchEnd(buf)) { return false; } // There is only one elem..key/value splitting is not done here. byte[] record = new byte[buf.getLength()]; System.arraycopy(buf.getData(), 0, record, 0, record.length); numRecStats(record, 0, record.length); key.set(record); value.set(""); return true; }
Example 7
Source File: StreamXmlRecordReader.java From hadoop with Apache License 2.0 | 6 votes |
public synchronized boolean next(Text key, Text value) throws IOException { numNext++; if (pos_ >= end_) { return false; } DataOutputBuffer buf = new DataOutputBuffer(); if (!readUntilMatchBegin()) { return false; } if (pos_ >= end_ || !readUntilMatchEnd(buf)) { return false; } // There is only one elem..key/value splitting is not done here. byte[] record = new byte[buf.getLength()]; System.arraycopy(buf.getData(), 0, record, 0, record.length); numRecStats(record, 0, record.length); key.set(record); value.set(""); return true; }
Example 8
Source File: ShuffleHandler.java From big-c with Apache License 2.0 | 5 votes |
protected void populateHeaders(List<String> mapIds, String outputBaseStr, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map<String, MapOutputInfo> mapOutputInfoMap) throws IOException { long contentLength = 0; for (String mapId : mapIds) { String base = outputBaseStr + mapId; MapOutputInfo outputInfo = getMapOutputInfo(base, mapId, reduce, user); if (mapOutputInfoMap.size() < mapOutputMetaInfoCacheSize) { mapOutputInfoMap.put(mapId, outputInfo); } // Index file Path indexFileName = lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf); IndexRecord info = indexCache.getIndexInformation(mapId, reduce, indexFileName, user); ShuffleHeader header = new ShuffleHeader(mapId, info.partLength, info.rawLength, reduce); DataOutputBuffer dob = new DataOutputBuffer(); header.write(dob); contentLength += info.partLength; contentLength += dob.getLength(); } // Now set the response headers. setResponseHeaders(response, keepAliveParam, contentLength); }
Example 9
Source File: TezMerger.java From tez with Apache License 2.0 | 5 votes |
/** Raw-byte comparison between the current key and a serialized buffer. */
int compare(KeyValueBuffer nextKey, DataOutputBuffer buf2) {
  final byte[] keyBytes = nextKey.getData();
  final int keyStart = nextKey.getPosition();
  final int keyLen = nextKey.getLength();
  // The second buffer's valid region always starts at offset 0.
  final byte[] otherBytes = buf2.getData();
  final int otherLen = buf2.getLength();
  return comparator.compare(keyBytes, keyStart, keyLen, otherBytes, 0, otherLen);
}
Example 10
Source File: TestGridmixRecord.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Verifies that GridmixRecord's comparator agrees with a raw byte comparison
 * of the serialized forms, for record sizes from {@code min} to {@code max}.
 * Both buffers accumulate one record per iteration, so offsets captured before
 * each write delimit the latest record.
 */
static void binSortTest(GridmixRecord x, GridmixRecord y, int min, int max, WritableComparator cmp) throws Exception {
  final Random r = new Random();
  final long s = r.nextLong();
  // Re-seed so a failing run can be reproduced from the logged seed.
  r.setSeed(s);
  LOG.info("sort: " + s);
  final DataOutputBuffer out1 = new DataOutputBuffer();
  final DataOutputBuffer out2 = new DataOutputBuffer();
  for (int i = min; i < max; ++i) {
    final long seed1 = r.nextLong();
    setSerialize(x, seed1, i, out1);
    assertEquals(0, x.compareSeed(seed1, Math.max(0, i - x.fixedBytes())));
    final long seed2 = r.nextLong();
    setSerialize(y, seed2, i, out2);
    // NOTE(review): uses x.fixedBytes() for y's check — presumably x and y
    // share the same fixed size; confirm against GridmixRecord.
    assertEquals(0, y.compareSeed(seed2, Math.max(0, i - x.fixedBytes())));
    // for eq sized records, ensure byte cmp where req
    final int chk = WritableComparator.compareBytes(out1.getData(), 0, out1.getLength(), out2.getData(), 0, out2.getLength());
    assertEquals(Integer.signum(chk), Integer.signum(x.compareTo(y)));
    assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(out1.getData(), 0, out1.getLength(), out2.getData(), 0, out2.getLength())));
    // write second copy, compare eq
    final int s1 = out1.getLength();
    x.write(out1);
    // The two copies of x just written must compare equal byte-for-byte.
    assertEquals(0, cmp.compare(out1.getData(), 0, s1, out1.getData(), s1, out1.getLength() - s1));
    final int s2 = out2.getLength();
    y.write(out2);
    assertEquals(0, cmp.compare(out2.getData(), 0, s2, out2.getData(), s2, out2.getLength() - s2));
    // Cross-comparison of the second copies must match the original ordering.
    assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(out1.getData(), 0, s1, out2.getData(), s2, out2.getLength() - s2)));
  }
}
Example 11
Source File: BufferUtils.java From tez with Apache License 2.0 | 5 votes |
/**
 * Replaces the contents of {@code dst} with the valid bytes of {@code src}.
 */
public static void copy(DataOutputBuffer src, DataOutputBuffer dst) throws IOException {
  // Capture source state before the reset so a self-copy (src == dst)
  // preserves the data rather than truncating it.
  final byte[] data = src.getData();
  final int len = src.getLength();
  dst.reset();
  dst.write(data, 0, len);
}
Example 12
Source File: ShuffleHandler.java From hadoop with Apache License 2.0 | 5 votes |
protected void populateHeaders(List<String> mapIds, String outputBaseStr, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map<String, MapOutputInfo> mapOutputInfoMap) throws IOException { long contentLength = 0; for (String mapId : mapIds) { String base = outputBaseStr + mapId; MapOutputInfo outputInfo = getMapOutputInfo(base, mapId, reduce, user); if (mapOutputInfoMap.size() < mapOutputMetaInfoCacheSize) { mapOutputInfoMap.put(mapId, outputInfo); } // Index file Path indexFileName = lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf); IndexRecord info = indexCache.getIndexInformation(mapId, reduce, indexFileName, user); ShuffleHeader header = new ShuffleHeader(mapId, info.partLength, info.rawLength, reduce); DataOutputBuffer dob = new DataOutputBuffer(); header.write(dob); contentLength += info.partLength; contentLength += dob.getLength(); } // Now set the response headers. setResponseHeaders(response, keepAliveParam, contentLength); }
Example 13
Source File: ShuffleHandler.java From tez with Apache License 2.0 | 5 votes |
protected void populateHeaders(List<String> mapIds, String outputBaseStr, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map<String, MapOutputInfo> mapOutputInfoMap) throws IOException { long contentLength = 0; for (String mapId : mapIds) { String base = outputBaseStr + mapId; MapOutputInfo outputInfo = getMapOutputInfo(base, mapId, reduce, user); if (mapOutputInfoMap.size() < mapOutputMetaInfoCacheSize) { mapOutputInfoMap.put(mapId, outputInfo); } // Index file Path indexFileName = lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf); TezIndexRecord info = indexCache.getIndexInformation(mapId, reduce, indexFileName, user); ShuffleHeader header = new ShuffleHeader(mapId, info.getPartLength(), info.getRawLength(), reduce); DataOutputBuffer dob = new DataOutputBuffer(); header.write(dob); contentLength += info.getPartLength(); contentLength += dob.getLength(); } // Now set the response headers. setResponseHeaders(response, keepAliveParam, contentLength); }
Example 14
Source File: Client.java From RDFS with Apache License 2.0 | 5 votes |
private void writeHeader() throws IOException { // Write out the header and version out.write(Server.HEADER.array()); out.write(Server.CURRENT_VERSION); // Write out the ConnectionHeader DataOutputBuffer buf = new DataOutputBuffer(); header.write(buf); // Write out the payload length int bufLen = buf.getLength(); out.writeInt(bufLen); out.write(buf.getData(), 0, bufLen); }
Example 15
Source File: Client.java From hadoop-gpu with Apache License 2.0 | 5 votes |
private void writeHeader() throws IOException { // Write out the header and version out.write(Server.HEADER.array()); out.write(Server.CURRENT_VERSION); // Write out the ConnectionHeader DataOutputBuffer buf = new DataOutputBuffer(); header.write(buf); // Write out the payload length int bufLen = buf.getLength(); out.writeInt(bufLen); out.write(buf.getData(), 0, bufLen); }
Example 16
Source File: TestGridmixRecord.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Checks that GridmixRecord ordering is consistent with a lexicographic
 * comparison of its serialized bytes, across record sizes {@code min..max-1}.
 * Records accumulate in {@code out1}/{@code out2} across iterations; offsets
 * saved before each write mark where the newest record begins.
 */
static void binSortTest(GridmixRecord x, GridmixRecord y, int min, int max, WritableComparator cmp) throws Exception {
  final Random r = new Random();
  final long s = r.nextLong();
  // Seed is logged and re-applied so failures can be reproduced.
  r.setSeed(s);
  LOG.info("sort: " + s);
  final DataOutputBuffer out1 = new DataOutputBuffer();
  final DataOutputBuffer out2 = new DataOutputBuffer();
  for (int i = min; i < max; ++i) {
    final long seed1 = r.nextLong();
    setSerialize(x, seed1, i, out1);
    assertEquals(0, x.compareSeed(seed1, Math.max(0, i - x.fixedBytes())));
    final long seed2 = r.nextLong();
    setSerialize(y, seed2, i, out2);
    // NOTE(review): x.fixedBytes() is used for y's check as well — presumably
    // both records have the same fixed size; verify against GridmixRecord.
    assertEquals(0, y.compareSeed(seed2, Math.max(0, i - x.fixedBytes())));
    // for eq sized records, ensure byte cmp where req
    final int chk = WritableComparator.compareBytes(out1.getData(), 0, out1.getLength(), out2.getData(), 0, out2.getLength());
    assertEquals(Integer.signum(chk), Integer.signum(x.compareTo(y)));
    assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(out1.getData(), 0, out1.getLength(), out2.getData(), 0, out2.getLength())));
    // write second copy, compare eq
    final int s1 = out1.getLength();
    x.write(out1);
    // The fresh copy of x must compare equal to the copy written just before it.
    assertEquals(0, cmp.compare(out1.getData(), 0, s1, out1.getData(), s1, out1.getLength() - s1));
    final int s2 = out2.getLength();
    y.write(out2);
    assertEquals(0, cmp.compare(out2.getData(), 0, s2, out2.getData(), s2, out2.getLength() - s2));
    // The second copies must preserve the ordering observed for the first.
    assertEquals(Integer.signum(chk), Integer.signum(cmp.compare(out1.getData(), 0, s1, out2.getData(), s2, out2.getLength() - s2)));
  }
}
Example 17
Source File: BufferUtils.java From tez with Apache License 2.0 | 5 votes |
/**
 * Lexicographically compares the valid regions of two buffers.
 * Only the first {@code getLength()} bytes of each backing array are compared.
 */
public static int compare(DataOutputBuffer buf1, DataOutputBuffer buf2) {
  return FastByteComparisons.compareTo(
      buf1.getData(), 0, buf1.getLength(),
      buf2.getData(), 0, buf2.getLength());
}
Example 18
Source File: FlinkXMLParser.java From incubator-retired-mrql with Apache License 2.0 | 4 votes |
/**
 * Returns the next raw XML fragment from the splitter as a String,
 * or null when the input is exhausted.
 */
public String slice() {
  if (!splitter.hasNext()) {
    return null;
  }
  DataOutputBuffer fragment = splitter.next();
  // Decode only the valid region; the backing array may be larger than
  // getLength(). Decode with an explicit charset: the charset-less String
  // constructor uses the platform default, which makes parsing of non-ASCII
  // input non-portable across JVMs.
  return new String(fragment.getData(), 0, fragment.getLength(),
      java.nio.charset.StandardCharsets.UTF_8);
}
Example 19
Source File: FlinkJsonParser.java From incubator-retired-mrql with Apache License 2.0 | 4 votes |
/**
 * Returns the next raw JSON fragment from the splitter as a String,
 * or null when the input is exhausted.
 */
public String slice() {
  if (!splitter.hasNext()) {
    return null;
  }
  DataOutputBuffer fragment = splitter.next();
  // Only the first getLength() bytes of the backing array are valid. Decode
  // with an explicit charset instead of the platform default so non-ASCII
  // input parses identically on every JVM (JSON is UTF-8 by specification).
  return new String(fragment.getData(), 0, fragment.getLength(),
      java.nio.charset.StandardCharsets.UTF_8);
}
Example 20
Source File: JsonFormatParser.java From incubator-retired-mrql with Apache License 2.0 | 4 votes |
/**
 * Returns the next raw JSON fragment from the splitter as a String,
 * or null when the input is exhausted.
 */
public String slice() {
  if (!splitter.hasNext()) {
    return null;
  }
  DataOutputBuffer fragment = splitter.next();
  // Restrict decoding to the valid region (the buffer's array may be larger)
  // and name the charset explicitly: the no-charset String constructor uses
  // the platform default, which is not portable for non-ASCII data.
  return new String(fragment.getData(), 0, fragment.getLength(),
      java.nio.charset.StandardCharsets.UTF_8);
}