Java Code Examples for org.elasticsearch.common.io.stream.StreamInput#readBytesRef()
The following examples show how to use
org.elasticsearch.common.io.stream.StreamInput#readBytesRef() .
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: DfsSearchResult.java From Elasticsearch with Apache License 2.0 | 6 votes |
/**
 * Deserializes this DFS result from the stream: request id, the term array,
 * per-term statistics, field statistics, and the shard's maxDoc, in wire order.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    id = in.readLong();
    final int termsSize = in.readVInt();
    if (termsSize == 0) {
        terms = EMPTY_TERMS;
    } else {
        terms = new Term[termsSize];
        for (int idx = 0; idx < termsSize; idx++) {
            // Each term is serialized as its field name followed by its bytes.
            terms[idx] = new Term(in.readString(), in.readBytesRef());
        }
    }
    this.termStatistics = readTermStats(in, terms);
    readFieldStats(in, fieldStatistics);
    maxDoc = in.readVInt();
}
Example 2
Source File: StringType.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Reads a nullable bytes value from the stream. The wire format stores
 * (length + 1) as a vInt, so a stored value of 0 encodes null.
 *
 * @param in the stream to read from
 * @return the decoded value, or {@code null} when the null sentinel was stored
 * @throws IOException if reading from the stream fails
 */
@Override
public BytesRef readValueFrom(StreamInput in) throws IOException {
    final int encoded = in.readVInt();
    if (encoded == 0) {
        // 0 on the wire is the null marker (actual lengths are shifted up by one).
        return null;
    }
    return in.readBytesRef(encoded - 1);
}
Example 3
Source File: SignificantStringTerms.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Deserializes this bucket from the stream: term bytes, subset and superset
 * document frequencies, significance score, then sub-aggregations — the read
 * order must match the corresponding writeTo.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    termBytes = in.readBytesRef();
    subsetDf = in.readVLong();
    supersetDf = in.readVLong();
    score = in.readDouble();
    aggregations = InternalAggregations.readAggregations(in);
}
Example 4
Source File: StringTerms.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Deserializes this terms bucket: term bytes, doc count, an optional doc-count
 * error (only present on the wire when {@code showDocCountError} is set), and
 * the bucket's sub-aggregations.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    termBytes = in.readBytesRef();
    docCount = in.readVLong();
    // -1 marks "error not transmitted"; the error term is only serialized on demand.
    docCountError = showDocCountError ? in.readLong() : -1;
    aggregations = InternalAggregations.readAggregations(in);
}
Example 5
Source File: AggregatedDfs.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Deserializes the aggregated DFS statistics: a term-statistics map, field
 * statistics, and the aggregate maxDoc, in wire order.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    final int entryCount = in.readVInt();
    termStatistics = HppcMaps.newMap(entryCount);
    for (int entry = 0; entry < entryCount; entry++) {
        // Key: field name + term bytes. Value: term bytes, docFreq, totalTermFreq
        // (totalTermFreq is stored shifted by one; subOne undoes the encoding).
        Term key = new Term(in.readString(), in.readBytesRef());
        TermStatistics value = new TermStatistics(in.readBytesRef(), in.readVLong(),
                DfsSearchResult.subOne(in.readVLong()));
        termStatistics.put(key, value);
    }
    fieldStatistics = DfsSearchResult.readFieldStats(in);
    maxDoc = in.readVLong();
}
Example 6
Source File: StoreFileMetaData.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Deserializes the store-file metadata: file name, length, optional checksum,
 * the (optional) Lucene version string that wrote the file, and the content hash.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    name = in.readString();
    length = in.readVLong();
    checksum = in.readOptionalString();
    // parseVersionLenient tolerates an absent or unparseable version, yielding null.
    writtenBy = Lucene.parseVersionLenient(in.readOptionalString(), null);
    hash = in.readBytesRef();
}
Example 7
Source File: Lucene.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Reads a {@link FieldDoc} from the stream: a vInt count of sort values, each
 * encoded as a one-byte type tag followed by the value itself, then the doc id
 * and the score.
 *
 * @param in the stream to read from
 * @return the reconstructed FieldDoc
 * @throws IOException if reading fails or an unknown type tag is encountered
 */
public static FieldDoc readFieldDoc(StreamInput in) throws IOException {
    final Comparable[] sortValues = new Comparable[in.readVInt()];
    for (int slot = 0; slot < sortValues.length; slot++) {
        final byte tag = in.readByte();
        switch (tag) {
            case 0:  sortValues[slot] = null; break;
            case 1:  sortValues[slot] = in.readString(); break;
            case 2:  sortValues[slot] = in.readInt(); break;
            case 3:  sortValues[slot] = in.readLong(); break;
            case 4:  sortValues[slot] = in.readFloat(); break;
            case 5:  sortValues[slot] = in.readDouble(); break;
            case 6:  sortValues[slot] = in.readByte(); break;
            case 7:  sortValues[slot] = in.readShort(); break;
            case 8:  sortValues[slot] = in.readBoolean(); break;
            case 9:  sortValues[slot] = in.readBytesRef(); break;
            default: throw new IOException("Can't match type [" + tag + "]");
        }
    }
    // Java evaluates constructor arguments left-to-right, so the doc id is read
    // from the stream before the score, matching the serialized order.
    return new FieldDoc(in.readVInt(), in.readFloat(), sortValues);
}
Example 8
Source File: PercolateShardResponse.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Deserializes this shard-level percolate response. Wire order: percolator type
 * id, requested size, match count, the match ids, a parallel score array,
 * per-match highlight field maps, optional aggregations, and (behind a boolean
 * flag) a list of sibling pipeline aggregators. The read order must mirror the
 * corresponding writeTo exactly.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    percolatorTypeId = in.readByte();
    requestedSize = in.readVInt();
    count = in.readVLong();
    matches = new BytesRef[in.readVInt()];
    for (int i = 0; i < matches.length; i++) {
        matches[i] = in.readBytesRef();
    }
    // Scores; presumably parallel to matches, though the two array lengths are
    // serialized independently — TODO confirm against writeTo.
    scores = new float[in.readVInt()];
    for (int i = 0; i < scores.length; i++) {
        scores[i] = in.readFloat();
    }
    // One highlight-field map per serialized entry, appended to hls.
    int size = in.readVInt();
    for (int i = 0; i < size; i++) {
        int mSize = in.readVInt();
        Map<String, HighlightField> fields = new HashMap<>();
        for (int j = 0; j < mSize; j++) {
            fields.put(in.readString(), HighlightField.readHighlightField(in));
        }
        hls.add(fields);
    }
    aggregations = InternalAggregations.readOptionalAggregations(in);
    // Pipeline aggregators are optional; each is keyed by a serialized type used
    // to look up its stream reader.
    if (in.readBoolean()) {
        int pipelineAggregatorsSize = in.readVInt();
        List<SiblingPipelineAggregator> pipelineAggregators = new ArrayList<>(pipelineAggregatorsSize);
        for (int i = 0; i < pipelineAggregatorsSize; i++) {
            BytesReference type = in.readBytesReference();
            PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in);
            pipelineAggregators.add((SiblingPipelineAggregator) pipelineAggregator);
        }
        this.pipelineAggregators = pipelineAggregators;
    }
}
Example 9
Source File: TermsByQueryResponse.java From siren-join with GNU Affero General Public License v3.0 | 5 votes |
/**
 * Deserialize the terms-by-query response: pruned flag, number of terms, the
 * terms encoding, and the encoded terms bytes, in wire order.
 *
 * @param in the input
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    isPruned = in.readBoolean();
    size = in.readVInt();
    // Encoding is transmitted as the enum ordinal. NOTE(review): this throws
    // ArrayIndexOutOfBoundsException if the peers' enum constants ever diverge.
    termsEncoding = TermsByQueryRequest.TermsEncoding.values()[in.readVInt()];
    encodedTerms = in.readBytesRef();
}
Example 10
Source File: InternalDateHierarchy.java From elasticsearch-aggregation-pathhierarchy with MIT License | 5 votes |
/**
 * Read from a stream: bucket key, name, doc count, sub-aggregations, hierarchy
 * level, and the path components, in wire order.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
public InternalBucket(StreamInput in) throws IOException {
    key = in.readBytesRef();
    name = in.readString();
    docCount = in.readLong();
    aggregations = new InternalAggregations(in);
    level = in.readInt();
    final int pathCount = in.readInt();
    paths = new String[pathCount];
    for (int p = 0; p < pathCount; p++) {
        paths[p] = in.readString();
    }
}
Example 11
Source File: InternalPathHierarchy.java From elasticsearch-aggregation-pathhierarchy with MIT License | 5 votes |
/**
 * Read from a stream: term bytes, doc count, sub-aggregations, level, minimum
 * depth, basename, and the path components, in wire order.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
public InternalBucket(StreamInput in) throws IOException {
    termBytes = in.readBytesRef();
    docCount = in.readLong();
    aggregations = new InternalAggregations(in);
    level = in.readInt();
    minDepth = in.readInt();
    basename = in.readString();
    final int pathCount = in.readInt();
    paths = new String[pathCount];
    for (int p = 0; p < pathCount; p++) {
        paths[p] = in.readString();
    }
}
Example 12
Source File: InternalPathHierarchy.java From elasticsearch-aggregation-pathhierarchy with MIT License | 5 votes |
/**
 * Read from a stream: ordering, minDocCount, required and shard sizes, the
 * count of nodes in other hierarchies, the path separator, and the buckets.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
public InternalPathHierarchy(StreamInput in) throws IOException {
    super(in);
    order = InternalOrder.Streams.readOrder(in);
    minDocCount = in.readVLong();
    requiredSize = readSize(in);
    shardSize = readSize(in);
    otherHierarchyNodes = in.readVLong();
    separator = in.readBytesRef();
    final int bucketCount = in.readInt();
    this.buckets = new ArrayList<>(bucketCount);
    for (int b = 0; b < bucketCount; b++) {
        this.buckets.add(new InternalBucket(in));
    }
}
Example 13
Source File: InternalGeoShape.java From elasticsearch-plugin-geoshape with MIT License | 5 votes |
/**
 * Read from a stream: the shape bytes (presumably well-known-binary, per the
 * field name — TODO confirm), its hash, the real geometry type, area, doc
 * count, and sub-aggregations, in wire order.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
public InternalBucket(StreamInput in) throws IOException {
    wkb = in.readBytesRef();
    wkbHash = in.readString();
    realType = in.readString();
    area = in.readDouble();
    docCount = in.readLong();
    aggregations = new InternalAggregations(in);
}
Example 14
Source File: StoreFileMetaData.java From crate with Apache License 2.0 | 5 votes |
/**
 * Read from a stream: file name, length, checksum, the Lucene version that
 * wrote the file, and the content hash, in wire order.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
public StoreFileMetaData(StreamInput in) throws IOException {
    name = in.readString();
    length = in.readVLong();
    checksum = in.readString();
    try {
        writtenBy = Version.parse(in.readString());
    } catch (ParseException e) {
        // NOTE(review): presumably the writer always emits a parseable version
        // string, so an unparseable one is treated as a bug rather than an I/O
        // error — confirm against the serializing side.
        throw new AssertionError(e);
    }
    hash = in.readBytesRef();
}
Example 15
Source File: FieldStats.java From Elasticsearch with Apache License 2.0 | 4 votes |
/**
 * Deserializes the min and max field values as raw bytes, after the
 * superclass has read the shared statistics.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    minValue = in.readBytesRef();
    maxValue = in.readBytesRef();
}