Java Code Examples for org.apache.accumulo.core.data.Key#equals()
The following examples show how to use org.apache.accumulo.core.data.Key#equals().
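Key#equals is overloaded: equals(Object) compares every component of the key (row, column family, column qualifier, column visibility, timestamp, and delete flag), while equals(Key, PartialKey) compares only the leading components named by the PartialKey constant, which is what most of the examples below rely on. Here is a minimal sketch of the difference; the class name KeyEqualsDemo and the literal row/family/qualifier values are invented for illustration.

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;

public class KeyEqualsDemo {
    public static void main(String[] args) {
        // Hypothetical keys: same row and column family, different column qualifiers.
        Key a = new Key("row1", "fam1", "qualA");
        Key b = new Key("row1", "fam1", "qualB");

        System.out.println(a.equals(b));                                // false: full comparison
        System.out.println(a.equals(b, PartialKey.ROW));                // true: rows match
        System.out.println(a.equals(b, PartialKey.ROW_COLFAM));         // true: row and family match
        System.out.println(a.equals(b, PartialKey.ROW_COLFAM_COLQUAL)); // false: qualifiers differ
    }
}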
Example 1
Source File: TimestampSkippingIterator.java (from fluo, Apache License 2.0)
public void skipToTimestamp(Key curCol, long timestamp) throws IOException {
    source.next();
    int count = 0;
    while (source.hasTop() && curCol.equals(source.getTopKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS)
            && timestamp < source.getTopKey().getTimestamp()) {
        if (count == 10) {
            // seek to prefix
            Key seekKey = new Key(curCol);
            seekKey.setTimestamp(timestamp);
            Range newRange = new Range(seekKey, true, range.getEndKey(), range.isEndKeyInclusive());
            seek(newRange);
            break;
        }
        source.next();
        count++;
    }
}
Example 2
Source File: GlobalIndexRangeSamplingIterator.java (from datawave, Apache License 2.0)
public void findTop() throws IOException {
    long count = 0;
    // Sum the Uid.List (value) count for this term (row)
    // Copy the starting key or this won't work....
    Key startKey = new Key(this.iterator.getTopKey());
    do {
        // Get the shard id and datatype from the colq
        String colq = this.iterator.getTopKey().getColumnQualifier().toString();
        // Parse the UID.List object from the value
        Uid.List uidList = null;
        try {
            uidList = Uid.List.parseFrom(this.iterator.getTopValue().get());
            // Add the count for this shard to the total count for the term.
            count += uidList.getCOUNT();
        } catch (InvalidProtocolBufferException e) {
            count = Long.MAX_VALUE;
            log.debug("Error deserializing Uid.List at: " + this.iterator.getTopKey());
            break;
        }
        this.iterator.next();
    } while (this.iterator.hasTop() && startKey.equals(this.iterator.getTopKey(), PartialKey.ROW));
    key = new Key(startKey);
    value = new Value(Long.toString(count).getBytes());
}
Example 3
Source File: ShardUidMappingIterator.java (from datawave, Apache License 2.0)
/**
 * Cache keys that map to the same base uid key
 *
 * @param baseUidKey
 * @throws IOException
 */
protected void cacheKeys(Key baseUidKey) throws IOException {
    this.cacheBaseUidKey = baseUidKey;
    // now cache data until we run out or move to a new base uid
    while (super.topKey != null && baseUidKey.equals(getBaseUidKey(super.topKey), PartialKey.ROW_COLFAM)) {
        cacheAdd(super.topKey, super.topValue);
        super.next();
    }
}
Example 4
Source File: MapReduceStatePersisterTest.java (from datawave, Apache License 2.0)
@Test
public void testUpdateState() throws Exception {
    // create the initial entry
    testPersistentCreate();
    PowerMock.resetAll();
    // Get ready to call updateState
    HashMap<String,String> trackingMap = new HashMap<>();
    expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap);
    expect(connectionFactory.getConnection(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(connection);
    connectionFactory.returnConnection(connection);
    expect(connectionFactory.getTrackingMap(EasyMock.anyObject())).andReturn(trackingMap);
    expect(connectionFactory.getConnection(EasyMock.eq(AccumuloConnectionFactory.Priority.ADMIN), EasyMock.eq(trackingMap))).andReturn(connection);
    connectionFactory.returnConnection(connection);
    replayAll();

    bean.updateState(mapReduceJobId, MapReduceState.FAILED);
    verifyAll();

    // Ensure that the new FAILED state made it into the table
    Key failedKey = new Key(id, sid, MapReduceStatePersisterBean.STATE + NULL + mapReduceJobId);
    Value failedValue = new Value(MapReduceState.FAILED.toString().getBytes());
    boolean found = false;
    Scanner s = connection.createScanner(TABLE_NAME, new Authorizations(auths));
    s.setRange(new Range(id));
    s.fetchColumnFamily(new Text(sid));
    for (Entry<Key,Value> entry : s) {
        if (entry.getKey().getColumnQualifier().toString().equals(MapReduceStatePersisterBean.STATE + NULL + mapReduceJobId)) {
            if (failedKey.equals(entry.getKey(), PartialKey.ROW_COLFAM_COLQUAL) && failedValue.equals(entry.getValue())) {
                found = true;
            }
        }
    }
    if (!found)
        fail("Updated state not found");
}
Example 5
Source File: PrefixEquality.java (from datawave, Apache License 2.0)
// True when 'test' matches 'docKey' on the key components named by the
// configured PartialKey prefix.
public boolean partOf(Key docKey, Key test) {
    return docKey.equals(test, prefix);
}
Example 6
Source File: SourceManager.java (from datawave, Apache License 2.0)
// Two keys match when both are null, or when both are non-null and equal
// across all key components.
private boolean keysMatch(Key childKey, Key lastKey) {
    return (childKey == null && lastKey == null)
            || (childKey != null && lastKey != null && childKey.equals(lastKey));
}
Example 7
Source File: ChunkInputStream.java (from accumulo-examples, Apache License 2.0)
private int fill() throws IOException {
    if (source == null || !source.hasNext()) {
        if (gotEndMarker)
            return count = pos = 0;
        else
            throw new IOException("no end chunk marker but source has no data");
    }
    Entry<Key,Value> entry = source.peek();
    Key thisKey = entry.getKey();
    log.debug("evaluating key: " + thisKey.toString());

    // check that we're still on the same row
    if (!thisKey.equals(currentKey, PartialKey.ROW)) {
        if (gotEndMarker)
            return -1;
        else {
            String currentRow = currentKey.getRow().toString();
            clear();
            throw new IOException("got to the end of the row without end chunk marker " + currentRow);
        }
    }
    log.debug("matches current key");

    // ok to advance the iterator
    source.next();

    // check that this is part of a chunk
    if (!thisKey.getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
        log.debug("skipping non-chunk key");
        return fill();
    }
    log.debug("is a chunk");

    // check that the chunk size is the same as the one being read
    if (currentChunkSize != FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 0)) {
        log.debug("skipping chunk of different size");
        return fill();
    }

    // add the visibility to the list if it's not there
    if (!currentVis.contains(thisKey.getColumnVisibility()))
        currentVis.add(thisKey.getColumnVisibility());

    // check to see if it is an identical chunk with a different visibility
    if (thisKey.getColumnQualifier().equals(currentKey.getColumnQualifier())) {
        log.debug("skipping identical chunk with different visibility");
        return fill();
    }

    if (gotEndMarker) {
        log.debug("got another chunk after end marker: " + currentKey.toString() + " " + thisKey.toString());
        clear();
        throw new IOException("found extra chunk after end marker");
    }

    // got new chunk of the same file, check that it's the next chunk
    int thisChunk = FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 4);
    if (thisChunk != currentChunk + 1) {
        log.debug("new chunk same file, unexpected chunkID: " + currentKey.toString() + " " + thisKey.toString());
        clear();
        throw new IOException("missing chunks between " + currentChunk + " and " + thisChunk);
    }

    currentKey = thisKey;
    currentChunk = thisChunk;
    buf = entry.getValue().get();
    pos = 0;

    // check to see if it's the last chunk
    if (buf.length == 0) {
        gotEndMarker = true;
        return fill();
    }

    return count = buf.length;
}
Example 8
Source File: RollbackCheckIterator.java (from fluo, Apache License 2.0)
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
        throws IOException {
    range = IteratorUtil.maximizeStartKeyTimeStamp(range);

    if (columnFamilies.isEmpty() && !inclusive) {
        source.seek(range, SnapshotIterator.NOTIFY_CF_SET, false);
    } else {
        source.seek(range, columnFamilies, inclusive);
    }

    Key curCol = new Key();

    if (source.hasTop()) {
        curCol.set(source.getTopKey());

        // TODO can this optimization cause problems?
        if (!curCol.equals(range.getStartKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
            return;
        }
    }

    long invalidationTime = -1;

    hasTop = false;
    while (source.hasTop()
            && curCol.equals(source.getTopKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
        ColumnType colType = ColumnType.from(source.getTopKey());
        long ts = source.getTopKey().getTimestamp() & ColumnConstants.TIMESTAMP_MASK;

        switch (colType) {
            case TX_DONE:
                source.skipToPrefix(curCol, ColumnType.WRITE);
                continue;
            case WRITE: {
                long timePtr = WriteValue.getTimestamp(source.getTopValue().get());
                if (timePtr > invalidationTime) {
                    invalidationTime = timePtr;
                }
                if (lockTime == timePtr) {
                    hasTop = true;
                    return;
                }
                if (lockTime > timePtr) {
                    source.skipToPrefix(curCol, ColumnType.DEL_LOCK);
                    continue;
                }
                break;
            }
            case DEL_LOCK: {
                if (ts > invalidationTime) {
                    invalidationTime = ts;
                }
                if (ts == lockTime) {
                    hasTop = true;
                    return;
                }
                if (lockTime > ts) {
                    source.skipToPrefix(curCol, ColumnType.LOCK);
                    continue;
                }
                break;
            }
            case RLOCK: {
                source.skipToPrefix(curCol, ColumnType.LOCK);
                continue;
            }
            case LOCK: {
                if (ts > invalidationTime) {
                    // nothing supersedes this lock, therefore the column is locked
                    hasTop = true;
                    return;
                }
                break;
            }
            case DATA: {
                // can stop looking
                return;
            }
            case ACK: {
                // do nothing if ACK
                break;
            }
            default:
                throw new IllegalArgumentException();
        }

        source.next();
    }
}