Java Code Examples for org.apache.hadoop.hbase.util.Bytes#toLong()
The following examples show how to use
org.apache.hadoop.hbase.util.Bytes#toLong().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
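Bytes#toLong() decodes an 8-byte big-endian value, typically one produced by Bytes.toBytes(long); overloads that take an offset (and optionally a length, which must equal Bytes.SIZEOF_LONG) read the value out of a larger buffer. As a quick orientation before the project examples, here is a minimal self-contained sketch of that round trip. It is not taken from any of the projects below; the class name and the "prefix" composite buffer are made up for illustration.

import org.apache.hadoop.hbase.util.Bytes;

public class BytesToLongDemo { // hypothetical class name, for illustration only
  public static void main(String[] args) {
    // Bytes.toBytes(long) writes a long as 8 big-endian bytes; Bytes.toLong(byte[]) reverses it.
    byte[] encoded = Bytes.toBytes(1234567890123L);
    long decoded = Bytes.toLong(encoded); // 1234567890123

    // The offset (and offset/length) overloads decode a long embedded in a larger
    // buffer, e.g. a composite row key; the length, when given, must be Bytes.SIZEOF_LONG.
    byte[] buffer = Bytes.add(Bytes.toBytes("prefix"), encoded);
    long fromOffset = Bytes.toLong(buffer, "prefix".length());
    long fromOffsetAndLength = Bytes.toLong(buffer, "prefix".length(), Bytes.SIZEOF_LONG);

    System.out.println(decoded + " " + fromOffset + " " + fromOffsetAndLength);
  }
}

Several of the examples below use exactly these overloads, for instance to pull a counter or timestamp out of a cell's value array at a given offset.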
Example 1
Source File: StoreFileReader.java From hbase with Apache License 2.0 | 6 votes |
public Map<byte[], byte[]> loadFileInfo() throws IOException {
  Map<byte[], byte[]> fi = reader.getHFileInfo();

  byte[] b = fi.get(BLOOM_FILTER_TYPE_KEY);
  if (b != null) {
    bloomFilterType = BloomType.valueOf(Bytes.toString(b));
  }

  byte[] p = fi.get(BLOOM_FILTER_PARAM_KEY);
  if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) {
    prefixLength = Bytes.toInt(p);
  }

  lastBloomKey = fi.get(LAST_BLOOM_KEY);
  if (bloomFilterType == BloomType.ROWCOL) {
    lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue(lastBloomKey, 0, lastBloomKey.length);
  }

  byte[] cnt = fi.get(DELETE_FAMILY_COUNT);
  if (cnt != null) {
    deleteFamilyCnt = Bytes.toLong(cnt);
  }

  return fi;
}
Example 2
Source File: DataJanitorState.java From phoenix-tephra with Apache License 2.0 | 6 votes |
/**
 * Delete prune upper bounds for the regions that are not in the given exclude set, and the
 * prune upper bound is less than the given value.
 * After the invalid list is pruned up to deletionPruneUpperBound, we do not need entries for regions that have
 * prune upper bound less than deletionPruneUpperBound. We however limit the deletion to only regions that are
 * no longer in existence (due to deletion, etc.), to avoid update/delete race conditions.
 *
 * @param deletionPruneUpperBound prune upper bound below which regions will be deleted
 * @param excludeRegions set of regions that should not be deleted
 * @throws IOException when not able to delete data in HBase
 */
public void deletePruneUpperBounds(long deletionPruneUpperBound, SortedSet<byte[]> excludeRegions)
    throws IOException {
  try (Table stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (!excludeRegions.contains(region)) {
          byte[] timeBytes = next.getValue(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (timeBytes != null) {
            long pruneUpperBoundRegion = Bytes.toLong(timeBytes);
            if (pruneUpperBoundRegion < deletionPruneUpperBound) {
              stateTable.delete(new Delete(next.getRow()));
            }
          }
        }
      }
    }
  }
}
Example 3
Source File: CellCodec.java From hbase with Apache License 2.0 | 6 votes |
@Override
protected Cell parseCell() throws IOException {
  byte[] row = readByteArray(this.in);
  byte[] family = readByteArray(in);
  byte[] qualifier = readByteArray(in);
  byte[] longArray = new byte[Bytes.SIZEOF_LONG];
  IOUtils.readFully(this.in, longArray);
  long timestamp = Bytes.toLong(longArray);
  byte type = (byte) this.in.read();
  byte[] value = readByteArray(in);
  // Read memstore version
  byte[] memstoreTSArray = new byte[Bytes.SIZEOF_LONG];
  IOUtils.readFully(this.in, memstoreTSArray);
  long memstoreTS = Bytes.toLong(memstoreTSArray);
  return cellBuilder.clear()
      .setRow(row)
      .setFamily(family)
      .setQualifier(qualifier)
      .setTimestamp(timestamp)
      .setType(type)
      .setValue(value)
      .setSequenceId(memstoreTS)
      .build();
}
Example 4
Source File: WordCountClient.java From storm-hbase with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
  Configuration config = HBaseConfiguration.create();
  if (args.length > 0) {
    config.set("hbase.rootdir", args[0]);
  }

  HTable table = new HTable(config, "WordCount");

  for (String word : WordSpout.words) {
    Get get = new Get(Bytes.toBytes(word));
    Result result = table.get(get);

    byte[] countBytes = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("count"));
    byte[] wordBytes = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("word"));

    String wordStr = Bytes.toString(wordBytes);
    System.out.println(wordStr);
    long count = Bytes.toLong(countBytes);
    System.out.println("Word: '" + wordStr + "', Count: " + count);
  }
}
Example 5
Source File: DefaultHBaseSerde.java From envelope with Apache License 2.0 | 6 votes |
private static Object getColumnValue(byte[] source, int offset, int length, String type) {
  switch (type) {
    case ConfigurationDataTypes.INT:
      return Bytes.toInt(source, offset, length);
    case ConfigurationDataTypes.LONG:
      return Bytes.toLong(source, offset, length);
    case ConfigurationDataTypes.BOOLEAN:
      return Bytes.toBoolean(source);
    case ConfigurationDataTypes.FLOAT:
      return Bytes.toFloat(source);
    case ConfigurationDataTypes.DOUBLE:
      return Bytes.toDouble(source);
    case ConfigurationDataTypes.STRING:
      return Bytes.toString(source, offset, length);
    default:
      LOG.error("Unsupported column type: {}", type);
      throw new IllegalArgumentException("Unsupported column type: " + type);
  }
}
Example 6
Source File: BalanceBooks.java From phoenix-tephra with Apache License 2.0 | 5 votes |
private long getCurrentBalance(int id) throws IOException {
  Result r = txTable.get(new Get(Bytes.toBytes(id)));
  byte[] balanceBytes = r.getValue(FAMILY, COL);
  if (balanceBytes == null) {
    return 0;
  }
  return Bytes.toLong(balanceBytes);
}
Example 7
Source File: HBaseEventSource.java From mewbase with MIT License | 5 votes |
private long getTimestampEventNumber(final Table table, long epochTimestampInclusive) throws IOException {
  Scan scan = new Scan();
  scan.setTimeRange(epochTimestampInclusive, Long.MAX_VALUE);
  scan.setMaxResultSize(1); // i.e. search only for the nearest item.
  scan.addColumn(HBaseEventSink.colFamily, HBaseEventSink.qualifier);
  final ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  long timeStampedEventNumber = -1L;
  if (result != null && !result.isEmpty()) {
    timeStampedEventNumber = Bytes.toLong(result.getRow());
  }
  log.info("Time Stamped Event for " + table.getName() + " is " + timeStampedEventNumber);
  return timeStampedEventNumber;
}
Example 8
Source File: TestRegionIncrement.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Have each thread update its own Cell. Avoid contention with another thread.
 */
@Test
public void testContendedAcrossCellsIncrement() throws IOException, InterruptedException {
  final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
      TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
  long startTime = System.currentTimeMillis();
  try {
    CrossRowCellIncrementer[] threads = new CrossRowCellIncrementer[THREAD_COUNT];
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new CrossRowCellIncrementer(i, INCREMENT_COUNT, region, THREAD_COUNT);
    }
    for (int i = 0; i < threads.length; i++) {
      threads[i].start();
    }
    for (int i = 0; i < threads.length; i++) {
      threads[i].join();
    }
    RegionScanner regionScanner = region.getScanner(new Scan());
    List<Cell> cells = new ArrayList<>(100);
    while (regionScanner.next(cells)) {
      continue;
    }
    assertEquals(THREAD_COUNT, cells.size());
    long total = 0;
    for (Cell cell : cells) {
      total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    }
    assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
  } finally {
    closeRegion(region);
    LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
  }
}
Example 9
Source File: HBaseTimestampStorage.java From phoenix-omid with Apache License 2.0 | 5 votes |
@Override
public long getMaxTimestamp() throws IOException {
  Get get = new Get(TSO_ROW);
  get.addColumn(cfName, TSO_QUALIFIER);

  Result result = table.get(get);
  if (result.containsColumn(cfName, TSO_QUALIFIER)) {
    return Bytes.toLong(result.getValue(cfName, TSO_QUALIFIER));
  } else {
    // This happens for example when a new cluster is created
    return INITIAL_MAX_TS_VALUE;
  }
}
Example 10
Source File: AppAggregationKeyConverter.java From hraven with Apache License 2.0 | 5 votes |
public AppAggregationKey fromBytes(byte[][] splitBytes) {
  long runId = splitBytes.length > 1 ? Long.MAX_VALUE - Bytes.toLong(splitBytes[1]) : 0;
  return new AppAggregationKey(Bytes.toString(splitBytes[0]),
      splitBytes.length > 2 ? Bytes.toString(splitBytes[2]) : null,
      splitBytes.length > 3 ? Bytes.toString(splitBytes[3]) : null, runId);
}
Example 11
Source File: WriteHeavyIncrementObserver.java From hbase with Apache License 2.0 | 5 votes |
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get, List<Cell> result)
    throws IOException {
  Scan scan =
      new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
  NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums =
      new TreeMap<>(Bytes.BYTES_COMPARATOR);
  get.getFamilyMap().forEach((cf, cqs) -> {
    NavigableMap<byte[], MutableLong> ss = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    sums.put(cf, ss);
    cqs.forEach(cq -> {
      ss.put(cq, new MutableLong(0));
      scan.addColumn(cf, cq);
    });
  });
  List<Cell> cells = new ArrayList<>();
  try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
    boolean moreRows;
    do {
      moreRows = scanner.next(cells);
      for (Cell cell : cells) {
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] qualifier = CellUtil.cloneQualifier(cell);
        long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
        sums.get(family).get(qualifier).add(value);
      }
      cells.clear();
    } while (moreRows);
  }
  sums.forEach((cf, m) -> m.forEach((cq, s) -> result
      .add(createCell(get.getRow(), cf, cq, HConstants.LATEST_TIMESTAMP, s.longValue()))));
  c.bypass();
}
Example 12
Source File: ClientKeyValue.java From phoenix with BSD 3-Clause "New" or "Revised" License | 5 votes |
@Override
public boolean updateLatestStamp(byte[] now) {
  if (this.isLatestTimestamp()) {
    // unfortunately, this is a bit slower than the usual kv, but we don't expect this to happen
    // all that often on the client (unless users are updating the ts this way), as it generally
    // happens on the server
    this.ts = Bytes.toLong(now);
    return true;
  }
  return false;
}
Example 13
Source File: LongComparator.java From hbase with Apache License 2.0 | 5 votes |
/**
 * @param pbBytes A pb serialized {@link LongComparator} instance
 * @return An instance of {@link LongComparator} made from <code>bytes</code>
 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 * @see #toByteArray
 */
public static LongComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
  ComparatorProtos.LongComparator proto;
  try {
    proto = ComparatorProtos.LongComparator.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray()));
}
Example 14
Source File: HBaseTypeUtils.java From flink with Apache License 2.0 | 5 votes |
/**
 * Deserialize byte array to Java Object with the given type.
 */
public static Object deserializeToObject(byte[] value, int typeIdx, Charset stringCharset) {
  switch (typeIdx) {
    case 0: // byte[]
      return value;
    case 1: // String
      return new String(value, stringCharset);
    case 2: // byte
      return value[0];
    case 3:
      return Bytes.toShort(value);
    case 4:
      return Bytes.toInt(value);
    case 5:
      return Bytes.toLong(value);
    case 6:
      return Bytes.toFloat(value);
    case 7:
      return Bytes.toDouble(value);
    case 8:
      return Bytes.toBoolean(value);
    case 9: // sql.Timestamp encoded as long
      return new Timestamp(Bytes.toLong(value));
    case 10: // sql.Date encoded as long
      return new Date(Bytes.toLong(value));
    case 11: // sql.Time encoded as long
      return new Time(Bytes.toLong(value));
    case 12:
      return Bytes.toBigDecimal(value);
    case 13:
      return new BigInteger(value);
    default:
      throw new IllegalArgumentException("unsupported type index:" + typeIdx);
  }
}
Example 15
Source File: IntegrationTestBigLinkedList.java From hbase with Apache License 2.0 | 4 votes |
/**
 * Dump out extra info around references if there are any. Helps debugging.
 * @return StringBuilder filled with references if any.
 * @throws IOException
 */
private StringBuilder dumpExtraInfoOnRefs(final BytesWritable key, final Context context,
    final List<byte[]> refs) throws IOException {
  StringBuilder refsSb = null;
  if (refs.isEmpty()) return refsSb;
  refsSb = new StringBuilder();
  String comma = "";
  // If a row is a reference but has no define, print the content of the row that has
  // this row as a 'prev'; it will help debug. The missing row was written just before
  // the row we are dumping out here.
  TableName tn = getTableName(context.getConfiguration());
  try (Table t = this.connection.getTable(tn)) {
    for (byte[] ref : refs) {
      Result r = t.get(new Get(ref));
      List<Cell> cells = r.listCells();
      String ts = (cells != null && !cells.isEmpty()) ?
          new java.util.Date(cells.get(0).getTimestamp()).toString() : "";
      byte[] b = r.getValue(FAMILY_NAME, COLUMN_CLIENT);
      String jobStr = (b != null && b.length > 0) ? Bytes.toString(b) : "";
      b = r.getValue(FAMILY_NAME, COLUMN_COUNT);
      long count = (b != null && b.length > 0) ? Bytes.toLong(b) : -1;
      b = r.getValue(FAMILY_NAME, COLUMN_PREV);
      String refRegionLocation = "";
      String keyRegionLocation = "";
      if (b != null && b.length > 0) {
        try (RegionLocator rl = this.connection.getRegionLocator(tn)) {
          HRegionLocation hrl = rl.getRegionLocation(b);
          if (hrl != null) refRegionLocation = hrl.toString();
          // Key here probably has trailing zeros on it.
          hrl = rl.getRegionLocation(key.getBytes());
          if (hrl != null) keyRegionLocation = hrl.toString();
        }
      }
      LOG.error("Extras on ref without a def, ref=" + Bytes.toStringBinary(ref) +
          ", refPrevEqualsKey=" +
          (Bytes.compareTo(key.getBytes(), 0, key.getLength(), b, 0, b.length) == 0) +
          ", key=" + Bytes.toStringBinary(key.getBytes(), 0, key.getLength()) +
          ", ref row date=" + ts + ", jobStr=" + jobStr +
          ", ref row count=" + count +
          ", ref row regionLocation=" + refRegionLocation +
          ", key row regionLocation=" + keyRegionLocation);
      refsSb.append(comma);
      comma = ",";
      refsSb.append(Bytes.toStringBinary(ref));
    }
  }
  return refsSb;
}
Example 16
Source File: Mutation.java From hbase with Apache License 2.0 | 4 votes |
@Override
public void setTimestamp(byte[] ts) {
  timestamp = Bytes.toLong(ts);
}
Example 17
Source File: HBaseConnectorITCase.java From flink with Apache License 2.0 | 4 votes |
public long eval(byte[] bytes) {
  return Bytes.toLong(bytes);
}
Example 18
Source File: JobCountersSerDeser.java From eagle with Apache License 2.0 | 4 votes |
@Override
public JobCounters deserialize(byte[] bytes) {
  JobCounters counters = new JobCounters();
  final int length = bytes.length;
  if (length < 4) {
    return counters;
  }

  final Map<String, Map<String, Long>> groupMap = counters.getCounters();
  int pos = 0;
  final int totalGroups = Bytes.toInt(bytes, pos);
  pos += 4;

  for (int i = 0; i < totalGroups; ++i) {
    final int groupIndex = Bytes.toInt(bytes, pos);
    pos += 4;
    final int totalCounters = Bytes.toInt(bytes, pos);
    pos += 4;
    final int nextGroupPos = pos + (totalCounters * 12);
    try {
      final CounterGroupKey groupKey = getCounterGroup(groupIndex);
      if (groupKey == null) {
        throw new JobCounterException("Group index " + groupIndex + " is not defined");
      }
      final Map<String, Long> counterMap = new TreeMap<String, Long>();
      groupMap.put(groupKey.getName(), counterMap);
      for (int j = 0; j < totalCounters; ++j) {
        final int counterIndex = Bytes.toInt(bytes, pos);
        pos += 4;
        final long value = Bytes.toLong(bytes, pos);
        pos += 8;
        final CounterKey counterKey = groupKey.getCounterKeyByID(counterIndex);
        if (counterKey == null) {
          continue;
        }
        counterMap.put(counterKey.getNames().get(0), value);
      }
    } catch (JobCounterException ex) {
      // skip the group
      pos = nextGroupPos;
    }
  }
  return counters;
}
Example 19
Source File: Table.java From hbase with Apache License 2.0 | 3 votes |
/**
 * Atomically increments a column value. If the column value already exists
 * and is not a big-endian long, this could throw an exception. If the column
 * value does not yet exist it is initialized to <code>amount</code> and
 * written to the specified column.
 *
 * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
 * scenario you will lose any increments that have not been flushed.
 * @param row The row that contains the cell to increment.
 * @param family The column family of the cell to increment.
 * @param qualifier The column qualifier of the cell to increment.
 * @param amount The amount to increment the cell with (or decrement, if the
 * amount is negative).
 * @param durability The persistence guarantee for this increment.
 * @return The new value, post increment.
 * @throws IOException if a remote or network exception occurs.
 */
default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
    long amount, Durability durability) throws IOException {
  Increment increment = new Increment(row)
      .addColumn(family, qualifier, amount)
      .setDurability(durability);
  Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
  return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
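As a closing usage note: a counter written by incrementColumnValue is stored as an 8-byte big-endian long, so it can also be read back with an ordinary Get and decoded with Bytes#toLong(). The sketch below illustrates this; it is not taken from the HBase source, and the CounterReadBack class name, the "counters" table, and the row/column names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CounterReadBack { // hypothetical class, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("counters"))) { // hypothetical table
      byte[] row = Bytes.toBytes("page:index.html"); // hypothetical row key
      byte[] cf = Bytes.toBytes("cf");
      byte[] qualifier = Bytes.toBytes("hits");

      // Atomically bump the counter; the return value is already decoded to a long.
      long afterIncrement = table.incrementColumnValue(row, cf, qualifier, 1L);

      // Reading the same cell back with a Get yields the raw 8-byte big-endian value,
      // which Bytes.toLong() decodes.
      Result result = table.get(new Get(row));
      long stored = Bytes.toLong(result.getValue(cf, qualifier));
      System.out.println("after increment = " + afterIncrement + ", stored = " + stored);
    }
  }
}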