Java Code Examples for org.apache.hadoop.hbase.util.Bytes#toString()
The following examples show how to use org.apache.hadoop.hbase.util.Bytes#toString().
You can go to the original project or source file by following the link above each example.
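Before the examples, a quick orientation: Bytes#toString() decodes a byte array (or a sub-range of one) as a UTF-8 string, returns null when passed a null array, and is the inverse of Bytes.toBytes(String). Below is a minimal sketch of the two overloads used throughout the examples; the class and variable names are invented for illustration.

import org.apache.hadoop.hbase.util.Bytes;

public class BytesToStringDemo {
  public static void main(String[] args) {
    // Encode a string to UTF-8 bytes, then decode it back
    byte[] raw = Bytes.toBytes("hello:world");
    String whole = Bytes.toString(raw); // "hello:world"

    // Decode only a sub-range: 5 bytes starting at offset 6 -> "world"
    String part = Bytes.toString(raw, 6, 5);

    System.out.println(whole + " / " + part);
  }
}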
Example 1
Source File: TestMultithreadedTableMapper.java From hbase with Apache License 2.0
/**
 * Pass the key, and reversed value to reduce
 *
 * @param key
 * @param value
 * @param context
 * @throws IOException
 */
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
    throws IOException, InterruptedException {
  if (value.size() != 1) {
    throw new IOException("There should only be one input column");
  }
  Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
  if (!cf.containsKey(INPUT_FAMILY)) {
    throw new IOException("Wrong input columns. Missing: '" +
        Bytes.toString(INPUT_FAMILY) + "'.");
  }
  // Get the original value and reverse it
  String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY));
  StringBuilder newValue = new StringBuilder(originalValue);
  newValue.reverse();
  // Now set the value to be collected
  Put outval = new Put(key.get());
  outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
  context.write(key, outval);
}
Example 2
Source File: UpgradeUtil.java From phoenix with Apache License 2.0
private static void mapChildViewsToNamespace(String connUrl, Properties props,
    List<TableInfo> viewInfoList) throws SQLException, SnapshotCreationException,
    IllegalArgumentException, IOException, InterruptedException {
  String tenantId = null;
  String prevTenantId = null;
  PhoenixConnection conn = null;
  for (TableInfo viewInfo : viewInfoList) {
    tenantId = viewInfo.getTenantId() != null ? Bytes.toString(viewInfo.getTenantId()) : null;
    String viewName = SchemaUtil.getTableName(viewInfo.getSchemaName(), viewInfo.getTableName());
    if (!java.util.Objects.equals(prevTenantId, tenantId)) {
      if (tenantId != null) {
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
      } else {
        props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
      }
      if (conn != null) conn.close();
      conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class);
    }
    LOGGER.info(String.format("Upgrading view %s for tenantId %s..", viewName, tenantId));
    UpgradeUtil.upgradeTable(conn, viewName);
    prevTenantId = tenantId;
  }
}
Example 3
Source File: OccurrencePersistenceServiceImpl.java From occurrence with Apache License 2.0
/**
 * Note that the returned fragment here is a String that holds the actual xml or json snippet
 * for this occurrence, and not the Fragment object that is used elsewhere.
 *
 * @param key that identifies an occurrence
 * @return a String holding the original xml or json snippet for this occurrence
 */
@Override
public String getFragment(long key) {
  String fragment = null;
  try (Table table = connection.getTable(TableName.valueOf(fragmenterTableName))) {
    String saltedKey = getSaltedKey(key);
    Get get = new Get(Bytes.toBytes(saltedKey));
    Result result = table.get(get);
    if (result == null || result.isEmpty()) {
      LOG.info("Couldn't find occurrence for id [{}], returning null", key);
      return null;
    }
    byte[] rawFragment = result.getValue(Bytes.toBytes("fragment"), Bytes.toBytes("record"));
    if (rawFragment != null) {
      fragment = Bytes.toString(rawFragment);
    }
  } catch (IOException e) {
    throw new ServiceUnavailableException("Could not read from HBase", e);
  }
  return fragment;
}
Example 4
Source File: ImportTsv.java From hbase with Apache License 2.0
public long getTimestamp(long ts) throws BadTsvLineException {
  // Return ts if HBASE_TS_KEY is not configured in column spec
  if (!hasTimestamp()) {
    return ts;
  }
  String timeStampStr = Bytes.toString(lineBytes,
      getColumnOffset(timestampKeyColumnIndex),
      getColumnLength(timestampKeyColumnIndex));
  try {
    return Long.parseLong(timeStampStr);
  } catch (NumberFormatException nfe) {
    // treat this record as bad record
    throw new BadTsvLineException("Invalid timestamp " + timeStampStr);
  }
}
Example 5
Source File: SimpleHfileToRmdbMapper.java From super-cloudops with Apache License 2.0
@Override
public void map(ImmutableBytesWritable key, Result result, Context context)
    throws IOException, InterruptedException {
  Counter c = context.getCounter(DEFUALT_COUNTER_GROUP, DEFUALT_COUNTER_TOTAL);
  c.increment(1);

  LinkedHashMap<String, String> rowdata = new LinkedHashMap<>();
  rowdata.put("row", Bytes.toString(key.get()));

  Iterator<Cell> it = result.listCells().iterator();
  while (it.hasNext()) {
    Cell cell = it.next();
    byte[] qualifier = extractFieldByteArray(cell.getQualifierArray(),
        cell.getQualifierOffset(), cell.getQualifierLength());
    byte[] value = extractFieldByteArray(cell.getValueArray(),
        cell.getValueOffset(), cell.getValueLength());
    String _qualifier = Bytes.toString(qualifier);
    if (!HbaseMigrateUtils.isIgnoreHbaseQualifier(_qualifier)) {
      rowdata.put(_qualifier, Bytes.toString(value));
    }
  }

  // Insert sql.
  try {
    String insertSql = SimpleHfileToRmdbExporter.currentRmdbManager.buildInsertSql(rowdata);
    if (SimpleHfileToRmdbExporter.verbose) {
      log.info(format("Inserting [%s]: %s", c.getValue(), insertSql));
    }
    SimpleHfileToRmdbExporter.currentRmdbManager.getRmdbRepository().saveRowdata(insertSql);
    context.getCounter(DEFUALT_COUNTER_GROUP, DEFUALT_COUNTER_PROCESSED).increment(1);
  } catch (Exception e) {
    log.error(e);
  }
}
Example 6
Source File: DataBlockEncodingValidator.java From hbase with Apache License 2.0
/**
 * Check DataBlockEncodings of column families are compatible.
 *
 * @return number of column families with incompatible DataBlockEncoding
 * @throws IOException if a remote or network exception occurs
 */
private int validateDBE() throws IOException {
  int incompatibilities = 0;
  LOG.info("Validating Data Block Encodings");
  try (Connection connection = ConnectionFactory.createConnection(getConf());
      Admin admin = connection.getAdmin()) {
    List<TableDescriptor> tableDescriptors = admin.listTableDescriptors();
    String encoding = "";
    for (TableDescriptor td : tableDescriptors) {
      ColumnFamilyDescriptor[] columnFamilies = td.getColumnFamilies();
      for (ColumnFamilyDescriptor cfd : columnFamilies) {
        try {
          encoding = Bytes.toString(cfd.getValue(DATA_BLOCK_ENCODING));
          // IllegalArgumentException will be thrown if encoding is incompatible with 2.0
          DataBlockEncoding.valueOf(encoding);
        } catch (IllegalArgumentException e) {
          incompatibilities++;
          LOG.warn("Incompatible DataBlockEncoding for table: {}, cf: {}, encoding: {}",
              td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding);
        }
      }
    }
  }
  if (incompatibilities > 0) {
    LOG.warn("There are {} column families with incompatible Data Block Encodings. Do not " +
        "upgrade until these encodings are converted to a supported one. " +
        "Check https://s.apache.org/prefixtree for instructions.", incompatibilities);
  } else {
    LOG.info("The used Data Block Encodings are compatible with HBase 2.0.");
  }
  return incompatibilities;
}
Example 7
Source File: TransactionState.java From hbase-secondary-index with GNU General Public License v3.0
@Override public String toString() { return "startRow: " + (startRow == null ? "null" : Bytes.toString(startRow)) + ", endRow: " + (endRow == null ? "null" : Bytes.toString(endRow)); }
Example 8
Source File: IndexToolForNonTxGlobalIndexIT.java From phoenix with Apache License 2.0
@Test
public void testIndexToolVerifyAfterOption() throws Exception {
  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
    String schemaName = generateUniqueName();
    String dataTableName = generateUniqueName();
    String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
    String indexTableName = generateUniqueName();
    String viewName = generateUniqueName();
    String viewFullName = SchemaUtil.getTableName(schemaName, viewName);
    conn.createStatement().execute("CREATE TABLE " + dataTableFullName
        + " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER) " + tableDDLOptions);
    conn.commit();
    conn.createStatement().execute("CREATE VIEW " + viewFullName
        + " AS SELECT * FROM " + dataTableFullName);
    conn.commit();
    // Insert a row
    conn.createStatement().execute("upsert into " + viewFullName
        + " values (1, 'Phoenix', 12345)");
    conn.commit();
    // Configure IndexRegionObserver to fail the first write phase. This should not
    // lead to any change on index and thus the index verify during index rebuild should fail
    IndexRegionObserver.setIgnoreIndexRebuildForTesting(true);
    conn.createStatement().execute(String.format(
        "CREATE INDEX %s ON %s (NAME) INCLUDE (ZIP) ASYNC", indexTableName, viewFullName));
    // Run the index MR job and verify that the index table rebuild fails
    IndexToolIT.runIndexTool(directApi, useSnapshot, schemaName, viewName, indexTableName,
        null, -1, IndexTool.IndexVerifyType.AFTER);
    // The index tool output table should report that there is a missing index row
    Cell cell = IndexToolIT.getErrorMessageFromIndexToolOutputTable(conn, dataTableFullName,
        "_IDX_" + dataTableFullName);
    try {
      String expectedErrorMsg = IndexRebuildRegionScanner.ERROR_MESSAGE_MISSING_INDEX_ROW;
      String actualErrorMsg = Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength());
      assertTrue(expectedErrorMsg.equals(actualErrorMsg));
    } catch (Exception ex) {
      Assert.fail("Fail to parsing the error message from IndexToolOutputTable");
    }
    IndexRegionObserver.setIgnoreIndexRebuildForTesting(false);
  }
}
Example 9
Source File: SchemaUtil.java From phoenix with Apache License 2.0
public static String getTableNameFromFullName(byte[] tableName) {
  if (tableName == null) {
    return null;
  }
  if (isExistingTableMappedToPhoenixName(Bytes.toString(tableName))) {
    return Bytes.toString(tableName);
  }
  int index = indexOf(tableName, QueryConstants.NAME_SEPARATOR_BYTE);
  if (index < 0) {
    index = indexOf(tableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE);
    if (index < 0) {
      return Bytes.toString(tableName);
    }
  }
  return Bytes.toString(tableName, index + 1, tableName.length - index - 1);
}
Example 10
Source File: TestIgnoreUnknownFamily.java From hbase with Apache License 2.0
private void addStoreFileToKnownFamily(RegionInfo region) throws IOException {
  MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path regionDir = FSUtils.getRegionDirFromRootDir(
      CommonFSUtils.getRootDir(mfs.getConfiguration()), region);
  Path familyDir = new Path(regionDir, Bytes.toString(UNKNOWN_FAMILY));
  StoreFileWriter writer =
      new StoreFileWriter.Builder(mfs.getConfiguration(), mfs.getFileSystem())
          .withOutputDir(familyDir)
          .withFileContext(new HFileContextBuilder().build())
          .build();
  writer.close();
}
Example 11
Source File: HBaseGetRecordCursor.java From presto-hbase-connector with Apache License 2.0
@Override
public boolean advanceNextPosition() {
  String colName = null;
  try {
    // if we got an error when reading data, return false to end this reading.
    if (results == null) {
      return false;
    } else if (this.currentRecordIndex >= this.results.length) {
      InetAddress localhost = InetAddress.getLocalHost();
      // Random printing
      if (System.currentTimeMillis() % SYSTEMOUT_INTERVAL == 0) {
        log.info("BATCH GET RECORD. tableName=" + split.getTableName()
            + ", rowKey_0=" + split.getConstraint().get(0)
            + ", READ_DATA_TIME=" + (System.currentTimeMillis() - startTime)
            + " mill secs. recordCount=" + recordCount
            + ", startTime=" + new Date(startTime).toString()
            + ", localhost=" + localhost.getHostAddress()
            + ", specified worker ip: "
            + (split.getAddresses().size() > 0 ? split.getAddresses().get(0).toString() : ""));
      }
      return false;
    } else {
      fields = new Object[this.columnHandles.size()];
      ordinalPositionAndFieldsIndexMap.clear();
      int fieldIndex = 0;
      Result record = this.results[this.currentRecordIndex];
      for (Cell cell : record.rawCells()) {
        colName = Bytes.toString(arrayCopy(cell.getQualifierArray(),
            cell.getQualifierOffset(), cell.getQualifierLength()));
        HBaseColumnHandle hch = fieldIndexMap.get(colName.hashCode());
        if (hch == null) {
          continue;
        }
        Object value = matchValue(hch.getColumnType(),
            arrayCopy(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
        fields[fieldIndex] = value;
        ordinalPositionAndFieldsIndexMap.put(hch.getOrdinalPosition(), fieldIndex);
        fieldIndex++;
      }
      // Handle the value of rowKey column
      setRowKeyValue2FieldsAry(record, fieldIndex);
      this.currentRecordIndex++;
      return true;
    }
  } catch (Exception ex) {
    log.error(ex, ex.getMessage());
    this.close();
    log.error("fieldIndexMap.size=" + fieldIndexMap.size() + ", ERROR ColName=" + colName);
    fieldIndexMap.forEach((cName, columnHandle) ->
        log.error("fieldIndexMap: key=" + cName + ", hch.toString=" + columnHandle.toString()));
    return false;
  }
}
Example 12
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
private String getIndexTableName() {
  return Bytes.toString(TestTable.getTableName()) + "_index";
}
Example 13
Source File: DefaultHBaseSerde.java From envelope with Apache License 2.0
private static int addColumnValue(byte[] source, int offset, int endIndex,
    Object[] values, String type, int valueIndex, byte[] keySeparator, boolean last) {
  switch (type) {
    case ConfigurationDataTypes.INT:
      values[valueIndex] = Bytes.toInt(source, offset, 4);
      return 4;
    case ConfigurationDataTypes.LONG:
      values[valueIndex] = Bytes.toLong(source, offset, 8);
      return 8;
    case ConfigurationDataTypes.BOOLEAN:
      values[valueIndex] = Bytes.toInt(source, offset, 1);
      return 1;
    case ConfigurationDataTypes.FLOAT:
      values[valueIndex] = Bytes.toFloat(source, offset);
      return 4;
    case ConfigurationDataTypes.DOUBLE:
      values[valueIndex] = Bytes.toDouble(source, offset);
      return 8;
    case ConfigurationDataTypes.STRING:
      if (last) {
        // if the last field just grab it all
        values[valueIndex] = Bytes.toString(source, offset, endIndex - offset);
        return endIndex - offset;
      } else {
        int startIndex = offset;
        while (offset < endIndex) {
          if (source[offset] != keySeparator[0]) {
            offset++;
          } else {
            // Might be the start of a separator
            int startOfOffset = offset;
            int sepOffset = 1;
            boolean isSep = sepOffset == keySeparator.length;
            while (sepOffset < keySeparator.length && offset < endIndex
                && source[offset] == keySeparator[sepOffset]) {
              isSep = sepOffset == keySeparator.length - 1;
              offset++;
              sepOffset++;
            }
            if (isSep) {
              // We found a separator, so return the string before that
              values[valueIndex] = Bytes.toString(source, startIndex, startOfOffset - startIndex);
              return startOfOffset - startIndex;
            }
          }
        }
        // We reached the end which is an error except for the last field
        if (offset == endIndex - 1) {
          LOG.error("Reached end of array while looking for separator");
          throw new IllegalArgumentException("Reached end of array while looking for separator");
        } else {
          values[valueIndex] = Bytes.toString(source, startIndex, offset - startIndex);
          return offset - startIndex;
        }
      }
    default:
      LOG.error("Unsupported column type: {}", type);
      throw new IllegalArgumentException("Unsupported column type: " + type);
  }
}
Example 14
Source File: BackupSystemTable.java From hbase with Apache License 2.0
/**
 * Get table name from rowkey
 * @param cloneRow rowkey
 * @return table name
 */
private String getTableNameForReadLogTimestampMap(byte[] cloneRow) {
  String s = Bytes.toString(cloneRow);
  int index = s.lastIndexOf(NULL);
  return s.substring(index + 1);
}
Example 15
Source File: ObserverAggregators.java From Kylin with Apache License 2.0
@Override public String toString() { return "HCol [bFamily=" + Bytes.toString(family) + ", bQualifier=" + Bytes.toString(qualifier) + ", nMeasures=" + nMeasures + "]"; }
Example 16
Source File: TestHFileOutputFormat2.java From hbase with Apache License 2.0
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()){ Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir final Path storePath = new Path( CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // put some data in it and flush to create a storefile Put p = new Put(Bytes.toBytes("test")); p.addColumn(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1")); table.put(p); admin.flush(TABLE_NAMES[0]); assertEquals(1, util.countRows(table)); quickPoll(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; } }, 5000); // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table.getDescriptor(), regionLocator)), testDir, false); // Perform the actual load BulkLoadHFiles.create(conf).bulkLoad(table.getName(), testDir); // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("BulkLoadHFiles should put expected data in table", expectedRows + 1, util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); // minor compactions shouldn't get rid of the file admin.compact(TABLE_NAMES[0]); try { quickPoll(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; } }, 5000); throw new IOException("SF# = " + fs.listStatus(storePath).length); } catch (AssertionError ae) { // this is expected behavior } // a major compaction should work though admin.majorCompact(TABLE_NAMES[0]); quickPoll(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; } }, 5000); } finally { util.shutdownMiniCluster(); } }
Example 17
Source File: LookupBytesTable.java From Kylin with Apache License 2.0
@Override
protected String toString(ByteArray cell) {
  return Bytes.toString(cell.data);
}
Example 18
Source File: QueryEndpoint.java From yuzhouwan with Apache License 2.0
@Override
public void queryByRowKey(RpcController controller, DataProtos.DataQueryRequest request,
    RpcCallback<DataQueryResponse> done) {
  DataProtos.DataQueryResponse response = null;
  try {
    String rowKey = request.getRowKey();
    String regionStartKey = Bytes.toString(this.env.getRegion().getRegionInfo().getStartKey());
    if (request.getIsSalting()) { // if the key was salted, prepend the salt value
      String startSalt = null;
      if (null != regionStartKey && !regionStartKey.isEmpty()) {
        // keys are salted as salt + "_", so take the part before the underscore
        startSalt = regionStartKey.split("_")[0];
      }
      if (null != startSalt) {
        if (null != rowKey) {
          rowKey = startSalt + "_" + rowKey;
        }
      }
    }
    if (StrUtils.isEmpty(rowKey)) return;
    Get get = new Get(Bytes.toBytes(rowKey));
    Result result = this.env.getRegion().get(get);
    DataProtos.DataQueryResponse.Builder responseBuilder =
        DataProtos.DataQueryResponse.newBuilder();
    DataProtos.DataQueryResponse.Row.Builder rowBuilder =
        DataProtos.DataQueryResponse.Row.newBuilder();
    if (result != null && !result.isEmpty()) {
      List<KeyValue> list = result.list();
      if (null != list && !list.isEmpty()) {
        rowBuilder.setRowKey(ByteString.copyFrom(list.get(0).getRow()));
        for (KeyValue kv : list) {
          queryBuilder(rowBuilder, ByteString.copyFrom(kv.getFamily()),
              ByteString.copyFrom(kv.getQualifier()), ByteString.copyFrom(kv.getRow()),
              ByteString.copyFrom(kv.getValue()));
        }
      }
    }
    responseBuilder.addRowList(rowBuilder);
    response = responseBuilder.build();
  } catch (IOException ignored) {
    ResponseConverter.setControllerException(controller, ignored);
  }
  done.run(response);
}
Example 19
Source File: AggregateRegionObserverTest.java From Kylin with Apache License 2.0
@Test
public void test() throws IOException {
  CoprocessorRowType rowType = newRowType();
  CoprocessorProjector projector = new CoprocessorProjector(mask);
  ObserverAggregators aggregators = new ObserverAggregators(new HCol[] { c1, c2 });
  CoprocessorFilter filter = CoprocessorFilter.deserialize(null); // a default, always-true filter

  HashSet<String> expectedResult = new HashSet<String>();
  expectedResult.add("\\x02\\x02\\x00\\x00, f:q1, [26.0, 7]");
  expectedResult.add("\\x02\\x02\\x00\\x00, f:q2, [48.0]");
  expectedResult.add("\\x01\\x01\\x00\\x00, f:q1, [22.0, 3]");
  expectedResult.add("\\x01\\x01\\x00\\x00, f:q2, [44.0]");

  MockupRegionScanner innerScanner = new MockupRegionScanner(cellsInput);

  RegionScanner aggrScanner = new AggregationScanner(rowType, filter, projector, aggregators,
      innerScanner);
  ArrayList<Cell> result = Lists.newArrayList();
  boolean hasMore = true;
  while (hasMore) {
    result.clear();
    hasMore = aggrScanner.next(result);
    if (result.isEmpty())
      continue;

    Cell cell = result.get(0);
    HCol hcol = null;
    if (ObserverAggregators.match(c1, cell)) {
      hcol = c1;
    } else if (ObserverAggregators.match(c2, cell)) {
      hcol = c2;
    } else
      fail();

    hcol.measureCodec.decode(ByteBuffer.wrap(cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()), hcol.measureValues);

    String rowKey = toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), mask);
    String col = Bytes.toString(hcol.family) + ":" + Bytes.toString(hcol.qualifier);
    String values = Arrays.toString(hcol.measureValues);

    System.out.println(rowKey);
    System.out.println(col);
    System.out.println(values);

    assertTrue(expectedResult.contains(rowKey + ", " + col + ", " + values));
  }
  aggrScanner.close();
}
Example 20
Source File: AggregateProtocolEndPoint.java From Eagle with Apache License 2.0
/**
 * Asynchronous HBase scan read as RAW qualifier
 *
 * @param scan
 * @param listener
 * @throws Exception
 */
protected InternalReadReport asyncStreamRead(EntityDefinition ed, Scan scan,
    QualifierCreationListener listener) throws IOException {
  // _init();
  long counter = 0;
  long startTimestamp = 0;
  long stopTimestamp = 0;

  InternalScanner scanner = this.getCurrentRegion().getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  try {
    boolean hasMoreRows; // false by default
    do {
      hasMoreRows = scanner.next(results);
      Map<String, byte[]> kvMap = new HashMap<String, byte[]>();
      if (!results.isEmpty()) {
        counter++;
        byte[] row = results.get(0).getRow();
        // if(ed.isTimeSeries()){
        long timestamp = RowkeyBuilder.getTimestamp(row, ed);
        // Min
        if (startTimestamp == 0 || startTimestamp > timestamp) {
          startTimestamp = timestamp;
        }
        // Max
        if (stopTimestamp == 0 || stopTimestamp < timestamp) {
          stopTimestamp = timestamp;
        }
        // }

        for (Cell kv : results) {
          String qualifierName = Bytes.toString(kv.getQualifier());
          Qualifier qualifier = null;
          if (!ed.isTag(qualifierName)) {
            qualifier = ed.getQualifierNameMap().get(qualifierName);
            if (qualifier == null) {
              LOG.error("qualifier for field " + qualifierName + " not exist");
              throw new IOException(new NullPointerException(
                  "qualifier for field " + qualifierName + " is null"));
            }
            qualifierName = qualifier.getDisplayName();
          }
          if (kv.getValue() != null) kvMap.put(qualifierName, kv.getValue());
        }

        // LOG.info("DEBUG: timestamp="+timestamp+", keys=["+StringUtils.join(kvMap.keySet(),",")+"]");
        if (!kvMap.isEmpty()) listener.qualifierCreated(kvMap);
        results.clear();
      } else {
        if (LOG.isDebugEnabled()) LOG.warn("Empty batch of KeyValue");
      }
    } while (hasMoreRows);
  } catch (IOException ex) {
    LOG.error(ex.getMessage(), ex);
    throw ex;
  } finally {
    if (scanner != null) {
      scanner.close();
    }
  }
  return new InternalReadReport(counter, startTimestamp, stopTimestamp);
}
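A closing note that applies across these examples: Bytes.toString() assumes the bytes hold UTF-8 text. For row keys or values that carry arbitrary binary data (a serialized long, for instance), Bytes.toStringBinary() is usually the better choice for logging and debugging, since it escapes non-printable bytes as \xNN, the same format that appears in the expected results of Example 19. A minimal sketch (the class and variable names are invented for illustration):

import org.apache.hadoop.hbase.util.Bytes;

public class BinaryKeyLogging {
  public static void main(String[] args) {
    byte[] binaryKey = Bytes.toBytes(42L); // eight bytes, mostly non-printable

    // toString() decodes as UTF-8 and yields unreadable output for binary data
    System.out.println(Bytes.toString(binaryKey));

    // toStringBinary() escapes non-printable bytes, printing "\x00\x00\x00\x00\x00\x00\x00*"
    System.out.println(Bytes.toStringBinary(binaryKey));
  }
}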