org.jooq.Record3 Java Examples
The following examples show how to use org.jooq.Record3. Each example is taken from an open-source project; the source file, project, and license are listed above it.
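Record3<T1, T2, T3> is jOOQ's typed record of degree three: a row of exactly three columns whose Java types are carried in the type parameters. As a minimal sketch of the core idea (assuming a DSLContext named dsl and a generated BOOK table with ID, TITLE, and PUBLISHED_IN columns; these names are illustrative, not taken from the examples below), selecting exactly three columns yields a Result of Record3 with typed per-column accessors:

Result<Record3<Integer, String, Integer>> books = dsl
    .select(BOOK.ID, BOOK.TITLE, BOOK.PUBLISHED_IN)  // three columns -> Record3
    .from(BOOK)
    .fetch();

for (Record3<Integer, String, Integer> book : books) {
    Integer id = book.value1();    // first column, typed Integer
    String title = book.value2();  // second column, typed String
    Integer year = book.value3();  // third column, typed Integer
}

The examples below use the same type in three roles: as the row type of three-column queries, as a composite primary-key value passed to generated DAOs, and as the return type of a generated record's key() method.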
Example #1
Source File: KeysetPaginationTest.java From high-performance-java-persistence with Apache License 2.0
public List<PostSummary> nextPage(int pageSize, PostSummary offsetPostSummary) {
    return doInJOOQ(sql -> {
        SelectSeekStep2<Record3<Long, String, Timestamp>, Timestamp, Long> selectStep = sql
            .select(POST.ID, POST.TITLE, POST_DETAILS.CREATED_ON)
            .from(POST)
            .join(POST_DETAILS).on(POST.ID.eq(POST_DETAILS.ID))
            .orderBy(POST_DETAILS.CREATED_ON.desc(), POST.ID.desc());

        return (offsetPostSummary != null)
            ? selectStep
                .seek(offsetPostSummary.getCreatedOn(), offsetPostSummary.getId())
                .limit(pageSize)
                .fetchInto(PostSummary.class)
            : selectStep
                .limit(pageSize)
                .fetchInto(PostSummary.class);
    });
}
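In this keyset ("seek method") pagination example, the Record3 type parameters <Long, String, Timestamp> mirror the three selected columns, while SelectSeekStep2's trailing <Timestamp, Long> parameters mirror the two ORDER BY columns. seek(createdOn, id) renders a row-value predicate over those sort columns (conceptually (created_on, id) < (?, ?) under this descending order), so each page resumes right after the last row of the previous page instead of discarding OFFSET rows.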
Example #2
Source File: KeysetPaginationTest.java From high-performance-java-persistence with Apache License 2.0
public List<PostSummary> nextPage(int pageSize, PostSummary offsetPostSummary) {
    return doInJOOQ(sql -> {
        SelectSeekStep2<Record3<Long, String, Timestamp>, Timestamp, Long> selectStep = sql
            .select(POST.ID, POST.TITLE, POST_DETAILS.CREATED_ON)
            .from(POST)
            .join(POST_DETAILS).using(POST.ID)
            .orderBy(POST_DETAILS.CREATED_ON.desc(), POST.ID.desc());

        return (offsetPostSummary != null)
            ? selectStep
                .seek(offsetPostSummary.getCreatedOn(), offsetPostSummary.getId())
                .limit(pageSize)
                .fetchInto(PostSummary.class)
            : selectStep
                .limit(pageSize)
                .fetchInto(PostSummary.class);
    });
}
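The only change from Example #1 is the join style: using(POST.ID) renders a SQL USING clause, joining on the column name shared by both tables, where Example #1 spells out the equivalent ON predicate.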
Example #3
Source File: KeysetPaginationTest.java From high-performance-java-persistence with Apache License 2.0
public List<PostSummary> nextPage(int pageSize, PostSummary offsetPostSummary) {
    return doInJOOQ(sql -> {
        SelectSeekStep2<Record3<BigInteger, String, Timestamp>, Timestamp, BigInteger> selectStep = sql
            .select(POST.ID, POST.TITLE, POST_DETAILS.CREATED_ON)
            .from(POST)
            .join(POST_DETAILS).on(POST.ID.eq(POST_DETAILS.ID))
            .orderBy(POST_DETAILS.CREATED_ON.desc(), POST.ID.desc());

        return (offsetPostSummary != null)
            ? selectStep
                .seek(offsetPostSummary.getCreatedOn(), BigInteger.valueOf(offsetPostSummary.getId()))
                .limit(pageSize)
                .fetchInto(PostSummary.class)
            : selectStep
                .limit(pageSize)
                .fetchInto(PostSummary.class);
    });
}
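This is the same query as Example #1 against a schema whose generated POST.ID column is typed BigInteger rather than Long (as some code-generation configurations map numeric keys), so the seek value is wrapped with BigInteger.valueOf() to match.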
Example #4
Source File: KeysetPaginationFailTest.java From high-performance-java-persistence with Apache License 2.0
public List<PostSummary> nextPage(int pageSize, PostSummary offsetPostSummary) {
    return doInJOOQ(sql -> {
        SelectSeekStep2<Record3<BigInteger, String, Timestamp>, Timestamp, BigInteger> selectStep = sql
            .select(POST.ID, POST.TITLE, POST_DETAILS.CREATED_ON)
            .from(POST)
            .join(POST_DETAILS).using(POST.ID)
            .orderBy(POST_DETAILS.CREATED_ON.desc(), POST.ID.desc());

        return (offsetPostSummary != null)
            ? selectStep
                .seek(offsetPostSummary.getCreatedOn(), BigInteger.valueOf(offsetPostSummary.getId()))
                .limit(pageSize)
                .fetchInto(PostSummary.class)
            : selectStep
                .limit(pageSize)
                .fetchInto(PostSummary.class);
    });
}
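This version combines the BigInteger key typing of Example #3 with the using(POST.ID) join of Example #2; the source file name, KeysetPaginationFailTest, suggests it captures a variant the test suite expects to fail rather than a recommended pattern.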
Example #5
Source File: SpringBootIntegrationTest.java From tutorials with MIT License
@Test
public void givenValidData_whenDeleting_thenSucceed() {
    dsl.delete(AUTHOR)
        .where(AUTHOR.ID.lt(3))
        .execute();

    final Result<Record3<Integer, String, String>> result =
        dsl.select(AUTHOR.ID, AUTHOR.FIRST_NAME, AUTHOR.LAST_NAME)
           .from(AUTHOR)
           .fetch();

    assertEquals(1, result.size());
    assertEquals("Bryan", result.getValue(0, AUTHOR.FIRST_NAME));
    assertEquals("Basham", result.getValue(0, AUTHOR.LAST_NAME));
}
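Result.getValue(index, field) reads the given column from the row at that index; its return type is inferred from the field reference, so the assertions need no casts.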
Example #6
Source File: FileSizeCountTask.java From hadoop-ozone with Apache License 2.0
/**
 * Populate DB with the counts of file sizes calculated
 * using the dao.
 */
private void writeCountsToDB(boolean isDbTruncated,
                             Map<FileSizeCountKey, Long> fileSizeCountMap) {
  fileSizeCountMap.keySet().forEach((FileSizeCountKey key) -> {
    FileCountBySize newRecord = new FileCountBySize();
    newRecord.setVolume(key.volume);
    newRecord.setBucket(key.bucket);
    newRecord.setFileSize(key.fileSizeUpperBound);
    newRecord.setCount(fileSizeCountMap.get(key));
    if (!isDbTruncated) {
      // Get the current count from database and update
      Record3<String, String, Long> recordToFind = dslContext.newRecord(
              FILE_COUNT_BY_SIZE.VOLUME,
              FILE_COUNT_BY_SIZE.BUCKET,
              FILE_COUNT_BY_SIZE.FILE_SIZE)
          .value1(key.volume)
          .value2(key.bucket)
          .value3(key.fileSizeUpperBound);
      FileCountBySize fileCountRecord =
          fileCountBySizeDao.findById(recordToFind);
      if (fileCountRecord == null && newRecord.getCount() > 0L) {
        // insert new row only for non-zero counts.
        fileCountBySizeDao.insert(newRecord);
      } else if (fileCountRecord != null) {
        newRecord.setCount(fileCountRecord.getCount() +
            fileSizeCountMap.get(key));
        fileCountBySizeDao.update(newRecord);
      }
    } else if (newRecord.getCount() > 0) {
      // insert new row only for non-zero counts.
      fileCountBySizeDao.insert(newRecord);
    }
  });
}
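The pattern here recurs throughout the Ozone Recon examples below: DSLContext.newRecord(field1, field2, field3) creates a detached Record3 for the composite primary key (VOLUME, BUCKET, FILE_SIZE), the value1()/value2()/value3() setters populate it, and the generated DAO's findById() accepts that record as the key.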
Example #7
Source File: SpringBootIntegrationTest.java From tutorials with MIT License
@Test
public void givenValidData_whenUpdating_thenSucceed() {
    dsl.update(AUTHOR)
        .set(AUTHOR.LAST_NAME, "Baeldung")
        .where(AUTHOR.ID.equal(3))
        .execute();
    dsl.update(BOOK)
        .set(BOOK.TITLE, "Building your REST API with Spring")
        .where(BOOK.ID.equal(3))
        .execute();
    dsl.insertInto(AUTHOR_BOOK)
        .set(AUTHOR_BOOK.AUTHOR_ID, 3)
        .set(AUTHOR_BOOK.BOOK_ID, 3)
        .execute();

    final Result<Record3<Integer, String, String>> result =
        dsl.select(AUTHOR.ID, AUTHOR.LAST_NAME, BOOK.TITLE)
           .from(AUTHOR)
           .join(AUTHOR_BOOK).on(AUTHOR.ID.equal(AUTHOR_BOOK.AUTHOR_ID))
           .join(BOOK).on(AUTHOR_BOOK.BOOK_ID.equal(BOOK.ID))
           .where(AUTHOR.ID.equal(3))
           .fetch();

    assertEquals(1, result.size());
    assertEquals(Integer.valueOf(3), result.getValue(0, AUTHOR.ID));
    assertEquals("Baeldung", result.getValue(0, AUTHOR.LAST_NAME));
    assertEquals("Building your REST API with Spring", result.getValue(0, BOOK.TITLE));
}
Example #8
Source File: SpringBootIntegrationTest.java From tutorials with MIT License
@Test
public void givenValidData_whenInserting_thenSucceed() {
    dsl.insertInto(AUTHOR)
        .set(AUTHOR.ID, 4)
        .set(AUTHOR.FIRST_NAME, "Herbert")
        .set(AUTHOR.LAST_NAME, "Schildt")
        .execute();
    dsl.insertInto(BOOK)
        .set(BOOK.ID, 4)
        .set(BOOK.TITLE, "A Beginner's Guide")
        .execute();
    dsl.insertInto(AUTHOR_BOOK)
        .set(AUTHOR_BOOK.AUTHOR_ID, 4)
        .set(AUTHOR_BOOK.BOOK_ID, 4)
        .execute();

    final Result<Record3<Integer, String, Integer>> result =
        dsl.select(AUTHOR.ID, AUTHOR.LAST_NAME, DSL.count())
           .from(AUTHOR)
           .join(AUTHOR_BOOK).on(AUTHOR.ID.equal(AUTHOR_BOOK.AUTHOR_ID))
           .join(BOOK).on(AUTHOR_BOOK.BOOK_ID.equal(BOOK.ID))
           .groupBy(AUTHOR.LAST_NAME)
           .orderBy(AUTHOR.LAST_NAME.desc())
           .fetch();

    assertEquals(3, result.size());
    assertEquals("Sierra", result.getValue(0, AUTHOR.LAST_NAME));
    assertEquals(Integer.valueOf(2), result.getValue(0, DSL.count()));
    assertEquals("Bates", result.getValue(2, AUTHOR.LAST_NAME));
    assertEquals(Integer.valueOf(1), result.getValue(2, DSL.count()));
}
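Because the third column in the select list is the aggregate DSL.count(), the third Record3 type parameter is Integer; the record type always mirrors the projection, whether the columns are table fields or computed expressions.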
Example #9
Source File: QueryIntegrationTest.java From tutorials with MIT License
@Test
public void givenValidData_whenDeleting_thenSucceed() {
    dsl.delete(AUTHOR)
        .where(AUTHOR.ID.lt(3))
        .execute();

    final Result<Record3<Integer, String, String>> result =
        dsl.select(AUTHOR.ID, AUTHOR.FIRST_NAME, AUTHOR.LAST_NAME)
           .from(AUTHOR)
           .fetch();

    assertEquals(1, result.size());
    assertEquals("Bryan", result.getValue(0, AUTHOR.FIRST_NAME));
    assertEquals("Basham", result.getValue(0, AUTHOR.LAST_NAME));
}
Example #10
Source File: QueryIntegrationTest.java From tutorials with MIT License
@Test
public void givenValidData_whenUpdating_thenSucceed() {
    dsl.update(AUTHOR)
        .set(AUTHOR.LAST_NAME, "Baeldung")
        .where(AUTHOR.ID.equal(3))
        .execute();
    dsl.update(BOOK)
        .set(BOOK.TITLE, "Building your REST API with Spring")
        .where(BOOK.ID.equal(3))
        .execute();
    dsl.insertInto(AUTHOR_BOOK)
        .set(AUTHOR_BOOK.AUTHOR_ID, 3)
        .set(AUTHOR_BOOK.BOOK_ID, 3)
        .execute();

    final Result<Record3<Integer, String, String>> result =
        dsl.select(AUTHOR.ID, AUTHOR.LAST_NAME, BOOK.TITLE)
           .from(AUTHOR)
           .join(AUTHOR_BOOK).on(AUTHOR.ID.equal(AUTHOR_BOOK.AUTHOR_ID))
           .join(BOOK).on(AUTHOR_BOOK.BOOK_ID.equal(BOOK.ID))
           .where(AUTHOR.ID.equal(3))
           .fetch();

    assertEquals(1, result.size());
    assertEquals(Integer.valueOf(3), result.getValue(0, AUTHOR.ID));
    assertEquals("Baeldung", result.getValue(0, AUTHOR.LAST_NAME));
    assertEquals("Building your REST API with Spring", result.getValue(0, BOOK.TITLE));
}
Example #11
Source File: QueryIntegrationTest.java From tutorials with MIT License
@Test
public void givenValidData_whenInserting_thenSucceed() {
    dsl.insertInto(AUTHOR)
        .set(AUTHOR.ID, 4)
        .set(AUTHOR.FIRST_NAME, "Herbert")
        .set(AUTHOR.LAST_NAME, "Schildt")
        .execute();
    dsl.insertInto(BOOK)
        .set(BOOK.ID, 4)
        .set(BOOK.TITLE, "A Beginner's Guide")
        .execute();
    dsl.insertInto(AUTHOR_BOOK)
        .set(AUTHOR_BOOK.AUTHOR_ID, 4)
        .set(AUTHOR_BOOK.BOOK_ID, 4)
        .execute();

    final Result<Record3<Integer, String, Integer>> result =
        dsl.select(AUTHOR.ID, AUTHOR.LAST_NAME, DSL.count())
           .from(AUTHOR)
           .join(AUTHOR_BOOK).on(AUTHOR.ID.equal(AUTHOR_BOOK.AUTHOR_ID))
           .join(BOOK).on(AUTHOR_BOOK.BOOK_ID.equal(BOOK.ID))
           .groupBy(AUTHOR.LAST_NAME)
           .orderBy(AUTHOR.LAST_NAME.desc())
           .fetch();

    assertEquals(3, result.size());
    assertEquals("Sierra", result.getValue(0, AUTHOR.LAST_NAME));
    assertEquals(Integer.valueOf(2), result.getValue(0, DSL.count()));
    assertEquals("Bates", result.getValue(2, AUTHOR.LAST_NAME));
    assertEquals(Integer.valueOf(1), result.getValue(2, DSL.count()));
}
Example #12
Source File: UtilizationEndpoint.java From hadoop-ozone with Apache License 2.0
/**
 * Return the file counts from Recon DB.
 * @return {@link Response}
 */
@GET
@Path("/fileCount")
public Response getFileCounts(
    @QueryParam(RECON_QUERY_VOLUME) String volume,
    @QueryParam(RECON_QUERY_BUCKET) String bucket,
    @QueryParam(RECON_QUERY_FILE_SIZE) long fileSize) {
  DSLContext dslContext = utilizationSchemaDefinition.getDSLContext();
  List<FileCountBySize> resultSet;
  if (volume != null && bucket != null && fileSize > 0) {
    Record3<String, String, Long> recordToFind = dslContext
        .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
            FILE_COUNT_BY_SIZE.BUCKET,
            FILE_COUNT_BY_SIZE.FILE_SIZE)
        .value1(volume)
        .value2(bucket)
        .value3(fileSize);
    FileCountBySize record = fileCountBySizeDao.findById(recordToFind);
    resultSet = record != null ?
        Collections.singletonList(record) : Collections.emptyList();
  } else if (volume != null && bucket != null) {
    resultSet = dslContext.select().from(FILE_COUNT_BY_SIZE)
        .where(FILE_COUNT_BY_SIZE.VOLUME.eq(volume))
        .and(FILE_COUNT_BY_SIZE.BUCKET.eq(bucket))
        .fetchInto(FileCountBySize.class);
  } else if (volume != null) {
    resultSet = fileCountBySizeDao.fetchByVolume(volume);
  } else {
    // fetch all records
    resultSet = fileCountBySizeDao.findAll();
  }
  return Response.ok(resultSet).build();
}
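Only when all three parts of the composite key are supplied (volume, bucket, and a positive file size) does the endpoint build a Record3 and look the row up directly with findById(); partial filters fall back to an ordinary SELECT fetched into the POJO, or to the DAO's generated fetchByVolume() and findAll() methods.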
Example #13
Source File: ExonTranscriptRecord.java From hmftools with GNU General Public License v3.0
/**
 * {@inheritDoc}
 */
@Override
public Record3<UInteger, UInteger, Integer> key() {
    return (Record3) super.key();
}
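For a table with a three-column primary key, the jOOQ code generator overrides key() on the updatable record to narrow the base class's return type to the matching Record3; Example #14 below is the same override generated for a different table and key types.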
Example #14
Source File: DjReleaseRevLabelRecord.java From oneops with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public Record3<Long, Short, Long> key() {
    return (Record3) super.key();
}
Example #15
Source File: TestUtilizationSchemaDefinition.java From hadoop-ozone with Apache License 2.0
@Test
public void testFileCountBySizeCRUDOperations() throws SQLException {
  Connection connection = getConnection();
  DatabaseMetaData metaData = connection.getMetaData();
  ResultSet resultSet = metaData.getTables(null, null,
      FILE_COUNT_BY_SIZE_TABLE_NAME, null);

  while (resultSet.next()) {
    Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME,
        resultSet.getString("TABLE_NAME"));
  }

  FileCountBySizeDao fileCountBySizeDao = getDao(FileCountBySizeDao.class);
  UtilizationSchemaDefinition utilizationSchemaDefinition =
      getSchemaDefinition(UtilizationSchemaDefinition.class);

  FileCountBySize newRecord = new FileCountBySize();
  newRecord.setVolume("vol1");
  newRecord.setBucket("bucket1");
  newRecord.setFileSize(1024L);
  newRecord.setCount(1L);
  fileCountBySizeDao.insert(newRecord);

  Record3<String, String, Long> recordToFind = utilizationSchemaDefinition
      .getDSLContext().newRecord(FILE_COUNT_BY_SIZE.VOLUME,
          FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(1024L);
  FileCountBySize dbRecord = fileCountBySizeDao.findById(recordToFind);
  assertEquals(Long.valueOf(1), dbRecord.getCount());

  dbRecord.setCount(2L);
  fileCountBySizeDao.update(dbRecord);
  dbRecord = fileCountBySizeDao.findById(recordToFind);
  assertEquals(Long.valueOf(2), dbRecord.getCount());

  Table<FileCountBySizeRecord> fileCountBySizeRecordTable =
      fileCountBySizeDao.getTable();
  List<UniqueKey<FileCountBySizeRecord>> tableKeys =
      fileCountBySizeRecordTable.getKeys();
  // enumerate the table's unique keys (names are not asserted here)
  for (UniqueKey key : tableKeys) {
    String name = key.getName();
  }
}
Example #16
Source File: TestFileSizeCountTask.java From hadoop-ozone with Apache License 2.0
@Test
public void testReprocessAtScale() throws IOException {
  // generate mocks for 2 volumes, 500 buckets each volume
  // and 42 keys in each bucket.
  List<OmKeyInfo> omKeyInfoList = new ArrayList<>();
  List<Boolean> hasNextAnswer = new ArrayList<>();
  for (int volIndex = 1; volIndex <= 2; volIndex++) {
    for (int bktIndex = 1; bktIndex <= 500; bktIndex++) {
      for (int keyIndex = 1; keyIndex <= 42; keyIndex++) {
        OmKeyInfo omKeyInfo = mock(OmKeyInfo.class);
        given(omKeyInfo.getKeyName()).willReturn("key" + keyIndex);
        given(omKeyInfo.getVolumeName()).willReturn("vol" + volIndex);
        given(omKeyInfo.getBucketName()).willReturn("bucket" + bktIndex);
        // Place keys in each bin
        long fileSize = (long) Math.pow(2, keyIndex + 9) - 1L;
        given(omKeyInfo.getDataSize()).willReturn(fileSize);
        omKeyInfoList.add(omKeyInfo);
        hasNextAnswer.add(true);
      }
    }
  }
  hasNextAnswer.add(false);

  OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
  TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
  TypedTable.TypedTableIterator mockKeyIter =
      mock(TypedTable.TypedTableIterator.class);
  TypedTable.TypedKeyValue mockKeyValue =
      mock(TypedTable.TypedKeyValue.class);
  when(keyTable.iterator()).thenReturn(mockKeyIter);
  when(omMetadataManager.getKeyTable()).thenReturn(keyTable);
  when(mockKeyIter.hasNext())
      .thenAnswer(AdditionalAnswers.returnsElementsOf(hasNextAnswer));
  when(mockKeyIter.next()).thenReturn(mockKeyValue);
  when(mockKeyValue.getValue())
      .thenAnswer(AdditionalAnswers.returnsElementsOf(omKeyInfoList));

  Pair<String, Boolean> result =
      fileSizeCountTask.reprocess(omMetadataManager);
  assertTrue(result.getRight());

  // 2 volumes * 500 buckets * 42 bins = 42000 rows
  assertEquals(42000, fileCountBySizeDao.count());
  Record3<String, String, Long> recordToFind = dslContext
      .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
          FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(1024L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  // file size upper bound for 100000L is 131072L (next highest power of 2)
  recordToFind.value1("vol1");
  recordToFind.value3(131072L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  recordToFind.value2("bucket500");
  recordToFind.value3(Long.MAX_VALUE);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
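The bin assertions in these tests rely on FileSizeCountTask's bucketing, visible in the in-code comments: each key is counted under the next power-of-two upper bound of its size (1000 falls in the 1024 bin, 100000 in the 131072 bin), with everything beyond the largest bin collapsing into Long.MAX_VALUE.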
Example #17
Source File: TestFileSizeCountTask.java From hadoop-ozone with Apache License 2.0
@Test
public void testProcess() {
  // Write 2 keys.
  OmKeyInfo toBeDeletedKey = mock(OmKeyInfo.class);
  given(toBeDeletedKey.getVolumeName()).willReturn("vol1");
  given(toBeDeletedKey.getBucketName()).willReturn("bucket1");
  given(toBeDeletedKey.getKeyName()).willReturn("deletedKey");
  given(toBeDeletedKey.getDataSize()).willReturn(2000L); // Bin 1
  OMDBUpdateEvent event = new OMUpdateEventBuilder()
      .setAction(PUT)
      .setKey("deletedKey")
      .setValue(toBeDeletedKey)
      .build();

  OmKeyInfo toBeUpdatedKey = mock(OmKeyInfo.class);
  given(toBeUpdatedKey.getVolumeName()).willReturn("vol1");
  given(toBeUpdatedKey.getBucketName()).willReturn("bucket1");
  given(toBeUpdatedKey.getKeyName()).willReturn("updatedKey");
  given(toBeUpdatedKey.getDataSize()).willReturn(10000L); // Bin 4
  OMDBUpdateEvent event2 = new OMUpdateEventBuilder()
      .setAction(PUT)
      .setKey("updatedKey")
      .setValue(toBeUpdatedKey)
      .build();

  OMUpdateEventBatch omUpdateEventBatch =
      new OMUpdateEventBatch(Arrays.asList(event, event2));
  fileSizeCountTask.process(omUpdateEventBatch);

  // Verify 2 keys are in correct bins.
  assertEquals(2, fileCountBySizeDao.count());
  Record3<String, String, Long> recordToFind = dslContext
      .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
          FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(2048L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  // file size upper bound for 10000L is 16384L (next highest power of 2)
  recordToFind.value3(16384L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());

  // Add new key.
  OmKeyInfo newKey = mock(OmKeyInfo.class);
  given(newKey.getVolumeName()).willReturn("vol1");
  given(newKey.getBucketName()).willReturn("bucket1");
  given(newKey.getKeyName()).willReturn("newKey");
  given(newKey.getDataSize()).willReturn(1000L); // Bin 0
  OMDBUpdateEvent putEvent = new OMUpdateEventBuilder()
      .setAction(PUT)
      .setKey("newKey")
      .setValue(newKey)
      .build();

  // Update existing key.
  OmKeyInfo updatedKey = mock(OmKeyInfo.class);
  given(updatedKey.getVolumeName()).willReturn("vol1");
  given(updatedKey.getBucketName()).willReturn("bucket1");
  given(updatedKey.getKeyName()).willReturn("updatedKey");
  given(updatedKey.getDataSize()).willReturn(50000L); // Bin 6
  OMDBUpdateEvent updateEvent = new OMUpdateEventBuilder()
      .setAction(UPDATE)
      .setKey("updatedKey")
      .setValue(updatedKey)
      .setOldValue(toBeUpdatedKey)
      .build();

  // Delete another existing key.
  OMDBUpdateEvent deleteEvent = new OMUpdateEventBuilder()
      .setAction(DELETE)
      .setKey("deletedKey")
      .setValue(toBeDeletedKey)
      .build();

  omUpdateEventBatch = new OMUpdateEventBatch(
      Arrays.asList(updateEvent, putEvent, deleteEvent));
  fileSizeCountTask.process(omUpdateEventBatch);

  assertEquals(4, fileCountBySizeDao.count());
  recordToFind.value3(1024L);
  assertEquals(1, fileCountBySizeDao.findById(recordToFind)
      .getCount().longValue());
  recordToFind.value3(2048L);
  assertEquals(0, fileCountBySizeDao.findById(recordToFind)
      .getCount().longValue());
  recordToFind.value3(16384L);
  assertEquals(0, fileCountBySizeDao.findById(recordToFind)
      .getCount().longValue());
  recordToFind.value3(65536L);
  assertEquals(1, fileCountBySizeDao.findById(recordToFind)
      .getCount().longValue());
}
Example #18
Source File: TestFileSizeCountTask.java From hadoop-ozone with Apache License 2.0
@Test
public void testReprocess() throws IOException {
  OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
  given(omKeyInfo1.getKeyName()).willReturn("key1");
  given(omKeyInfo1.getVolumeName()).willReturn("vol1");
  given(omKeyInfo1.getBucketName()).willReturn("bucket1");
  given(omKeyInfo1.getDataSize()).willReturn(1000L);

  OmKeyInfo omKeyInfo2 = mock(OmKeyInfo.class);
  given(omKeyInfo2.getKeyName()).willReturn("key2");
  given(omKeyInfo2.getVolumeName()).willReturn("vol1");
  given(omKeyInfo2.getBucketName()).willReturn("bucket1");
  given(omKeyInfo2.getDataSize()).willReturn(100000L);

  OmKeyInfo omKeyInfo3 = mock(OmKeyInfo.class);
  given(omKeyInfo3.getKeyName()).willReturn("key3");
  given(omKeyInfo3.getVolumeName()).willReturn("vol1");
  given(omKeyInfo3.getBucketName()).willReturn("bucket1");
  given(omKeyInfo3.getDataSize()).willReturn(1125899906842624L * 4); // 4PB

  OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
  TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
  TypedTable.TypedTableIterator mockKeyIter =
      mock(TypedTable.TypedTableIterator.class);
  TypedTable.TypedKeyValue mockKeyValue =
      mock(TypedTable.TypedKeyValue.class);
  when(keyTable.iterator()).thenReturn(mockKeyIter);
  when(omMetadataManager.getKeyTable()).thenReturn(keyTable);
  when(mockKeyIter.hasNext())
      .thenReturn(true)
      .thenReturn(true)
      .thenReturn(true)
      .thenReturn(false);
  when(mockKeyIter.next()).thenReturn(mockKeyValue);
  when(mockKeyValue.getValue())
      .thenReturn(omKeyInfo1)
      .thenReturn(omKeyInfo2)
      .thenReturn(omKeyInfo3);

  Pair<String, Boolean> result =
      fileSizeCountTask.reprocess(omMetadataManager);
  assertTrue(result.getRight());

  assertEquals(3, fileCountBySizeDao.count());
  Record3<String, String, Long> recordToFind = dslContext
      .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
          FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(1024L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  // file size upper bound for 100000L is 131072L (next highest power of 2)
  recordToFind.value3(131072L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
  // file size upper bound for 4PB is Long.MAX_VALUE
  recordToFind.value3(Long.MAX_VALUE);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}