Java Code Examples for com.jayway.restassured.response.Response#print()

The following examples show how to use com.jayway.restassured.response.Response#print(). You can go to the original project or source file by following the links above each example.
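As a quick orientation before the project examples: print() writes the response body to standard output and also returns it as a String, so a single call can serve both debug logging and further parsing. Below is a minimal, self-contained sketch of that pattern; the endpoint URL is a placeholder for illustration only and is not taken from any of the projects.

import com.jayway.restassured.RestAssured;
import com.jayway.restassured.response.Response;

public class ResponsePrintSketch {
    public static void main(String[] args) {
        // Issue a GET request against a placeholder endpoint (replace with a real URL).
        final Response response = RestAssured.given()
                .when()
                .get("https://example.org/health");

        // print() echoes the body to System.out and returns the same content as a String,
        // so it can be reused for assertions or deserialization.
        final String body = response.print();
        System.out.println("Body length: " + body.length());
    }
}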
Example 1
Source File: EventStreamReadingAT.java    From nakadi with MIT License
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenReachKeepAliveLimitThenStreamIsClosed() {
    // ACT //
    final int keepAliveLimit = 3;
    final Response response = RestAssured.given()
            .param("batch_flush_timeout", "1")
            .param("stream_keep_alive_limit", keepAliveLimit)
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then().statusCode(HttpStatus.OK.value()).header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final String body = response.print();
    final List<Map<String, Object>> batches = deserializeBatches(body);

    // validate the number of batches and the structure of each batch
    Assert.assertThat(batches, Matchers.hasSize(PARTITIONS_NUM * keepAliveLimit));
    batches.forEach(batch -> validateBatchStructure(batch, null));
}
 
Example 2
Source File: NakadiTestUtils.java    From nakadi with MIT License
public static List<Map> listTimelines(final String eventType) throws IOException {
    final Response response = RestAssured.given()
            .accept(ContentType.JSON)
            .get(format("/event-types/{0}/timelines", eventType));
    final String data = response.print();
    final TypeReference<List<Map>> typeReference = new TypeReference<List<Map>>() {
    };
    return MAPPER.readValue(data, typeReference);
}
 
Example 3
Source File: NakadiTestUtils.java    From nakadi with MIT License
public static void deleteTimeline(final String eventType) throws IOException {
    final Response response = RestAssured.given()
            .accept(ContentType.JSON)
            .get(format("/event-types/{0}/timelines", eventType));
    final String data = response.print();
    final TimelineView[] timelines = MAPPER.readerFor(TimelineView[].class).readValue(data);
    Assert.assertEquals(1, timelines.length);
    RestAssured.given()
            .delete(format("/event-types/{0}/timelines/{1}", eventType, timelines[0].getId().toString()))
            .then()
            .statusCode(HttpStatus.SC_OK);
}
 
Example 4
Source File: EventStreamReadingAT.java    From nakadi with MIT License
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenReadFromTheEndThenLatestOffsetsInStream() {

    // ACT //
    // just stream without X-nakadi-cursors; that should make nakadi read from the very end
    final Response response = RestAssured.given()
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then().statusCode(HttpStatus.OK.value()).header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final String body = response.print();
    final List<Map<String, Object>> batches = deserializeBatches(body);

    // validate the number of batches and the structure of each batch
    Assert.assertThat(batches, Matchers.hasSize(PARTITIONS_NUM));
    batches.forEach(batch -> validateBatchStructure(batch, DUMMY_EVENT));

    // validate that the latest offsets in batches correspond to the newest offsets
    final Set<Cursor> offsets = batches
            .stream()
            .map(batch -> {
                final Map<String, String> cursor = (Map<String, String>) batch.get("cursor");
                return new Cursor(cursor.get("partition"), cursor.get("offset"));
            })
            .collect(Collectors.toSet());
    Assert.assertThat(offsets, Matchers.equalTo(Sets.newHashSet(initialCursors)));
}
 
Example 5
Source File: APIClientTest.java    From data-prep with Apache License 2.0
public DataSetMetadata getPrepMetadata(String preparationId) throws IOException {
    DataSetMetadata metadata;

    // when
    Response transformedResponse = given().when().get("/api/preparations/{id}/metadata", preparationId);

    HttpStatus responseStatus = HttpStatus.valueOf(transformedResponse.getStatusCode());
    if (ACCEPTED.equals(responseStatus)) {
        // the first call returns a 202 with a Location header pointing to the asynchronous method status
        final String asyncMethodStatusUrl = transformedResponse.getHeader("Location");

        waitForAsyncMethodToFinishWithSuccess(asyncMethodStatusUrl);

        Response response = given() //
                .when() //
                .expect() //
                .statusCode(200) //
                .log() //
                .ifError() //
                .get("/api/preparations/{id}/metadata", preparationId);
        metadata = mapper.readValue(response.asInputStream(), DataSetMetadata.class);
    } else if (OK.equals(responseStatus)) {
        metadata = mapper.readValue(transformedResponse.asInputStream(), DataSetMetadata.class);
    } else {
        throw new RuntimeException(
                "Could not get preparation metadata. Response was: " + transformedResponse.print());
    }
    return metadata;
}
 
Example 6
Source File: EventStreamReadingAT.java    From nakadi with MIT License
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenPushFewEventsAndReadThenGetEventsInStream()
        throws ExecutionException, InterruptedException {

    // ARRANGE //
    // push events to one of the partitions
    final int eventsPushed = 2;
    kafkaHelper.writeMultipleMessageToPartition(TEST_PARTITION, topicName, DUMMY_EVENT, eventsPushed);

    // ACT //
    final Response response = readEvents();

    // ASSERT //
    response.then().statusCode(HttpStatus.OK.value()).header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final String body = response.print();
    final List<Map<String, Object>> batches = deserializeBatches(body);

    // validate the number of batches and the structure of each batch
    Assert.assertThat(batches, Matchers.hasSize(PARTITIONS_NUM));
    batches.forEach(batch -> validateBatchStructure(batch, DUMMY_EVENT));

    // find the batch where we expect to see the messages we pushed
    final Map<String, Object> batchToCheck = batches
            .stream()
            .filter(isForPartition(TEST_PARTITION))
            .findFirst()
            .orElseThrow(() -> new AssertionError("Failed to find a partition in a stream"));

    // calculate the offset we expect to see in this batch in a stream
    final Cursor partitionCursor = kafkaInitialNextOffsets.stream()
            .filter(cursor -> TEST_PARTITION.equals(cursor.getPartition()))
            .findFirst()
            .orElseThrow(() -> new AssertionError("Failed to find cursor for needed partition"));
    final String expectedOffset = TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 +
            eventsPushed);

    // check that the batch has the offset, partition and number of events we expect
    validateBatch(batchToCheck, TEST_PARTITION, expectedOffset, eventsPushed);
}
 
Example 7
Source File: EventStreamReadingAT.java    From nakadi with MIT License
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void whenPushedAmountOfEventsMoreThanBatchSizeAndReadThenGetEventsInMultipleBatches()
        throws ExecutionException, InterruptedException {

    // ARRANGE //
    // push events to one of the partitions so that they don't fit into one batch
    final int batchLimit = 5;
    final int eventsPushed = 8;
    kafkaHelper.writeMultipleMessageToPartition(TEST_PARTITION, topicName, DUMMY_EVENT, eventsPushed);

    // ACT //
    final Response response = RestAssured.given()
            .header(new Header("X-nakadi-cursors", xNakadiCursors))
            .param("batch_limit", batchLimit)
            .param("stream_timeout", "2")
            .param("batch_flush_timeout", "2")
            .when()
            .get(streamEndpoint);

    // ASSERT //
    response.then().statusCode(HttpStatus.OK.value()).header(HttpHeaders.TRANSFER_ENCODING, "chunked");

    final String body = response.print();
    final List<Map<String, Object>> batches = deserializeBatches(body);

    // validate the number of batches and the structure of each batch
    // for the partition with events we should get 2 batches
    Assert.assertThat(batches, Matchers.hasSize(PARTITIONS_NUM + 1));
    batches.forEach(batch -> validateBatchStructure(batch, DUMMY_EVENT));

    // find the batches where we expect to see the messages we pushed
    final List<Map<String, Object>> batchesToCheck = batches
            .stream()
            .filter(isForPartition(TEST_PARTITION))
            .collect(Collectors.toList());
    Assert.assertThat(batchesToCheck, Matchers.hasSize(2));

    // calculate the offsets we expect to see in these batches in the stream
    final Cursor partitionCursor = kafkaInitialNextOffsets.stream()
            .filter(cursor -> TEST_PARTITION.equals(cursor.getPartition()))
            .findFirst()
            .orElseThrow(() -> new AssertionError("Failed to find cursor for needed partition"));
    final String expectedOffset1 =
            TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 + batchLimit);
    final String expectedOffset2 =
            TestUtils.toTimelineOffset(Long.parseLong(partitionCursor.getOffset()) - 1 + eventsPushed);

    // check that the batches have the offsets, partition and numbers of events we expect
    validateBatch(batchesToCheck.get(0), TEST_PARTITION, expectedOffset1, batchLimit);
    validateBatch(batchesToCheck.get(1), TEST_PARTITION, expectedOffset2, eventsPushed - batchLimit);
}