Java Code Examples for org.apache.nifi.provenance.serialization.RecordWriter#flush()
The following examples show how to use
org.apache.nifi.provenance.serialization.RecordWriter#flush() .
You can vote up the ones you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: WriteAheadStorePartition.java From localization_nifi with Apache License 2.0 | 5 votes |
private Map<ProvenanceEventRecord, StorageSummary> addEvents(final Iterable<ProvenanceEventRecord> events, final RecordWriter writer) throws IOException { final Map<ProvenanceEventRecord, StorageSummary> locationMap = new HashMap<>(); try { long maxId = -1L; int numEvents = 0; for (final ProvenanceEventRecord nextEvent : events) { final StorageSummary writerSummary = writer.writeRecord(nextEvent); final StorageSummary summaryWithIndex = new StorageSummary(writerSummary.getEventId(), writerSummary.getStorageLocation(), this.partitionName, writerSummary.getBlockIndex(), writerSummary.getSerializedLength(), writerSummary.getBytesWritten()); locationMap.put(nextEvent, summaryWithIndex); maxId = summaryWithIndex.getEventId(); numEvents++; } if (numEvents == 0) { return locationMap; } writer.flush(); // Update max event id to be equal to be the greater of the current value or the // max value just written. final long maxIdWritten = maxId; this.maxEventId.getAndUpdate(cur -> maxIdWritten > cur ? maxIdWritten : cur); if (config.isAlwaysSync()) { writer.sync(); } } catch (final Exception e) { // We need to set the repoDirty flag before we release the lock for this journal. // Otherwise, another thread may write to this journal -- this is a problem because // the journal contains part of our record but not all of it. Writing to the end of this // journal will result in corruption! writer.markDirty(); throw e; } return locationMap; }
Example 2
Source File: TestPersistentProvenanceRepository.java From localization_nifi with Apache License 2.0 | 5 votes |
@BeforeClass public static void findJournalSizes() throws IOException { // determine header and record size final Map<String, String> attributes = new HashMap<>(); final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder(); builder.setEventTime(System.currentTimeMillis()); builder.setEventType(ProvenanceEventType.RECEIVE); builder.setTransitUri("nifi://unit-test"); attributes.put("uuid", "12345678-0000-0000-0000-012345678912"); builder.fromFlowFile(createFlowFile(3L, 3000L, attributes)); builder.setComponentId("1234"); builder.setComponentType("dummy processor"); final ProvenanceEventRecord record = builder.build(); builder.setComponentId("2345"); final ProvenanceEventRecord record2 = builder.build(); final File tempRecordFile = tempFolder.newFile("record.tmp"); System.out.println("findJournalSizes position 0 = " + tempRecordFile.length()); final AtomicLong idGenerator = new AtomicLong(0L); final RecordWriter writer = RecordWriters.newSchemaRecordWriter(tempRecordFile, idGenerator, false, false); writer.writeHeader(12345L); writer.flush(); headerSize = Long.valueOf(tempRecordFile.length()).intValue(); writer.writeRecord(record); writer.flush(); recordSize = Long.valueOf(tempRecordFile.length()).intValue() - headerSize; writer.writeRecord(record2); writer.flush(); recordSize2 = Long.valueOf(tempRecordFile.length()).intValue() - headerSize - recordSize; writer.close(); System.out.println("headerSize =" + headerSize); System.out.println("recordSize =" + recordSize); System.out.println("recordSize2=" + recordSize2); }
Example 3
Source File: MiNiFiPersistentProvenanceRepositoryTest.java From nifi-minifi with Apache License 2.0 | 5 votes |
@BeforeClass public static void findJournalSizes() throws IOException { // determine header and record size final Map<String, String> attributes = new HashMap<>(); final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder(); builder.setEventTime(System.currentTimeMillis()); builder.setEventType(ProvenanceEventType.RECEIVE); builder.setTransitUri("nifi://unit-test"); attributes.put("uuid", "12345678-0000-0000-0000-012345678912"); builder.fromFlowFile(createFlowFile(3L, 3000L, attributes)); builder.setComponentId("1234"); builder.setComponentType("dummy processor"); final ProvenanceEventRecord record = builder.build(); builder.setComponentId("2345"); final ProvenanceEventRecord record2 = builder.build(); final File tempRecordFile = tempFolder.newFile("record.tmp"); System.out.println("findJournalSizes position 0 = " + tempRecordFile.length()); final AtomicLong idGenerator = new AtomicLong(0L); final RecordWriter writer = RecordWriters.newSchemaRecordWriter(tempRecordFile, idGenerator, false, false); writer.writeHeader(12345L); writer.flush(); headerSize = Long.valueOf(tempRecordFile.length()).intValue(); writer.writeRecord(record); writer.flush(); recordSize = Long.valueOf(tempRecordFile.length()).intValue() - headerSize; writer.writeRecord(record2); writer.flush(); recordSize2 = Long.valueOf(tempRecordFile.length()).intValue() - headerSize - recordSize; writer.close(); System.out.println("headerSize =" + headerSize); System.out.println("recordSize =" + recordSize); System.out.println("recordSize2=" + recordSize2); }
Example 4
Source File: WriteAheadStorePartition.java From nifi with Apache License 2.0 | 5 votes |
private Map<ProvenanceEventRecord, StorageSummary> addEvents(final Iterable<ProvenanceEventRecord> events, final RecordWriter writer) throws IOException { final Map<ProvenanceEventRecord, StorageSummary> locationMap = new HashMap<>(); try { long maxId = -1L; int numEvents = 0; for (final ProvenanceEventRecord nextEvent : events) { final StorageSummary writerSummary = writer.writeRecord(nextEvent); final StorageSummary summaryWithIndex = new StorageSummary(writerSummary.getEventId(), writerSummary.getStorageLocation(), this.partitionName, writerSummary.getBlockIndex(), writerSummary.getSerializedLength(), writerSummary.getBytesWritten()); locationMap.put(nextEvent, summaryWithIndex); maxId = summaryWithIndex.getEventId(); numEvents++; } if (numEvents == 0) { return locationMap; } writer.flush(); // Update max event id to be equal to be the greater of the current value or the // max value just written. final long maxIdWritten = maxId; this.maxEventId.getAndUpdate(cur -> Math.max(maxIdWritten, cur)); if (config.isAlwaysSync()) { writer.sync(); } } catch (final Exception e) { // We need to set the repoDirty flag before we release the lock for this journal. // Otherwise, another thread may write to this journal -- this is a problem because // the journal contains part of our record but not all of it. Writing to the end of this // journal will result in corruption! writer.markDirty(); throw e; } return locationMap; }
Example 5
Source File: ITestPersistentProvenanceRepository.java From nifi with Apache License 2.0 | 5 votes |
@BeforeClass public static void findJournalSizes() throws IOException { // determine header and record size final Map<String, String> attributes = new HashMap<>(); final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder(); builder.setEventTime(System.currentTimeMillis()); builder.setEventType(ProvenanceEventType.RECEIVE); builder.setTransitUri("nifi://unit-test"); attributes.put("uuid", "12345678-0000-0000-0000-012345678912"); builder.fromFlowFile(createFlowFile(3L, 3000L, attributes)); builder.setComponentId("1234"); builder.setComponentType("dummy processor"); final ProvenanceEventRecord record = builder.build(); builder.setComponentId("2345"); final ProvenanceEventRecord record2 = builder.build(); final File tempRecordFile = tempFolder.newFile("record.tmp"); System.out.println("findJournalSizes position 0 = " + tempRecordFile.length()); final AtomicLong idGenerator = new AtomicLong(0L); final RecordWriter writer = RecordWriters.newSchemaRecordWriter(tempRecordFile, idGenerator, false, false); writer.writeHeader(12345L); writer.flush(); headerSize = Long.valueOf(tempRecordFile.length()).intValue(); writer.writeRecord(record); writer.flush(); recordSize = Long.valueOf(tempRecordFile.length()).intValue() - headerSize; writer.writeRecord(record2); writer.flush(); recordSize2 = Long.valueOf(tempRecordFile.length()).intValue() - headerSize - recordSize; writer.close(); System.out.println("headerSize =" + headerSize); System.out.println("recordSize =" + recordSize); System.out.println("recordSize2=" + recordSize2); }