org.apache.flink.cep.nfa.sharedbuffer.SharedBufferAccessor Java Examples
The following examples show how to use
org.apache.flink.cep.nfa.sharedbuffer.SharedBufferAccessor.
Each example indicates the project and source file it was taken from.
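Most of the examples share the same lifecycle: obtain a SharedBufferAccessor from the backing SharedBuffer, keep it in a try-with-resources block (the accessor is AutoCloseable), advance the NFA's time to the current timestamp so that timed-out partial matches are pruned, and then feed the event itself. The sketch below distills that pattern; it is not taken from either project, and the fields sharedBuffer, nfa, nfaState and timerService stand for objects assumed to be set up elsewhere, much like in the NFATestHarness examples further down.

// Minimal sketch of the common accessor lifecycle (assumed helper, not project code).
public Collection<Map<String, List<Event>>> feed(StreamRecord<Event> record) throws Exception {
    try (SharedBufferAccessor<Event> accessor = sharedBuffer.getAccessor()) {
        // advance event time first; this prunes partial matches that fell out of the window
        nfa.advanceTime(accessor, nfaState, record.getTimestamp());
        // then offer the event and collect any completed matches
        return nfa.process(
            accessor,
            nfaState,
            record.getValue(),
            record.getTimestamp(),
            AfterMatchSkipStrategy.noSkip(),
            timerService);
    }
}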
Example #1
Source File: NFAITCase.java From flink with Apache License 2.0
@Test
public void testSharedBufferClearing() throws Exception {
    Pattern<Event, ?> pattern = Pattern.<Event>begin("start").followedBy("end");

    Event a = new Event(40, "a", 1.0);
    Event b = new Event(41, "b", 2.0);

    NFA<Event> nfa = compile(pattern, false);
    TestTimerService timerService = new TestTimerService();
    try (SharedBufferAccessor<Event> accessor = Mockito.spy(sharedBuffer.getAccessor())) {
        nfa.process(accessor, nfa.createInitialNFAState(), a, 1, AfterMatchSkipStrategy.noSkip(), timerService);
        nfa.process(accessor, nfa.createInitialNFAState(), b, 2, AfterMatchSkipStrategy.noSkip(), timerService);
        Mockito.verify(accessor, Mockito.never()).advanceTime(anyLong());
        nfa.advanceTime(accessor, nfa.createInitialNFAState(), 2);
        Mockito.verify(accessor, Mockito.times(1)).advanceTime(2);
    }
}
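The accessor is wrapped in a Mockito spy so the test can assert how the NFA delegates to it: SharedBufferAccessor#advanceTime must not be called while events are merely processed, and must be called exactly once when NFA#advanceTime is invoked.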
Example #2
Source File: NFA.java From flink with Apache License 2.0
/**
 * Extracts all the sequences of events from the start to the given computation state. An event
 * sequence is returned as a map which contains the events and the names of the states to which
 * the events were mapped.
 *
 * @param sharedBufferAccessor The accessor to {@link SharedBuffer} from which to extract the matches
 * @param computationState The end computation state of the extracted event sequences
 * @return Collection of event sequences which end in the given computation state
 * @throws Exception Thrown if the system cannot access the state.
 */
private Map<String, List<EventId>> extractCurrentMatches(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final ComputationState computationState) throws Exception {
    if (computationState.getPreviousBufferEntry() == null) {
        return new HashMap<>();
    }

    List<Map<String, List<EventId>>> paths = sharedBufferAccessor.extractPatterns(
        computationState.getPreviousBufferEntry(),
        computationState.getVersion());

    if (paths.isEmpty()) {
        return new HashMap<>();
    }
    // for a given computation state, we cannot have more than one matching patterns.
    Preconditions.checkState(paths.size() == 1);
    return paths.get(0);
}
Example #3
Source File: AfterMatchSkipStrategy.java From flink with Apache License 2.0
/**
 * Prunes matches/partial matches based on the chosen strategy.
 *
 * @param matchesToPrune current partial matches
 * @param matchedResult already completed matches
 * @param sharedBufferAccessor accessor to corresponding shared buffer
 * @throws Exception thrown if could not access the state
 */
public void prune(
        Collection<ComputationState> matchesToPrune,
        Collection<Map<String, List<EventId>>> matchedResult,
        SharedBufferAccessor<?> sharedBufferAccessor) throws Exception {

    EventId pruningId = getPruningId(matchedResult);
    if (pruningId != null) {
        List<ComputationState> discardStates = new ArrayList<>();
        for (ComputationState computationState : matchesToPrune) {
            if (computationState.getStartEventID() != null &&
                    shouldPrune(computationState.getStartEventID(), pruningId)) {
                sharedBufferAccessor.releaseNode(computationState.getPreviousBufferEntry());
                discardStates.add(computationState);
            }
        }

        matchesToPrune.removeAll(discardStates);
    }
}
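Note that each pruned partial match releases its buffer entry via releaseNode before it is discarded; the shared buffer reference-counts its nodes (see lockNode in the addComputationState examples below), so this release is what allows the corresponding entries to be cleaned up.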
Example #4
Source File: CEPOperatorTest.java From flink with Apache License 2.0
@Test
public void testProcessingTimestampisPassedToNFA() throws Exception {

    final NFA<Event> nfa = NFACompiler.compileFactory(Pattern.<Event>begin("begin"), true).createNFA();
    final NFA<Event> spyNFA = spy(nfa);

    try (
        OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            CepOperatorTestUtilities.getCepTestHarness(createOperatorForNFA(spyNFA).build())) {

        long timestamp = 5;

        harness.open();

        harness.setProcessingTime(timestamp);
        StreamRecord<Event> event = event().withTimestamp(3).asStreamRecord();
        harness.processElement(event);

        verify(spyNFA).process(
            any(SharedBufferAccessor.class),
            any(NFAState.class),
            eq(event.getValue()),
            eq(timestamp),
            any(AfterMatchSkipStrategy.class),
            any(TimerService.class));
    }
}
Example #5
Source File: NFAITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testSharedBufferClearing() throws Exception {
    Pattern<Event, ?> pattern = Pattern.<Event>begin("start").followedBy("end");

    Event a = new Event(40, "a", 1.0);
    Event b = new Event(41, "b", 2.0);

    NFA<Event> nfa = compile(pattern, false);
    TestTimerService timerService = new TestTimerService();
    try (SharedBufferAccessor<Event> accessor = Mockito.spy(sharedBuffer.getAccessor())) {
        nfa.process(accessor, nfa.createInitialNFAState(), a, 1, AfterMatchSkipStrategy.noSkip(), timerService);
        nfa.process(accessor, nfa.createInitialNFAState(), b, 2, AfterMatchSkipStrategy.noSkip(), timerService);
        Mockito.verify(accessor, Mockito.never()).advanceTime(anyLong());
        nfa.advanceTime(accessor, nfa.createInitialNFAState(), 2);
        Mockito.verify(accessor, Mockito.times(1)).advanceTime(2);
    }
}
Example #6
Source File: CEPOperatorTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testProcessingTimestampisPassedToNFA() throws Exception {

    final NFA<Event> nfa = NFACompiler.compileFactory(Pattern.<Event>begin("begin"), true).createNFA();
    final NFA<Event> spyNFA = spy(nfa);

    try (
        OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
            CepOperatorTestUtilities.getCepTestHarness(createOperatorForNFA(spyNFA).build())) {

        long timestamp = 5;

        harness.open();

        harness.setProcessingTime(timestamp);
        StreamRecord<Event> event = event().withTimestamp(3).asStreamRecord();
        harness.processElement(event);

        verify(spyNFA).process(
            any(SharedBufferAccessor.class),
            any(NFAState.class),
            eq(event.getValue()),
            eq(timestamp),
            any(AfterMatchSkipStrategy.class),
            any(TimerService.class));
    }
}
Example #7
Source File: AfterMatchSkipStrategy.java From Flink-CEPplus with Apache License 2.0
/**
 * Prunes matches/partial matches based on the chosen strategy.
 *
 * @param matchesToPrune current partial matches
 * @param matchedResult already completed matches
 * @param sharedBufferAccessor accessor to corresponding shared buffer
 * @throws Exception thrown if could not access the state
 */
public void prune(
        Collection<ComputationState> matchesToPrune,
        Collection<Map<String, List<EventId>>> matchedResult,
        SharedBufferAccessor<?> sharedBufferAccessor) throws Exception {

    EventId pruningId = getPruningId(matchedResult);
    if (pruningId != null) {
        List<ComputationState> discardStates = new ArrayList<>();
        for (ComputationState computationState : matchesToPrune) {
            if (computationState.getStartEventID() != null &&
                    shouldPrune(computationState.getStartEventID(), pruningId)) {
                sharedBufferAccessor.releaseNode(computationState.getPreviousBufferEntry());
                discardStates.add(computationState);
            }
        }

        matchesToPrune.removeAll(discardStates);
    }
}
Example #8
Source File: NFA.java From Flink-CEPplus with Apache License 2.0
/**
 * Extracts all the sequences of events from the start to the given computation state. An event
 * sequence is returned as a map which contains the events and the names of the states to which
 * the events were mapped.
 *
 * @param sharedBufferAccessor The accessor to {@link SharedBuffer} from which to extract the matches
 * @param computationState The end computation state of the extracted event sequences
 * @return Collection of event sequences which end in the given computation state
 * @throws Exception Thrown if the system cannot access the state.
 */
private Map<String, List<EventId>> extractCurrentMatches(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final ComputationState computationState) throws Exception {
    if (computationState.getPreviousBufferEntry() == null) {
        return new HashMap<>();
    }

    List<Map<String, List<EventId>>> paths = sharedBufferAccessor.extractPatterns(
        computationState.getPreviousBufferEntry(),
        computationState.getVersion());

    if (paths.isEmpty()) {
        return new HashMap<>();
    }
    // for a given computation state, we cannot have more than one matching patterns.
    Preconditions.checkState(paths.size() == 1);
    return paths.get(0);
}
Example #9
Source File: NFA.java From flink with Apache License 2.0
/**
 * Prunes states assuming there will be no events with timestamp <b>lower</b> than the given one.
 * It clears the sharedBuffer and also emits all timed out partial matches.
 *
 * @param sharedBufferAccessor the accessor to SharedBuffer object that we need to work upon while processing
 * @param nfaState The NFAState object that we need to affect while processing
 * @param timestamp timestamp that indicates that there will be no more events with lower timestamp
 * @return all timed out partial matches
 * @throws Exception Thrown if the system cannot access the state.
 */
public Collection<Tuple2<Map<String, List<T>>, Long>> advanceTime(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final NFAState nfaState,
        final long timestamp) throws Exception {
    final Collection<Tuple2<Map<String, List<T>>, Long>> timeoutResult = new ArrayList<>();
    final PriorityQueue<ComputationState> newPartialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);

    for (ComputationState computationState : nfaState.getPartialMatches()) {
        if (isStateTimedOut(computationState, timestamp)) {

            if (handleTimeout) {
                // extract the timed out event pattern
                Map<String, List<T>> timedOutPattern = sharedBufferAccessor.materializeMatch(extractCurrentMatches(
                    sharedBufferAccessor,
                    computationState));
                timeoutResult.add(Tuple2.of(timedOutPattern, computationState.getStartTimestamp() + windowTime));
            }

            sharedBufferAccessor.releaseNode(computationState.getPreviousBufferEntry());

            nfaState.setStateChanged();
        } else {
            newPartialMatches.add(computationState);
        }
    }

    nfaState.setNewPartialMatches(newPartialMatches);

    sharedBufferAccessor.advanceTime(timestamp);

    return timeoutResult;
}
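Two details are easy to miss here: the timestamp reported with each timed-out pattern is the match's start timestamp plus the configured windowTime, and the accessor's own advanceTime is forwarded at the end, giving the shared buffer a chance to clean up bookkeeping for timestamps that can no longer receive events.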
Example #10
Source File: NFA.java From flink with Apache License 2.0
private void addComputationState(
        SharedBufferAccessor<T> sharedBufferAccessor,
        List<ComputationState> computationStates,
        State<T> currentState,
        NodeId previousEntry,
        DeweyNumber version,
        long startTimestamp,
        EventId startEventId) throws Exception {
    ComputationState computationState = ComputationState.createState(
        currentState.getName(), previousEntry, version, startTimestamp, startEventId);
    computationStates.add(computationState);

    sharedBufferAccessor.lockNode(previousEntry);
}
Example #11
Source File: CepOperator.java From flink with Apache License 2.0
/**
 * Advances the time for the given NFA to the given timestamp. This means that no more events with timestamp
 * <b>lower</b> than the given timestamp should be passed to the nfa. This can lead to pruning and timeouts.
 */
private void advanceTime(NFAState nfaState, long timestamp) throws Exception {
    try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) {
        Collection<Tuple2<Map<String, List<IN>>, Long>> timedOut =
            nfa.advanceTime(sharedBufferAccessor, nfaState, timestamp);
        if (!timedOut.isEmpty()) {
            processTimedOutSequences(timedOut);
        }
    }
}
Example #12
Source File: NFA.java From flink with Apache License 2.0
ConditionContext(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final ComputationState computationState,
        final TimerService timerService,
        final long eventTimestamp) {
    this.computationState = computationState;
    this.sharedBufferAccessor = sharedBufferAccessor;
    this.timerService = timerService;
    this.eventTimestamp = eventTimestamp;
}
Example #13
Source File: NFATestHarness.java From flink with Apache License 2.0
public Collection<Map<String, List<Event>>> consumeRecord(StreamRecord<Event> inputEvent) throws Exception {
    try (SharedBufferAccessor<Event> sharedBufferAccessor = sharedBuffer.getAccessor()) {
        nfa.advanceTime(sharedBufferAccessor, nfaState, inputEvent.getTimestamp());

        return nfa.process(
            sharedBufferAccessor,
            nfaState,
            inputEvent.getValue(),
            inputEvent.getTimestamp(),
            afterMatchSkipStrategy,
            timerService);
    }
}
Example #14
Source File: CepOperator.java From Flink-CEPplus with Apache License 2.0
/**
 * Process the given event by giving it to the NFA and outputting the produced set of matched
 * event sequences.
 *
 * @param nfaState Our NFAState object
 * @param event The current event to be processed
 * @param timestamp The timestamp of the event
 */
private void processEvent(NFAState nfaState, IN event, long timestamp) throws Exception {
    // when an incoming event triggers injection of new pattern logic, call the user method to inject it
    if (needChange(event)) {
        changeNFA(event);
        return;
    }
    try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) {
        Collection<Map<String, List<IN>>> patterns =
            nfa.process(sharedBufferAccessor, nfaState, event, timestamp, afterMatchSkipStrategy, cepTimerService);
        processMatchedSequences(patterns, timestamp);
    }
}
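Compared with the upstream CepOperator, this Flink-CEPplus variant first checks needChange(event) and, when a control event arrives, injects new pattern logic through changeNFA(event) instead of matching; otherwise it behaves like the stock operator and hands the event to NFA#process through a freshly obtained accessor.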
Example #15
Source File: NFA.java From Flink-CEPplus with Apache License 2.0
ConditionContext(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final ComputationState computationState,
        final TimerService timerService,
        final long eventTimestamp) {
    this.computationState = computationState;
    this.sharedBufferAccessor = sharedBufferAccessor;
    this.timerService = timerService;
    this.eventTimestamp = eventTimestamp;
}
Example #16
Source File: CepOperator.java From Flink-CEPplus with Apache License 2.0
/**
 * Advances the time for the given NFA to the given timestamp. This means that no more events with timestamp
 * <b>lower</b> than the given timestamp should be passed to the nfa. This can lead to pruning and timeouts.
 */
private void advanceTime(NFAState nfaState, long timestamp) throws Exception {
    try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) {
        Collection<Tuple2<Map<String, List<IN>>, Long>> timedOut =
            nfa.advanceTime(sharedBufferAccessor, nfaState, timestamp);
        if (!timedOut.isEmpty()) {
            processTimedOutSequences(timedOut);
        }
    }
}
Example #17
Source File: NFA.java From Flink-CEPplus with Apache License 2.0
/**
 * Prunes states assuming there will be no events with timestamp <b>lower</b> than the given one.
 * It clears the sharedBuffer and also emits all timed out partial matches.
 *
 * @param sharedBufferAccessor the accessor to SharedBuffer object that we need to work upon while processing
 * @param nfaState The NFAState object that we need to affect while processing
 * @param timestamp timestamp that indicates that there will be no more events with lower timestamp
 * @return all timed out partial matches
 * @throws Exception Thrown if the system cannot access the state.
 */
public Collection<Tuple2<Map<String, List<T>>, Long>> advanceTime(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final NFAState nfaState,
        final long timestamp) throws Exception {
    final Collection<Tuple2<Map<String, List<T>>, Long>> timeoutResult = new ArrayList<>();
    final PriorityQueue<ComputationState> newPartialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);

    for (ComputationState computationState : nfaState.getPartialMatches()) {
        if (isStateTimedOut(computationState, timestamp)) {

            if (handleTimeout) {
                // extract the timed out event pattern
                Map<String, List<T>> timedOutPattern = sharedBufferAccessor.materializeMatch(extractCurrentMatches(
                    sharedBufferAccessor,
                    computationState));
                timeoutResult.add(Tuple2.of(timedOutPattern, computationState.getStartTimestamp() + windowTime));
            }

            sharedBufferAccessor.releaseNode(computationState.getPreviousBufferEntry());

            nfaState.setStateChanged();
        } else {
            newPartialMatches.add(computationState);
        }
    }

    nfaState.setNewPartialMatches(newPartialMatches);

    sharedBufferAccessor.advanceTime(timestamp);

    return timeoutResult;
}
Example #18
Source File: NFA.java From Flink-CEPplus with Apache License 2.0
private void addComputationState(
        SharedBufferAccessor<T> sharedBufferAccessor,
        List<ComputationState> computationStates,
        State<T> currentState,
        NodeId previousEntry,
        DeweyNumber version,
        long startTimestamp,
        EventId startEventId) throws Exception {
    ComputationState computationState = ComputationState.createState(
        currentState.getName(), previousEntry, version, startTimestamp, startEventId);
    computationStates.add(computationState);

    sharedBufferAccessor.lockNode(previousEntry);
}
Example #19
Source File: NFATestHarness.java From Flink-CEPplus with Apache License 2.0
public Collection<Map<String, List<Event>>> consumeRecord(StreamRecord<Event> inputEvent) throws Exception {
    try (SharedBufferAccessor<Event> sharedBufferAccessor = sharedBuffer.getAccessor()) {
        nfa.advanceTime(sharedBufferAccessor, nfaState, inputEvent.getTimestamp());

        return nfa.process(
            sharedBufferAccessor,
            nfaState,
            inputEvent.getValue(),
            inputEvent.getTimestamp(),
            afterMatchSkipStrategy,
            timerService);
    }
}
Example #20
Source File: NFA.java From flink with Apache License 2.0
EventWrapper(T event, long timestamp, SharedBufferAccessor<T> sharedBufferAccessor) {
    this.event = event;
    this.timestamp = timestamp;
    this.sharedBufferAccessor = sharedBufferAccessor;
}
Example #21
Source File: NFA.java From flink with Apache License 2.0
private Collection<Map<String, List<T>>> doProcess(
        final SharedBufferAccessor<T> sharedBufferAccessor,
        final NFAState nfaState,
        final EventWrapper event,
        final AfterMatchSkipStrategy afterMatchSkipStrategy,
        final TimerService timerService) throws Exception {

    final PriorityQueue<ComputationState> newPartialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
    final PriorityQueue<ComputationState> potentialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);

    // iterate over all current computations
    for (ComputationState computationState : nfaState.getPartialMatches()) {
        final Collection<ComputationState> newComputationStates = computeNextStates(
            sharedBufferAccessor,
            computationState,
            event,
            timerService);

        if (newComputationStates.size() != 1) {
            nfaState.setStateChanged();
        } else if (!newComputationStates.iterator().next().equals(computationState)) {
            nfaState.setStateChanged();
        }

        // delay adding new computation states in case a stop state is reached and we discard the path.
        final Collection<ComputationState> statesToRetain = new ArrayList<>();
        // if stop state reached in this path
        boolean shouldDiscardPath = false;
        for (final ComputationState newComputationState : newComputationStates) {

            if (isFinalState(newComputationState)) {
                potentialMatches.add(newComputationState);
            } else if (isStopState(newComputationState)) {
                // reached stop state. release entry for the stop state
                shouldDiscardPath = true;
                sharedBufferAccessor.releaseNode(newComputationState.getPreviousBufferEntry());
            } else {
                // add new computation state; it will be processed once the next event arrives
                statesToRetain.add(newComputationState);
            }
        }

        if (shouldDiscardPath) {
            // a stop state was reached in this branch. release branch which results in removing previous event from
            // the buffer
            for (final ComputationState state : statesToRetain) {
                sharedBufferAccessor.releaseNode(state.getPreviousBufferEntry());
            }
        } else {
            newPartialMatches.addAll(statesToRetain);
        }
    }

    if (!potentialMatches.isEmpty()) {
        nfaState.setStateChanged();
    }

    List<Map<String, List<T>>> result = new ArrayList<>();
    if (afterMatchSkipStrategy.isSkipStrategy()) {
        processMatchesAccordingToSkipStrategy(sharedBufferAccessor,
            nfaState,
            afterMatchSkipStrategy,
            potentialMatches,
            newPartialMatches,
            result);
    } else {
        for (ComputationState match : potentialMatches) {
            Map<String, List<T>> materializedMatch =
                sharedBufferAccessor.materializeMatch(
                    sharedBufferAccessor.extractPatterns(
                        match.getPreviousBufferEntry(),
                        match.getVersion()).get(0)
                );

            result.add(materializedMatch);
            sharedBufferAccessor.releaseNode(match.getPreviousBufferEntry());
        }
    }

    nfaState.setNewPartialMatches(newPartialMatches);

    return result;
}
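Newly computed states are first collected into statesToRetain rather than being added to the partial matches immediately; if the expansion of a path reaches a stop state, the whole path is discarded and every retained state's buffer entry is released, which (per the inline comment) removes that path's previous events from the shared buffer.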