Java Code Examples for org.apache.nifi.components.state.StateMap#get()
The following examples show how to use org.apache.nifi.components.state.StateMap#get().
You can go to the original project or source file by following the links above each example.
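The examples below share one pattern: obtain the component's StateMap from a StateManager, check getVersion() to see whether any state has ever been stored (it returns -1 when nothing has been saved), and then read individual values with get(String). The following is a minimal, self-contained sketch of that pattern, not code from any of the projects listed; the class name StateMapGetSketch and the state key "last.processed" are invented for illustration.

import java.io.IOException;
import java.util.Collections;

import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateMap;

public class StateMapGetSketch {

    // Hypothetical state key, used only for this illustration.
    private static final String LAST_PROCESSED_KEY = "last.processed";

    /**
     * Returns the previously stored value for LAST_PROCESSED_KEY, or null if
     * no state has been saved yet (StateMap#getVersion() returns -1 in that case).
     */
    static String loadLastProcessed(final StateManager stateManager) throws IOException {
        final StateMap stateMap = stateManager.getState(Scope.CLUSTER);
        if (stateMap.getVersion() == -1L) {
            return null; // nothing stored yet
        }
        return stateMap.get(LAST_PROCESSED_KEY);
    }

    /**
     * Stores a new value, replacing any previously saved state.
     */
    static void saveLastProcessed(final StateManager stateManager, final String value) throws IOException {
        stateManager.setState(Collections.singletonMap(LAST_PROCESSED_KEY, value), Scope.CLUSTER);
    }
}

Each of the real examples that follow applies the same read-check-get sequence inside a NiFi processor, controller component, or test.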
Example 1
Source File: GetSplunk.java From localization_nifi with Apache License 2.0
private TimeRange loadState(StateManager stateManager) throws IOException {
    final StateMap stateMap = stateManager.getState(Scope.CLUSTER);

    if (stateMap.getVersion() < 0) {
        getLogger().debug("No previous state found");
        return null;
    }

    final String earliest = stateMap.get(EARLIEST_TIME_KEY);
    final String latest = stateMap.get(LATEST_TIME_KEY);
    getLogger().debug("Loaded state with earliestTime of {} and latestTime of {}", new Object[] {earliest, latest});

    if (StringUtils.isBlank(earliest) && StringUtils.isBlank(latest)) {
        return null;
    } else {
        return new TimeRange(earliest, latest);
    }
}
Example 2
Source File: GetSplunk.java From nifi with Apache License 2.0
private TimeRange loadState(StateManager stateManager) throws IOException {
    final StateMap stateMap = stateManager.getState(Scope.CLUSTER);

    if (stateMap.getVersion() < 0) {
        getLogger().debug("No previous state found");
        return null;
    }

    final String earliest = stateMap.get(EARLIEST_TIME_KEY);
    final String latest = stateMap.get(LATEST_TIME_KEY);
    getLogger().debug("Loaded state with earliestTime of {} and latestTime of {}", new Object[] {earliest, latest});

    if (StringUtils.isBlank(earliest) && StringUtils.isBlank(latest)) {
        return null;
    } else {
        return new TimeRange(earliest, latest);
    }
}
Example 3
Source File: ListS3.java From nifi with Apache License 2.0
private void restoreState(final ProcessContext context) throws IOException {
    final StateMap stateMap = context.getStateManager().getState(Scope.CLUSTER);
    if (stateMap.getVersion() == -1L || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
        currentTimestamp = 0L;
        currentKeys = new HashSet<>();
    } else {
        currentTimestamp = Long.parseLong(stateMap.get(CURRENT_TIMESTAMP));
        currentKeys = extractKeys(stateMap);
    }
}
Example 4
Source File: MockStateManager.java From localization_nifi with Apache License 2.0
private String getValue(final String key, final Scope scope) {
    final StateMap stateMap;
    if (scope == Scope.CLUSTER) {
        stateMap = clusterStateMap;
    } else {
        stateMap = localStateMap;
    }

    return stateMap.get(key);
}
Example 5
Source File: ScrollElasticsearchHttp.java From nifi with Apache License 2.0
private String loadScrollId(StateManager stateManager) throws IOException {
    final StateMap stateMap = stateManager.getState(Scope.LOCAL);
    if (stateMap.getVersion() < 0) {
        getLogger().debug("No previous state found");
        return null;
    }
    final String scrollId = stateMap.get(SCROLL_ID_STATE);
    getLogger().debug("Loaded state with scrollId {}", new Object[] { scrollId });
    return scrollId;
}
Example 6
Source File: ListGCSBucket.java From nifi with Apache License 2.0
void restoreState(final ProcessContext context) throws IOException {
    final StateMap stateMap = context.getStateManager().getState(Scope.CLUSTER);
    if (stateMap.getVersion() == -1L || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
        currentTimestamp = 0L;
        currentKeys.clear();
    } else {
        currentTimestamp = Long.parseLong(stateMap.get(CURRENT_TIMESTAMP));
        currentKeys.clear();
        currentKeys.addAll(extractKeys(stateMap));
    }
}
Example 7
Source File: AbstractListProcessor.java From nifi with Apache License 2.0
@OnScheduled
public final void updateState(final ProcessContext context) throws IOException {
    final String path = getPath(context);
    final DistributedMapCacheClient client = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    // Check if state already exists for this path. If so, we have already migrated the state.
    final StateMap stateMap = context.getStateManager().getState(getStateScope(context));
    if (stateMap.getVersion() == -1L) {
        try {
            // Migrate state from the old way of managing state (distributed cache service and local file)
            // to the new mechanism (State Manager).
            migrateState(path, client, context.getStateManager(), getStateScope(context));
        } catch (final IOException ioe) {
            throw new IOException("Failed to properly migrate state to State Manager", ioe);
        }
    }

    // When scheduled to run, check if the associated timestamp is null, signifying a clearing of state and reset the internal timestamp
    if (lastListedLatestEntryTimestampMillis != null && stateMap.get(LATEST_LISTED_ENTRY_TIMESTAMP_KEY) == null) {
        getLogger().info("Detected that state was cleared for this component. Resetting internal values.");
        resetTimeStates();
    }

    if (resetState) {
        context.getStateManager().clear(getStateScope(context));
        resetState = false;
    }
}
Example 8
Source File: StatePeerPersistence.java From nifi with Apache License 2.0
@Override
public PeerStatusCache restore() throws IOException {
    final StateMap state = stateManager.getState(Scope.LOCAL);
    final String storedPeers = state.get(STATE_KEY_PEERS);
    if (storedPeers != null && !storedPeers.isEmpty()) {
        try (final BufferedReader reader = new BufferedReader(new StringReader(storedPeers))) {
            return restorePeerStatuses(reader, Long.parseLong(state.get(STATE_KEY_PEERS_TIMESTAMP)));
        }
    }
    return null;
}
Example 9
Source File: MockStateManager.java From nifi with Apache License 2.0
private String getValue(final String key, final Scope scope) {
    final StateMap stateMap;
    if (scope == Scope.CLUSTER) {
        stateMap = clusterStateMap;
    } else {
        stateMap = localStateMap;
    }

    return stateMap.get(key);
}
Example 10
Source File: ListS3.java From localization_nifi with Apache License 2.0
private void restoreState(final ProcessContext context) throws IOException {
    final StateMap stateMap = context.getStateManager().getState(Scope.CLUSTER);
    if (stateMap.getVersion() == -1L || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
        currentTimestamp = 0L;
        currentKeys = new HashSet<>();
    } else {
        currentTimestamp = Long.parseLong(stateMap.get(CURRENT_TIMESTAMP));
        currentKeys = extractKeys(stateMap);
    }
}
Example 11
Source File: ScrollElasticsearchHttp.java From localization_nifi with Apache License 2.0
private String loadScrollId(StateManager stateManager) throws IOException {
    final StateMap stateMap = stateManager.getState(Scope.LOCAL);
    if (stateMap.getVersion() < 0) {
        getLogger().debug("No previous state found");
        return null;
    }
    final String scrollId = stateMap.get(SCROLL_ID_STATE);
    getLogger().debug("Loaded state with scrollId {}", new Object[] { scrollId });
    return scrollId;
}
Example 12
Source File: ListGCSBucket.java From localization_nifi with Apache License 2.0
void restoreState(final ProcessContext context) throws IOException {
    final StateMap stateMap = context.getStateManager().getState(Scope.CLUSTER);
    if (stateMap.getVersion() == -1L || stateMap.get(CURRENT_TIMESTAMP) == null || stateMap.get(CURRENT_KEY_PREFIX+"0") == null) {
        currentTimestamp = 0L;
        currentKeys = new HashSet<>();
    } else {
        currentTimestamp = Long.parseLong(stateMap.get(CURRENT_TIMESTAMP));
        currentKeys = extractKeys(stateMap);
    }
}
Example 13
Source File: AbstractListProcessor.java From localization_nifi with Apache License 2.0
@OnScheduled
public final void updateState(final ProcessContext context) throws IOException {
    final String path = getPath(context);
    final DistributedMapCacheClient client = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    // Check if state already exists for this path. If so, we have already migrated the state.
    final StateMap stateMap = context.getStateManager().getState(getStateScope(context));
    if (stateMap.getVersion() == -1L) {
        try {
            // Migrate state from the old way of managing state (distributed cache service and local file)
            // to the new mechanism (State Manager).
            migrateState(path, client, context.getStateManager(), getStateScope(context));
        } catch (final IOException ioe) {
            throw new IOException("Failed to properly migrate state to State Manager", ioe);
        }
    }

    // When scheduled to run, check if the associated timestamp is null, signifying a clearing of state and reset the internal timestamp
    if (lastListingTime != null && stateMap.get(LISTING_TIMESTAMP_KEY) == null) {
        getLogger().info("Detected that state was cleared for this component. Resetting internal values.");
        resetTimeStates();
    }

    if (resetState) {
        context.getStateManager().clear(getStateScope(context));
        resetState = false;
    }
}
Example 14
Source File: TestGetSplunk.java From nifi with Apache License 2.0
@Test
public void testGetWithManagedFromCurrentUsingEventTime() throws IOException, ParseException {
    final String query = "search tcp:7879";
    final String outputMode = GetSplunk.ATOM_VALUE.getValue();

    runner.setProperty(GetSplunk.QUERY, query);
    runner.setProperty(GetSplunk.OUTPUT_MODE, outputMode);
    runner.setProperty(GetSplunk.TIME_RANGE_STRATEGY, GetSplunk.MANAGED_CURRENT_VALUE.getValue());

    final String resultContent = "fake results";
    final ByteArrayInputStream input = new ByteArrayInputStream(resultContent.getBytes(StandardCharsets.UTF_8));
    when(service.export(eq(query), any(JobExportArgs.class))).thenReturn(input);

    // run once and don't shut down, shouldn't produce any results first time
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 0);

    // capture what the args were on last run
    verify(service, times(0)).export(eq(query), any(JobExportArgs.class));

    final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
    Assert.assertNotNull(state);
    Assert.assertTrue(state.getVersion() > 0);

    // save the latest time from the first run which should be earliest time of next run
    final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);

    final SimpleDateFormat format = new SimpleDateFormat(GetSplunk.DATE_TIME_FORMAT);
    format.setTimeZone(TimeZone.getTimeZone("UTC"));

    final Date lastLatestDate = format.parse(lastLatest);
    final String expectedLatest = format.format(new Date(lastLatestDate.getTime() + 1));

    // run again
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 1);

    final ArgumentCaptor<JobExportArgs> capture = ArgumentCaptor.forClass(JobExportArgs.class);
    verify(service, times(1)).export(eq(query), capture.capture());

    // second execution the earliest time should be the previous latest_time
    final JobExportArgs actualArgs = capture.getValue();
    Assert.assertNotNull(actualArgs);

    Assert.assertEquals(expectedLatest, actualArgs.get("earliest_time"));
    Assert.assertNotNull(actualArgs.get("latest_time"));
}
Example 15
Source File: TestGetSplunk.java From nifi with Apache License 2.0
@Test
public void testGetWithManagedFromCurrentUsingIndexTime() throws IOException, ParseException {
    final String query = "search tcp:7879";
    final String outputMode = GetSplunk.ATOM_VALUE.getValue();

    runner.setProperty(GetSplunk.QUERY, query);
    runner.setProperty(GetSplunk.OUTPUT_MODE, outputMode);
    runner.setProperty(GetSplunk.TIME_RANGE_STRATEGY, GetSplunk.MANAGED_CURRENT_VALUE.getValue());
    runner.setProperty(GetSplunk.TIME_FIELD_STRATEGY, GetSplunk.INDEX_TIME_VALUE.getValue());

    final String resultContent = "fake results";
    final ByteArrayInputStream input = new ByteArrayInputStream(resultContent.getBytes(StandardCharsets.UTF_8));
    when(service.export(eq(query), any(JobExportArgs.class))).thenReturn(input);

    // run once and don't shut down, shouldn't produce any results first time
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 0);

    // capture what the args were on last run
    verify(service, times(0)).export(eq(query), any(JobExportArgs.class));

    final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
    Assert.assertNotNull(state);
    Assert.assertTrue(state.getVersion() > 0);

    // save the latest time from the first run which should be earliest time of next run
    final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);

    final SimpleDateFormat format = new SimpleDateFormat(GetSplunk.DATE_TIME_FORMAT);
    format.setTimeZone(TimeZone.getTimeZone("UTC"));

    final Date lastLatestDate = format.parse(lastLatest);
    final String expectedLatest = format.format(new Date(lastLatestDate.getTime() + 1));

    // run again
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 1);

    final ArgumentCaptor<JobExportArgs> capture = ArgumentCaptor.forClass(JobExportArgs.class);
    verify(service, times(1)).export(eq(query), capture.capture());

    // second execution the earliest time should be the previous latest_time
    final JobExportArgs actualArgs = capture.getValue();
    Assert.assertNotNull(actualArgs);

    Assert.assertEquals(expectedLatest, actualArgs.get("index_earliest"));
    Assert.assertNotNull(actualArgs.get("index_latest"));
}
Example 16
Source File: TestGetSplunk.java From localization_nifi with Apache License 2.0
@Test
public void testGetWithManagedFromCurrentUsingIndexTime() throws IOException, ParseException {
    final String query = "search tcp:7879";
    final String outputMode = GetSplunk.ATOM_VALUE.getValue();

    runner.setProperty(GetSplunk.QUERY, query);
    runner.setProperty(GetSplunk.OUTPUT_MODE, outputMode);
    runner.setProperty(GetSplunk.TIME_RANGE_STRATEGY, GetSplunk.MANAGED_CURRENT_VALUE.getValue());
    runner.setProperty(GetSplunk.TIME_FIELD_STRATEGY, GetSplunk.INDEX_TIME_VALUE.getValue());

    final String resultContent = "fake results";
    final ByteArrayInputStream input = new ByteArrayInputStream(resultContent.getBytes(StandardCharsets.UTF_8));
    when(service.export(eq(query), any(JobExportArgs.class))).thenReturn(input);

    // run once and don't shut down, shouldn't produce any results first time
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 0);

    // capture what the args were on last run
    verify(service, times(0)).export(eq(query), any(JobExportArgs.class));

    final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
    Assert.assertNotNull(state);
    Assert.assertTrue(state.getVersion() > 0);

    // save the latest time from the first run which should be earliest time of next run
    final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);

    final SimpleDateFormat format = new SimpleDateFormat(GetSplunk.DATE_TIME_FORMAT);
    format.setTimeZone(TimeZone.getTimeZone("UTC"));

    final Date lastLatestDate = format.parse(lastLatest);
    final String expectedLatest = format.format(new Date(lastLatestDate.getTime() + 1));

    // run again
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 1);

    final ArgumentCaptor<JobExportArgs> capture = ArgumentCaptor.forClass(JobExportArgs.class);
    verify(service, times(1)).export(eq(query), capture.capture());

    // second execution the earliest time should be the previous latest_time
    final JobExportArgs actualArgs = capture.getValue();
    Assert.assertNotNull(actualArgs);

    Assert.assertEquals(expectedLatest, actualArgs.get("index_earliest"));
    Assert.assertNotNull(actualArgs.get("index_latest"));
}
Example 17
Source File: TestGetSplunk.java From localization_nifi with Apache License 2.0
@Test
public void testGetWithManagedFromCurrentUsingEventTime() throws IOException, ParseException {
    final String query = "search tcp:7879";
    final String outputMode = GetSplunk.ATOM_VALUE.getValue();

    runner.setProperty(GetSplunk.QUERY, query);
    runner.setProperty(GetSplunk.OUTPUT_MODE, outputMode);
    runner.setProperty(GetSplunk.TIME_RANGE_STRATEGY, GetSplunk.MANAGED_CURRENT_VALUE.getValue());

    final String resultContent = "fake results";
    final ByteArrayInputStream input = new ByteArrayInputStream(resultContent.getBytes(StandardCharsets.UTF_8));
    when(service.export(eq(query), any(JobExportArgs.class))).thenReturn(input);

    // run once and don't shut down, shouldn't produce any results first time
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 0);

    // capture what the args were on last run
    verify(service, times(0)).export(eq(query), any(JobExportArgs.class));

    final StateMap state = runner.getStateManager().getState(Scope.CLUSTER);
    Assert.assertNotNull(state);
    Assert.assertTrue(state.getVersion() > 0);

    // save the latest time from the first run which should be earliest time of next run
    final String lastLatest = state.get(GetSplunk.LATEST_TIME_KEY);

    final SimpleDateFormat format = new SimpleDateFormat(GetSplunk.DATE_TIME_FORMAT);
    format.setTimeZone(TimeZone.getTimeZone("UTC"));

    final Date lastLatestDate = format.parse(lastLatest);
    final String expectedLatest = format.format(new Date(lastLatestDate.getTime() + 1));

    // run again
    runner.run(1, false);
    runner.assertAllFlowFilesTransferred(GetSplunk.REL_SUCCESS, 1);

    final ArgumentCaptor<JobExportArgs> capture = ArgumentCaptor.forClass(JobExportArgs.class);
    verify(service, times(1)).export(eq(query), capture.capture());

    // second execution the earliest time should be the previous latest_time
    final JobExportArgs actualArgs = capture.getValue();
    Assert.assertNotNull(actualArgs);

    Assert.assertEquals(expectedLatest, actualArgs.get("earliest_time"));
    Assert.assertNotNull(actualArgs.get("latest_time"));
}