Java Code Examples for org.apache.flink.util.Preconditions#checkNotNull()
The following examples show how to use org.apache.flink.util.Preconditions#checkNotNull().
You can go to the original project or source file by following the links above each example.
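All of the examples share two call shapes: a bare checkNotNull(ref), which throws a NullPointerException with no message when ref is null, and checkNotNull(ref, message), which attaches an error message. Because the method returns its (non-null) argument, it is commonly chained into a field assignment. The following minimal sketch illustrates both shapes; the Widget class is hypothetical and not part of Flink:

import org.apache.flink.util.Preconditions;

// Hypothetical class illustrating the two call shapes used throughout the examples below.
public class Widget {

    private final String name;

    public Widget(String name) {
        // checkNotNull returns its argument, so validation and assignment combine into one statement.
        this.name = Preconditions.checkNotNull(name, "name must not be null");
    }

    public static int nameLength(Widget widget) {
        // Bare variant: throws a NullPointerException without a message if widget is null.
        return Preconditions.checkNotNull(widget).name.length();
    }
}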
Example 1
Source File: TableFactoryService.java From flink with Apache License 2.0
/**
 * Filters found factories by factory class and with matching context.
 */
private static <T extends TableFactory> List<T> filter(
        List<TableFactory> foundFactories,
        Class<T> factoryClass,
        Map<String, String> properties) {

    Preconditions.checkNotNull(factoryClass);
    Preconditions.checkNotNull(properties);

    List<T> classFactories = filterByFactoryClass(
        factoryClass,
        properties,
        foundFactories);

    List<T> contextFactories = filterByContext(
        factoryClass,
        properties,
        classFactories);

    return filterBySupportedProperties(
        factoryClass,
        properties,
        classFactories,
        contextFactories);
}
Example 2
Source File: SingleLogicalSlot.java From flink with Apache License 2.0
public SingleLogicalSlot(
        SlotRequestId slotRequestId,
        SlotContext slotContext,
        @Nullable SlotSharingGroupId slotSharingGroupId,
        Locality locality,
        SlotOwner slotOwner,
        boolean willBeOccupiedIndefinitely) {
    this.slotRequestId = Preconditions.checkNotNull(slotRequestId);
    this.slotContext = Preconditions.checkNotNull(slotContext);
    this.slotSharingGroupId = slotSharingGroupId;
    this.locality = Preconditions.checkNotNull(locality);
    this.slotOwner = Preconditions.checkNotNull(slotOwner);
    this.willBeOccupiedIndefinitely = willBeOccupiedIndefinitely;
    this.releaseFuture = new CompletableFuture<>();

    this.state = State.ALIVE;
    this.payload = null;
}
Example 3
Source File: TaskExecutorSubmissionTest.java From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
        String taskName,
        ExecutionAttemptID eid,
        Class<? extends AbstractInvokable> abstractInvokable,
        int maxNumberOfSubtasks,
        List<ResultPartitionDeploymentDescriptor> producedPartitions,
        List<InputGateDeploymentDescriptor> inputGates) throws IOException {
    Preconditions.checkNotNull(producedPartitions);
    Preconditions.checkNotNull(inputGates);
    return createTaskDeploymentDescriptor(
        jobId,
        testName.getMethodName(),
        eid,
        new SerializedValue<>(new ExecutionConfig()),
        taskName,
        maxNumberOfSubtasks,
        0,
        1,
        0,
        new Configuration(),
        new Configuration(),
        abstractInvokable.getName(),
        producedPartitions,
        inputGates,
        Collections.emptyList(),
        Collections.emptyList(),
        0);
}
Example 4
Source File: TaskCheckpointStatistics.java From flink with Apache License 2.0
@JsonCreator
public TaskCheckpointStatistics(
        @JsonProperty(FIELD_NAME_ID) long checkpointId,
        @JsonProperty(FIELD_NAME_CHECKPOINT_STATUS) CheckpointStatsStatus checkpointStatus,
        @JsonProperty(FIELD_NAME_LATEST_ACK_TIMESTAMP) long latestAckTimestamp,
        @JsonProperty(FIELD_NAME_STATE_SIZE) long stateSize,
        @JsonProperty(FIELD_NAME_DURATION) long duration,
        @JsonProperty(FIELD_NAME_ALIGNMENT_BUFFERED) long alignmentBuffered,
        @JsonProperty(FIELD_NAME_NUM_SUBTASKS) int numSubtasks,
        @JsonProperty(FIELD_NAME_NUM_ACK_SUBTASKS) int numAckSubtasks) {
    this.checkpointId = checkpointId;
    this.checkpointStatus = Preconditions.checkNotNull(checkpointStatus);
    this.latestAckTimestamp = latestAckTimestamp;
    this.stateSize = stateSize;
    this.duration = duration;
    this.alignmentBuffered = alignmentBuffered;
    this.numSubtasks = numSubtasks;
    this.numAckSubtasks = numAckSubtasks;
}
Example 5
Source File: AggregateOperator.java From flink with Apache License 2.0
/**
 * Grouped aggregation.
 *
 * @param input the grouped input on which to aggregate
 * @param function the aggregation to apply
 * @param field the tuple field position on which to aggregate
 */
public AggregateOperator(Grouping<IN> input, Aggregations function, int field, String aggregateLocationName) {
    super(Preconditions.checkNotNull(input).getInputDataSet(), input.getInputDataSet().getType());
    Preconditions.checkNotNull(function);

    this.aggregateLocationName = aggregateLocationName;

    if (!input.getInputDataSet().getType().isTupleType()) {
        throw new InvalidProgramException("Aggregating on field positions is only possible on tuple data types.");
    }

    TupleTypeInfoBase<?> inType = (TupleTypeInfoBase<?>) input.getInputDataSet().getType();

    if (field < 0 || field >= inType.getArity()) {
        throw new IllegalArgumentException("Aggregation field position is out of range.");
    }

    AggregationFunctionFactory factory = function.getFactory();
    AggregationFunction<?> aggFunct = factory.createAggregationFunction(inType.getTypeAt(field).getTypeClass());

    // set the aggregation fields
    this.aggregationFunctions.add(aggFunct);
    this.fields.add(field);
    this.grouping = input;
}
Example 6
Source File: HiveTableOutputFormat.java From flink with Apache License 2.0
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table,
        HiveTablePartition hiveTablePartition, Properties tableProperties, boolean overwrite) {
    super(jobConf.getCredentials());

    Preconditions.checkNotNull(table, "table cannot be null");
    Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
    Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");

    HadoopUtils.mergeHadoopConf(jobConf);
    this.jobConf = jobConf;
    this.tablePath = tablePath;
    this.partitionColumns = table.getPartitionKeys();
    TableSchema tableSchema = table.getSchema();
    this.fieldNames = tableSchema.getFieldNames();
    this.fieldTypes = tableSchema.getFieldDataTypes();
    this.hiveTablePartition = hiveTablePartition;
    this.tableProperties = tableProperties;
    this.overwrite = overwrite;
    isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
    isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
    hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
            "Hive version is not defined");
}
Example 7
Source File: Pattern.java From Flink-CEPplus with Apache License 2.0
/**
 * Applies a stop condition for a looping state. It allows cleaning the underlying state.
 *
 * @param untilCondition a condition an event has to satisfy to stop collecting events into looping state
 * @return The same pattern with applied untilCondition
 */
public Pattern<T, F> until(IterativeCondition<F> untilCondition) {
    Preconditions.checkNotNull(untilCondition, "The condition cannot be null");

    if (this.untilCondition != null) {
        throw new MalformedPatternException("Only one until condition can be applied.");
    }

    if (!quantifier.hasProperty(Quantifier.QuantifierProperty.LOOPING)) {
        throw new MalformedPatternException("The until condition is only applicable to looping states.");
    }

    ClosureCleaner.clean(untilCondition, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
    this.untilCondition = untilCondition;
    return this;
}
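Since until() rejects non-looping patterns, a quick sketch of a valid call site may help; the Event POJO below is hypothetical and not taken from the Flink-CEPplus sources:

import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

public class UntilExample {

    /** Hypothetical event type used only for this sketch. */
    public static class Event {
        public String name;
        public double price;
    }

    public static Pattern<Event, Event> risingUntilEnd() {
        return Pattern.<Event>begin("rising")
            .where(new SimpleCondition<Event>() {
                @Override
                public boolean filter(Event event) {
                    return event.price > 0;
                }
            })
            // oneOrMore() makes this a looping state, so until() is applicable
            .oneOrMore()
            .until(new SimpleCondition<Event>() {
                @Override
                public boolean filter(Event event) {
                    // stop collecting events into the looping state here
                    return "end".equals(event.name);
                }
            });
    }
}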
Example 8
Source File: SingleOutputStreamOperator.java From flink with Apache License 2.0
/**
 * Sets the minimum and preferred resources for this operator. The lower and upper resource
 * limits will be considered by the dynamic resource resizing feature in future plans.
 *
 * @param minResources The minimum resources for this operator.
 * @param preferredResources The preferred resources for this operator.
 * @return The operator with set minimum and preferred resources.
 */
private SingleOutputStreamOperator<T> setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
    Preconditions.checkNotNull(minResources, "The min resources must be not null.");
    Preconditions.checkNotNull(preferredResources, "The preferred resources must be not null.");
    Preconditions.checkArgument(minResources.isValid() && preferredResources.isValid() && minResources.lessThanOrEqual(preferredResources),
            "The values in resources must be not less than 0 and the preferred resources must be greater than the min resources.");

    transformation.setResources(minResources, preferredResources);

    return this;
}
Example 9
Source File: CollectionDataType.java From flink with Apache License 2.0
@Override
public DataType bridgedTo(Class<?> newConversionClass) {
    return new CollectionDataType(
        logicalType,
        Preconditions.checkNotNull(newConversionClass, "New conversion class must not be null."),
        elementDataType);
}
Example 10
Source File: ResourceManagerPartitionTrackerImpl.java From flink with Apache License 2.0
private Set<ResourceID> getHostingTaskExecutors(IntermediateDataSetID dataSetId) {
    Preconditions.checkNotNull(dataSetId);

    Map<ResourceID, Set<ResultPartitionID>> trackedPartitions = dataSetToTaskExecutors.get(dataSetId);
    if (trackedPartitions == null) {
        return Collections.emptySet();
    } else {
        return trackedPartitions.keySet();
    }
}
Example 11
Source File: LeaderRetrievalHandler.java From flink with Apache License 2.0
protected LeaderRetrievalHandler(
        @Nonnull GatewayRetriever<? extends T> leaderRetriever,
        @Nonnull Time timeout,
        @Nonnull Map<String, String> responseHeaders) {
    this.leaderRetriever = Preconditions.checkNotNull(leaderRetriever);
    this.timeout = Preconditions.checkNotNull(timeout);
    this.responseHeaders = Preconditions.checkNotNull(responseHeaders);
}
Example 12
Source File: QueryableStateClient.java From flink with Apache License 2.0
/**
 * Returns a future holding the request result.
 *
 * @param jobId JobID of the job the queryable state belongs to.
 * @param queryableStateName Name under which the state is queryable.
 * @param key The key that the state we request is associated with.
 * @param namespace The namespace of the state.
 * @param keyTypeInfo The {@link TypeInformation} of the keys.
 * @param namespaceTypeInfo The {@link TypeInformation} of the namespace.
 * @param stateDescriptor The {@link StateDescriptor} of the state we want to query.
 * @return Future holding the immutable {@link State} object containing the result.
 */
private <K, N, S extends State, V> CompletableFuture<S> getKvState(
        final JobID jobId,
        final String queryableStateName,
        final K key,
        final N namespace,
        final TypeInformation<K> keyTypeInfo,
        final TypeInformation<N> namespaceTypeInfo,
        final StateDescriptor<S, V> stateDescriptor) {

    Preconditions.checkNotNull(jobId);
    Preconditions.checkNotNull(queryableStateName);
    Preconditions.checkNotNull(key);
    Preconditions.checkNotNull(namespace);

    Preconditions.checkNotNull(keyTypeInfo);
    Preconditions.checkNotNull(namespaceTypeInfo);
    Preconditions.checkNotNull(stateDescriptor);

    TypeSerializer<K> keySerializer = keyTypeInfo.createSerializer(executionConfig);
    TypeSerializer<N> namespaceSerializer = namespaceTypeInfo.createSerializer(executionConfig);

    stateDescriptor.initializeSerializerUnlessSet(executionConfig);

    final byte[] serializedKeyAndNamespace;
    try {
        serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace(
                key, keySerializer, namespace, namespaceSerializer);
    } catch (IOException e) {
        return FutureUtils.getFailedFuture(e);
    }

    return getKvState(jobId, queryableStateName, key.hashCode(), serializedKeyAndNamespace)
            .thenApply(stateResponse -> createState(stateResponse, stateDescriptor));
}
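This private method backs the public getKvState overloads. A client-side lookup built on one of those overloads might look like the sketch below; the host, port, state name, and key are assumptions for illustration, and the exact client API may differ between Flink versions:

import java.util.concurrent.CompletableFuture;

import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.queryablestate.client.QueryableStateClient;

public class QueryExample {

    public static void main(String[] args) throws Exception {
        // Hypothetical proxy host/port and job ID; adjust to your setup.
        QueryableStateClient client = new QueryableStateClient("localhost", 9069);
        JobID jobId = JobID.fromHexString(args[0]);

        CompletableFuture<ValueState<Long>> future = client.getKvState(
                jobId,
                "max-value",      // hypothetical queryable state name
                "some-key",       // the key the state is associated with
                BasicTypeInfo.STRING_TYPE_INFO,
                new ValueStateDescriptor<>("max-value", Long.class));

        // Block for the demo; production code would compose the future instead.
        System.out.println("current value: " + future.get().value());
        client.shutdownAndWait();
    }
}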
Example 13
Source File: SavepointMetadata.java From flink with Apache License 2.0
public SavepointMetadata(int maxParallelism, Collection<MasterState> masterStates, Collection<OperatorState> initialStates) {
    Preconditions.checkArgument(
        maxParallelism > 0 && maxParallelism <= UPPER_BOUND_MAX_PARALLELISM,
        "Maximum parallelism must be between 1 and " + UPPER_BOUND_MAX_PARALLELISM + ". Found: " + maxParallelism);
    this.maxParallelism = maxParallelism;

    this.masterStates = Preconditions.checkNotNull(masterStates);

    this.operatorStateIndex = new HashMap<>(initialStates.size());
    initialStates.forEach(existingState -> operatorStateIndex.put(
        existingState.getOperatorID(),
        OperatorStateSpec.existing(existingState)));
}
Example 14
Source File: TestingJobMasterService.java From flink with Apache License 2.0
@Override
public JobMasterGateway getGateway() {
    Preconditions.checkNotNull(jobMasterGateway, "TestingJobMasterService has not been started yet.");
    return jobMasterGateway;
}
Example 15
Source File: SlotOccupiedException.java From Flink-CEPplus with Apache License 2.0
public SlotOccupiedException(String message, AllocationID allocationId, JobID jobId) {
    super(message);
    this.allocationId = Preconditions.checkNotNull(allocationId);
    this.jobId = Preconditions.checkNotNull(jobId);
}
Example 16
Source File: FileArchivedExecutionGraphStore.java From flink with Apache License 2.0
public FileArchivedExecutionGraphStore(
        File rootDir,
        Time expirationTime,
        int maximumCapacity,
        long maximumCacheSizeBytes,
        ScheduledExecutor scheduledExecutor,
        Ticker ticker) throws IOException {

    final File storageDirectory = initExecutionGraphStorageDirectory(rootDir);

    LOG.info(
        "Initializing {}: Storage directory {}, expiration time {}, maximum cache size {} bytes.",
        FileArchivedExecutionGraphStore.class.getSimpleName(),
        storageDirectory,
        expirationTime.toMilliseconds(),
        maximumCacheSizeBytes);

    this.storageDir = Preconditions.checkNotNull(storageDirectory);
    Preconditions.checkArgument(
        storageDirectory.exists() && storageDirectory.isDirectory(),
        "The storage directory must exist and be a directory.");

    this.jobDetailsCache = CacheBuilder.newBuilder()
        .expireAfterWrite(expirationTime.toMilliseconds(), TimeUnit.MILLISECONDS)
        .maximumSize(maximumCapacity)
        .removalListener(
            (RemovalListener<JobID, JobDetails>) notification -> deleteExecutionGraphFile(notification.getKey()))
        .ticker(ticker)
        .build();

    this.archivedExecutionGraphCache = CacheBuilder.newBuilder()
        .maximumWeight(maximumCacheSizeBytes)
        .weigher(this::calculateSize)
        .build(new CacheLoader<JobID, ArchivedExecutionGraph>() {
            @Override
            public ArchivedExecutionGraph load(JobID jobId) throws Exception {
                return loadExecutionGraph(jobId);
            }
        });

    this.cleanupFuture = scheduledExecutor.scheduleWithFixedDelay(
        jobDetailsCache::cleanUp,
        expirationTime.toMilliseconds(),
        expirationTime.toMilliseconds(),
        TimeUnit.MILLISECONDS);

    this.shutdownHook = ShutdownHookUtil.addShutdownHook(this, getClass().getSimpleName(), LOG);

    this.numFinishedJobs = 0;
    this.numFailedJobs = 0;
    this.numCanceledJobs = 0;
}
Example 17
Source File: SiddhiStream.java From bahir-flink with Apache License 2.0
/**
 * @param cepEnvironment SiddhiCEP cepEnvironment.
 */
public SiddhiStream(SiddhiCEP cepEnvironment) {
    Preconditions.checkNotNull(cepEnvironment, "SiddhiCEP cepEnvironment is null");
    this.cepEnvironment = cepEnvironment;
}
Example 18
Source File: StreamElementQueueTest.java From flink with Apache License 2.0
public StreamElementQueueTest(AsyncDataStream.OutputMode outputMode) {
    this.outputMode = Preconditions.checkNotNull(outputMode);
}
Example 19
Source File: OrcTableSource.java From flink with Apache License 2.0
/**
 * Sets the ORC schema of the files to read as a String.
 *
 * @param orcSchema The ORC schema of the files to read as a String.
 * @return The builder.
 */
public Builder forOrcSchema(String orcSchema) {
    Preconditions.checkNotNull(orcSchema, "ORC schema must not be null.");
    this.schema = TypeDescription.fromString(orcSchema);
    return this;
}
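In builder form, a call to forOrcSchema() typically sits between path() and build(); the path and schema string in this sketch are hypothetical:

import org.apache.flink.orc.OrcTableSource;

public class OrcSourceExample {

    public static OrcTableSource createSource() {
        // Hypothetical path and ORC schema string; adjust to the files being read.
        return OrcTableSource.builder()
                .path("file:///tmp/orc-data")
                .forOrcSchema("struct<name:string,age:int>")
                .build();
    }
}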
Example 20
Source File: GraphAlgorithmWrappingBase.java From flink with Apache License 2.0
/**
 * First test whether the algorithm configurations can be merged before the
 * call to {@link #mergeConfiguration}.
 *
 * @param other the algorithm with which to compare configuration
 * @return true if and only if configuration can be merged and the
 *         algorithm's output can be reused
 *
 * @see #mergeConfiguration(GraphAlgorithmWrappingBase)
 */
protected boolean canMergeConfigurationWith(GraphAlgorithmWrappingBase other) {
    Preconditions.checkNotNull(other);

    return this.getClass().equals(other.getClass());
}