/**
 * Validates a limit-style configuration value.
 *
 * @param value  the configured value; -1 acts as the "unlimited" sentinel
 * @param option the option the value came from, used for the error message
 * @throws IllegalConfigurationException if the value is below -1
 */
private static void checkLimit(int value, ConfigOption<Integer> option) {
    // Everything >= -1 is acceptable; only values below the sentinel are rejected.
    final boolean withinRange = value >= -1;
    if (!withinRange) {
        throw new IllegalConfigurationException(
                "Invalid value for '" + option.key() + "': " + value);
    }
}
/**
 * Validates a timeout-style configuration value.
 *
 * @param timeout the configured timeout; must be non-negative
 * @param option  the option the value came from, used for the error message
 * @throws IllegalConfigurationException if the timeout is negative
 */
private static void checkTimeout(long timeout, ConfigOption<Long> option) {
    // Guard-clause style: bail out early on the valid case.
    if (timeout >= 0) {
        return;
    }
    throw new IllegalConfigurationException(
            "Invalid value for '" + option.key() + "': " + timeout);
}
}
// NOTE(review): fragment — the surrounding condition checks and branches are outside
// this view; the two throws below clearly belong to separate branches originally.
// Raised when the configured config directory path does not name an existing directory.
throw new IllegalConfigurationException(
        "The given configuration directory name '" + configDir + "' (" + confDirFile.getAbsolutePath() + ") does not describe an existing directory.");
// Raised when the expected Flink YAML config file is absent from that directory.
throw new IllegalConfigurationException(
        "The Flink config file '" + yamlConfigFile + "' (" + confDirFile.getAbsolutePath() + ") does not exist.");
// NOTE(review): fragment — the surrounding range checks live outside this view.
// FIX: the concatenated messages previously rendered as "between 0and 65535"
// (missing space between the two string literals) in all three branches.
// Raised when a single configured port is outside the valid range.
throw new IllegalConfigurationException("Invalid port configuration. Port must be between 0"
        + " and 65535, but was " + port + ".");
// Raised when the start of a configured port range is outside the valid range.
throw new IllegalConfigurationException("Invalid port configuration. Port must be between 0"
        + " and 65535, but was " + start + ".");
// Raised when the end of a configured port range is outside the valid range.
throw new IllegalConfigurationException("Invalid port configuration. Port must be between 0"
        + " and 65535, but was " + end + ".");
// NOTE(review): fragment — the opening try block lies outside this view.
// Reject hostnames containing ':' — presumably to exclude host:port strings; TODO confirm.
Preconditions.checkArgument(!host.contains(":"));
} catch (Exception e) {
    // Wrap any validation failure, preserving the original cause for debugging.
    throw new IllegalConfigurationException("The configured hostname is not valid", e);
// Raised when the configured default filesystem scheme cannot be parsed as a URI.
// NOTE(review): the message interpolates the ConfigOption object itself rather than
// its .key() — presumably relies on ConfigOption#toString; verify this is intended.
throw new IllegalConfigurationException("The default file system scheme ('" + CoreOptions.DEFAULT_FILESYSTEM_SCHEME + "') is invalid: " + stringifiedUri, e);
@Override public RocksDBStateBackend createFromConfig(Configuration config) throws IllegalConfigurationException, IOException { // we need to explicitly read the checkpoint directory here, because that // is a required constructor parameter final String checkpointDirURI = config.getString(CheckpointingOptions.CHECKPOINTS_DIRECTORY); if (checkpointDirURI == null) { throw new IllegalConfigurationException( "Cannot create the RocksDB state backend: The configuration does not specify the " + "checkpoint directory '" + CheckpointingOptions.CHECKPOINTS_DIRECTORY.key() + '\''); } return new RocksDBStateBackend(checkpointDirURI).configure(config); } }
/**
 * Creates the high-availability services for a single-job Flink YARN application, to be
 * used in the Application Master that runs both ResourceManager and JobManager.
 *
 * @param flinkConfig The Flink configuration.
 * @param hadoopConfig The Hadoop configuration for the YARN cluster.
 *
 * @return The created high-availability services.
 *
 * @throws IOException Thrown, if the high-availability services could not be initialized.
 * @throws IllegalConfigurationException Thrown, if the configured high-availability mode
 *         is not one of the recognized {@code HighAvailabilityMode} values.
 * @throws UnsupportedOperationException Thrown for the ZOOKEEPER mode, which is not yet
 *         implemented by this factory.
 */
public static YarnHighAvailabilityServices forSingleJobAppMaster(
        Configuration flinkConfig,
        org.apache.hadoop.conf.Configuration hadoopConfig) throws IOException {

    checkNotNull(flinkConfig, "flinkConfig");
    checkNotNull(hadoopConfig, "hadoopConfig");

    final HighAvailabilityMode mode = HighAvailabilityMode.fromConfig(flinkConfig);
    switch (mode) {
        case NONE:
            // Non-HA setup: services local to this Application Master process.
            return new YarnIntraNonHaMasterServices(flinkConfig, hadoopConfig);

        case ZOOKEEPER:
            throw new UnsupportedOperationException("to be implemented");

        default:
            // Defensive default for any future/unknown enum constant.
            throw new IllegalConfigurationException("Unrecognized high availability mode: " + mode);
    }
}
// Defensive default branch: no other HighAvailabilityMode values are supported here.
throw new IllegalConfigurationException("Unrecognized high availability mode: " + mode);
// NOTE(review): fragment — the surrounding null/range checks are outside this view;
// the three throws below belong to separate branches originally.
// Fail fast when the AM RPC address was not propagated into the configuration.
throw new IllegalConfigurationException("Config parameter '" + YarnConfigOptions.APP_MASTER_RPC_ADDRESS.key() + "' is missing.");
// Fail fast when the AM RPC port is absent.
throw new IllegalConfigurationException("Config parameter '" + YarnConfigOptions.APP_MASTER_RPC_PORT.key() + "' is missing.");
// Reject ports outside the valid range; note 0 ("pick any") is also disallowed here.
throw new IllegalConfigurationException("Invalid value for '" + YarnConfigOptions.APP_MASTER_RPC_PORT.key() + "' - port must be in [1, 65535]");
// NOTE(review): fragment — the format string and the String.format arguments continue
// beyond this view; statement kept byte-identical.
throw new IllegalConfigurationException(
        String.format("The number of requested virtual cores per node %d"
                + " exceeds the maximum number of virtual cores %d available in the Yarn Cluster." +
// Invalid RocksDB local-storage-directory configuration: the message surfaces the
// parse error text, and the cause is chained for debugging.
throw new IllegalConfigurationException("Invalid configuration for RocksDB state " + "backend's local storage directories: " + e.getMessage(), e);
// NOTE(review): fragment — braces are unbalanced here; the two throws belong to
// separate validation branches in the original file.
if (entropyInjectionKey != null) {
    // Reject keys containing characters that the entropy injector cannot handle.
    if (entropyInjectionKey.matches(INVALID_ENTROPY_KEY_CHARS)) {
        throw new IllegalConfigurationException("Invalid character in value for " + ENTROPY_INJECT_KEY_OPTION.key() + " : " + entropyInjectionKey);
// Raised elsewhere when the configured entropy length is not strictly positive.
throw new IllegalConfigurationException(
        ENTROPY_INJECT_LENGTH_OPTION.key() + " must configure a value > 0");
private JobGraph createJobGraph() { // make sure that all vertices start immediately jobGraph.setScheduleMode(ScheduleMode.EAGER); // Generate deterministic hashes for the nodes in order to identify them across // submission iff they didn't change. Map<Integer, byte[]> hashes = defaultStreamGraphHasher.traverseStreamGraphAndGenerateHashes(streamGraph); // Generate legacy version hashes for backwards compatibility List<Map<Integer, byte[]>> legacyHashes = new ArrayList<>(legacyStreamGraphHashers.size()); for (StreamGraphHasher hasher : legacyStreamGraphHashers) { legacyHashes.add(hasher.traverseStreamGraphAndGenerateHashes(streamGraph)); } Map<Integer, List<Tuple2<byte[], byte[]>>> chainedOperatorHashes = new HashMap<>(); setChaining(hashes, legacyHashes, chainedOperatorHashes); setPhysicalEdges(); setSlotSharingAndCoLocation(); configureCheckpointing(); JobGraphGenerator.addUserArtifactEntries(streamGraph.getEnvironment().getCachedFiles(), jobGraph); // set the ExecutionConfig last when it has been finalized try { jobGraph.setExecutionConfig(streamGraph.getExecutionConfig()); } catch (IOException e) { throw new IllegalConfigurationException("Could not serialize the ExecutionConfig." + "This indicates that non-serializable types (like custom serializers) were registered"); } return jobGraph; }
// NOTE(review): fragment — the closing brace of this check is outside the view.
long maxAlign = taskManagerConfig.getLong(TaskManagerOptions.TASK_CHECKPOINT_ALIGNMENT_BYTES_LIMIT);
// Accept -1 (sentinel for "no limit") or any strictly positive byte count; 0 and
// other negatives are invalid.
if (!(maxAlign == -1 || maxAlign > 0)) {
    throw new IllegalConfigurationException(
            TaskManagerOptions.TASK_CHECKPOINT_ALIGNMENT_BYTES_LIMIT.key()
                    + " must be positive or -1 (infinite)");
// NOTE(review): fragment — braces are unbalanced; these checks come from separate
// branches of the Mesos container-type validation in the original file.
// GPU count may not be negative.
throw new IllegalConfigurationException(MESOS_RM_TASKS_GPUS.key() + " cannot be negative");
containerType = ContainerType.DOCKER;
// A docker container type is unusable without an image name.
if (imageName == null || imageName.length() == 0) {
    throw new IllegalConfigurationException(MESOS_RM_CONTAINER_IMAGE_NAME.key() + " must be specified for docker container type");
// Raised elsewhere for container-type strings that match no known type.
throw new IllegalConfigurationException("invalid container type: " + containerTypeString);
// The Mesos master URL has no usable default; fail fast when it is not configured.
throw new IllegalConfigurationException(MesosOptions.MASTER_URL.key() + " must be configured.");
private void validate() { if (!StringUtils.isBlank(keytab)) { // principal is required if (StringUtils.isBlank(principal)) { throw new IllegalConfigurationException("Kerberos login configuration is invalid; keytab requires a principal."); } // check the keytab is readable File keytabFile = new File(keytab); if (!keytabFile.exists() || !keytabFile.isFile() || !keytabFile.canRead()) { throw new IllegalConfigurationException("Kerberos login configuration is invalid; keytab is unreadable"); } } }
private void validate() { if(!StringUtils.isBlank(keytab)) { // principal is required if(StringUtils.isBlank(principal)) { throw new IllegalConfigurationException("Kerberos login configuration is invalid; keytab requires a principal."); } // check the keytab is readable File keytabFile = new File(keytab); if(!keytabFile.exists() || !keytabFile.isFile() || !keytabFile.canRead()) { throw new IllegalConfigurationException("Kerberos login configuration is invalid; keytab is unreadable"); } } }
private void validate() { if (!StringUtils.isBlank(keytab)) { // principal is required if (StringUtils.isBlank(principal)) { throw new IllegalConfigurationException("Kerberos login configuration is invalid; keytab requires a principal."); } // check the keytab is readable File keytabFile = new File(keytab); if (!keytabFile.exists() || !keytabFile.isFile() || !keytabFile.canRead()) { throw new IllegalConfigurationException("Kerberos login configuration is invalid; keytab is unreadable"); } } }