/** Looks up the local URL registered for the given remote file path. */
@Override
public synchronized Option<URL> resolve(Path remoteFile) {
    // Map#get yields null for unknown keys; Option.apply turns that null into None.
    return Option.apply(paths.get(remoteFile));
}
/**
 * Transition the worker to a launched state.
 *
 * @param slaveID the Mesos slave the worker was launched on (may be null)
 * @param hostname the hostname of that slave (may be null)
 * @return a new worker instance (does not mutate the current instance).
 */
public Worker launchWorker(Protos.SlaveID slaveID, String hostname) {
    Option<Protos.SlaveID> slave = Option.apply(slaveID);
    Option<String> host = Option.apply(hostname);
    return new Worker(taskID, profile, slave, host, WorkerState.Launched);
}
/**
 * A test Netty web server with HTTPS support
 * @param port HTTP port to bind on
 * @param application The Application to load in this server
 * @param sslPort HTTPS port to bind on
 */
public TestServer(int port, Application application, int sslPort) {
    // super(...) must be the first statement in a Java constructor; the sslPort is boxed
    // into a Scala Option, and no custom ServerProvider is supplied (Scala None).
    super(port, application.getWrappedApplication(), Option.<Object>apply(sslPort), play.libs.Scala.<ServerProvider>None());
}
@Override public Option<MapStatus> stop(boolean success) { try { taskContext.taskMetrics().incPeakExecutionMemory(getPeakMemoryUsedBytes()); if (stopping) { return Option.apply(null); } else { stopping = true; if (success) { if (mapStatus == null) { throw new IllegalStateException("Cannot call stop(true) without having called write()"); } return Option.apply(mapStatus); } else { return Option.apply(null); } } } finally { if (sorter != null) { // If sorter is non-null, then this implies that we called stop() in response to an error, // so we need to clean up memory and spill files created by the sorter sorter.cleanupResources(); } } } }
@Override public Option<MapStatus> stop(boolean success) { try { taskContext.taskMetrics().incPeakExecutionMemory(getPeakMemoryUsedBytes()); if (stopping) { return Option.apply(null); } else { stopping = true; if (success) { if (mapStatus == null) { throw new IllegalStateException("Cannot call stop(true) without having called write()"); } return Option.apply(mapStatus); } else { return Option.apply(null); } } } finally { if (sorter != null) { // If sorter is non-null, then this implies that we called stop() in response to an error, // so we need to clean up memory and spill files created by the sorter sorter.cleanupResources(); } } } }
@Override public Option<MapStatus> stop(boolean success) { try { taskContext.taskMetrics().incPeakExecutionMemory(getPeakMemoryUsedBytes()); if (stopping) { return Option.apply(null); } else { stopping = true; if (success) { if (mapStatus == null) { throw new IllegalStateException("Cannot call stop(true) without having called write()"); } return Option.apply(mapStatus); } else { return Option.apply(null); } } } finally { if (sorter != null) { // If sorter is non-null, then this implies that we called stop() in response to an error, // so we need to clean up memory and spill files created by the sorter sorter.cleanupResources(); } } } }
/**
 * Triggers a savepoint for the job identified by the job id. The savepoint will be written to the given savepoint
 * directory, or {@link org.apache.flink.configuration.CheckpointingOptions#SAVEPOINT_DIRECTORY} if it is null.
 *
 * @param jobId job id
 * @param savepointDirectory directory the savepoint should be written to
 * @return path future where the savepoint is located
 * @throws FlinkException if no connection to the cluster could be established
 */
public CompletableFuture<String> triggerSavepoint(JobID jobId, @Nullable String savepointDirectory) throws FlinkException {
    final ActorGateway jobManager = getJobManagerGateway();
    // A null directory is passed through as Scala None so the JobManager falls back to its default.
    final JobManagerMessages.TriggerSavepoint request =
        new JobManagerMessages.TriggerSavepoint(jobId, Option.<String>apply(savepointDirectory));
    final Future<Object> response = jobManager.ask(request, new FiniteDuration(1, TimeUnit.HOURS));
    return FutureUtils.toJava(response).thenApply(message -> {
        if (message instanceof JobManagerMessages.TriggerSavepointSuccess) {
            return ((JobManagerMessages.TriggerSavepointSuccess) message).savepointPath();
        }
        if (message instanceof JobManagerMessages.TriggerSavepointFailure) {
            // Surface the remote failure cause through the CompletableFuture.
            throw new CompletionException(((JobManagerMessages.TriggerSavepointFailure) message).cause());
        }
        throw new CompletionException(
            new IllegalStateException("Unknown JobManager response of type " + message.getClass()));
    });
}
/**
 * Get the persisted framework ID.
 * @return the current ID or empty if none is yet persisted.
 * @throws Exception on ZK failures, interruptions.
 */
@Override
public Option<Protos.FrameworkID> getFrameworkID() throws Exception {
    synchronized (startStopLock) {
        verifyIsRunning();
        final byte[] raw = frameworkIdInZooKeeper.getValue();
        if (raw.length == 0) {
            // Nothing has been stored yet.
            return Option.empty();
        }
        final String id = new String(raw, ConfigConstants.DEFAULT_CHARSET);
        return Option.apply(Protos.FrameworkID.newBuilder().setValue(id).build());
    }
}
/**
 * Ensures the monitored topic has at least {@code minPartitionNum} partitions,
 * expanding it through the ZooKeeper admin client when it currently has fewer.
 *
 * @param minPartitionNum the minimum number of partitions the topic should have
 */
void maybeAddPartitions(int minPartitionNum) {
    KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS,
        ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        scala.collection.Map<Object, scala.collection.Seq<Object>> existingAssignment = getPartitionAssignment(zkClient, _topic);
        int partitionNum = existingAssignment.size();
        if (partitionNum < minPartitionNum) {
            LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} "
                + "in cluster {} from {} to {}.", _topic, _zkConnect, partitionNum, minPartitionNum);
            // Option.empty() is the idiomatic Scala None; Option.apply(null) produced the same
            // value but obscured the intent.
            scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>> replicaAssignment = scala.Option.empty();
            scala.Option<Seq<Object>> brokerList = scala.Option.empty();
            adminZkClient.addPartitions(_topic, existingAssignment,
                adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, brokerList),
                minPartitionNum, replicaAssignment, false);
        }
    } finally {
        // Always release the ZooKeeper connection, even if partition expansion fails.
        zkClient.close();
    }
}
// NOTE(review): this span appears to be cut from a larger parameter-building method — the trailing
// "gpus, containerType, ..." run is a fragment of an argument list; verify against the full file.
// The MESOS_* config reads may return null, which Option.apply maps to Scala None.
Option<String> containerVolOpt = Option.<String>apply(flinkConfig.getString(MESOS_RM_CONTAINER_VOLUMES)); Option<String> dockerParamsOpt = Option.<String>apply(flinkConfig.getString(MESOS_RM_CONTAINER_DOCKER_PARAMETERS)); Option<String> uriParamsOpt = Option.<String>apply(flinkConfig.getString(MESOS_TM_URIS)); Option<String> taskManagerHostname = Option.apply(flinkConfig.getString(MESOS_TM_HOSTNAME)); Option<String> tmBootstrapCommand = Option.apply(flinkConfig.getString(MESOS_TM_BOOTSTRAP_CMD)); gpus, containerType, Option.apply(imageName), containeredParameters, containerVolumes,
/**
 * Handles the Mesos "registered" callback: persists the assigned framework ID, then
 * fans the registration message out to the coordinator actors. The tell ordering is
 * preserved as-is; the framework ID must be durably stored before any coordinator acts on it.
 *
 * @param message the registration message carrying the framework ID assigned by the master
 */
protected void registered(Registered message) {
    connectionMonitor.tell(message, selfActor);
    try {
        workerStore.setFrameworkID(Option.apply(message.frameworkId()));
    } catch (Exception ex) {
        // Persisting the framework ID is mandatory; abort registration entirely on failure.
        onFatalError(new ResourceManagerException("Unable to store the assigned framework ID.", ex));
        return;
    }
    launchCoordinator.tell(message, selfActor);
    reconciliationCoordinator.tell(message, selfActor);
    taskMonitor.tell(message, selfActor);
}
// Option.empty() expresses "no thread-name prefix" more clearly than Option.apply(null);
// both evaluate to Scala None.
scala.Option<String> stringNone = scala.Option.empty();
KafkaServer server = new KafkaServer(kafkaConfig, SystemTime$.MODULE$, stringNone);
server.startup();
@SuppressWarnings("unchecked") private ClientBuilder setDefaultSettings(ClientBuilder builder) { return builder.name(clientName) .codec(ThriftClientFramedCodec.apply(Option.apply(clientId))) .failFast(false) .noFailureAccrual() // disable retries on finagle client builder, as there is only one host per finagle client // we should throw exception immediately on first failure, so DL client could quickly detect // failures and retry other proxies. .retries(1) .keepAlive(true); }
// Option.empty() expresses "no thread-name prefix" more clearly than Option.apply(null);
// both evaluate to Scala None. The empty ArraySeq supplies no extra metrics reporters.
scala.Option<String> stringNone = scala.Option.empty();
KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
server.startup();
// Option.empty() expresses "no thread-name prefix" more clearly than Option.apply(null);
// both evaluate to Scala None. The empty ArraySeq supplies no extra metrics reporters.
scala.Option<String> stringNone = scala.Option.empty();
KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
server.startup();
// Option.empty() expresses "no thread-name prefix" more clearly than Option.apply(null);
// both evaluate to Scala None. The empty ArraySeq supplies no extra metrics reporters.
scala.Option<String> stringNone = scala.Option.empty();
KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
server.startup();
/**
 * Configures shuffle compression and I/O encryption on the SparkConf, installs a matching
 * SerializerManager on the mocked block manager, then delegates to the two-argument overload.
 *
 * @param transferToEnabled whether NIO transferTo-based merging should be used
 * @param compressionCodecName codec to enable, or null to disable shuffle compression
 * @param encrypt whether I/O encryption is enabled
 */
private void testMergingSpills(
    final boolean transferToEnabled,
    String compressionCodecName,
    boolean encrypt) throws Exception {
  final boolean compress = compressionCodecName != null;
  conf.set("spark.shuffle.compress", compress ? "true" : "false");
  if (compress) {
    conf.set("spark.io.compression.codec", compressionCodecName);
  }
  conf.set(org.apache.spark.internal.config.package$.MODULE$.IO_ENCRYPTION_ENABLED(), encrypt);
  // When encrypting, the serializer manager needs an explicit key.
  final SerializerManager manager = encrypt
      ? new SerializerManager(serializer, conf, Option.apply(CryptoStreamUtils.createKey(conf)))
      : new SerializerManager(serializer, conf);
  when(blockManager.serializerManager()).thenReturn(manager);
  testMergingSpills(transferToEnabled, encrypt);
}
/**
 * Configures shuffle compression and I/O encryption on the SparkConf, installs a matching
 * SerializerManager on the mocked block manager, then delegates to the two-argument overload.
 *
 * @param transferToEnabled whether NIO transferTo-based merging should be used
 * @param compressionCodecName codec to enable, or null to disable shuffle compression
 * @param encrypt whether I/O encryption is enabled
 */
private void testMergingSpills(
    final boolean transferToEnabled,
    String compressionCodecName,
    boolean encrypt) throws Exception {
  final boolean compress = compressionCodecName != null;
  conf.set("spark.shuffle.compress", compress ? "true" : "false");
  if (compress) {
    conf.set("spark.io.compression.codec", compressionCodecName);
  }
  conf.set(org.apache.spark.internal.config.package$.MODULE$.IO_ENCRYPTION_ENABLED(), encrypt);
  // When encrypting, the serializer manager needs an explicit key.
  final SerializerManager manager = encrypt
      ? new SerializerManager(serializer, conf, Option.apply(CryptoStreamUtils.createKey(conf)))
      : new SerializerManager(serializer, conf);
  when(blockManager.serializerManager()).thenReturn(manager);
  testMergingSpills(transferToEnabled, encrypt);
}
/**
 * Configures shuffle compression and I/O encryption on the SparkConf, installs a matching
 * SerializerManager on the mocked block manager, then delegates to the two-argument overload.
 *
 * @param transferToEnabled whether NIO transferTo-based merging should be used
 * @param compressionCodecName codec to enable, or null to disable shuffle compression
 * @param encrypt whether I/O encryption is enabled
 */
private void testMergingSpills(
    final boolean transferToEnabled,
    String compressionCodecName,
    boolean encrypt) throws Exception {
  final boolean compress = compressionCodecName != null;
  conf.set("spark.shuffle.compress", compress ? "true" : "false");
  if (compress) {
    conf.set("spark.io.compression.codec", compressionCodecName);
  }
  conf.set(org.apache.spark.internal.config.package$.MODULE$.IO_ENCRYPTION_ENABLED(), encrypt);
  // When encrypting, the serializer manager needs an explicit key.
  final SerializerManager manager = encrypt
      ? new SerializerManager(serializer, conf, Option.apply(CryptoStreamUtils.createKey(conf)))
      : new SerializerManager(serializer, conf);
  when(blockManager.serializerManager()).thenReturn(manager);
  testMergingSpills(transferToEnabled, encrypt);
}
// NOTE(review): fragment — in the full file this constructed MesosConfiguration is presumably
// returned or assigned; verify upstream. A null credential is mapped to Scala None by Option.apply.
new MesosConfiguration(masterUrl, frameworkInfo, scala.Option.apply(credential));