// "Refine search" — leftover UI text captured from a code-search results page; not part of any source file.
/**
 * Update the persisted framework ID.
 *
 * @param frameworkID the new ID, or empty to remove the persisted ID.
 * @throws Exception on ZK failures, interruptions.
 */
@Override
public void setFrameworkID(Option<Protos.FrameworkID> frameworkID) throws Exception {
	synchronized (startStopLock) {
		verifyIsRunning();
		// An empty byte array removes the persisted ID; otherwise the ID string
		// is stored encoded with the project's default charset.
		final byte[] encoded;
		if (frameworkID.isDefined()) {
			encoded = frameworkID.get().getValue().getBytes(ConfigConstants.DEFAULT_CHARSET);
		} else {
			encoded = new byte[0];
		}
		frameworkIdInZooKeeper.setValue(encoded);
	}
}
private List<Future<DLSN>> asyncWriteBulk(List<LogRecord> records) { final ArrayList<Future<DLSN>> results = new ArrayList<Future<DLSN>>(records.size()); Iterator<LogRecord> iterator = records.iterator(); while (iterator.hasNext()) { LogRecord record = iterator.next(); Future<DLSN> future = asyncWrite(record, !iterator.hasNext()); results.add(future); // Abort early if an individual write has already failed. Option<Try<DLSN>> result = future.poll(); if (result.isDefined() && result.get().isThrow()) { break; } } if (records.size() > results.size()) { appendCancelledFutures(results, records.size() - results.size()); } return results; }
public static String findKryoRegistratorJar(HiveConf conf) throws FileNotFoundException { // find the jar in local maven repo for testing if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { String repo = System.getProperty("maven.local.repository"); String version = System.getProperty("hive.version"); String jarName = HIVE_KRYO_REG_JAR_NAME + "-" + version + ".jar"; String[] parts = new String[]{repo, "org", "apache", "hive", HIVE_KRYO_REG_JAR_NAME, version, jarName}; String jar = Joiner.on(File.separator).join(parts); if (!new File(jar).exists()) { throw new FileNotFoundException(jar + " doesn't exist."); } return jar; } Option<String> option = SparkContext.jarOfClass(SparkClientUtilities.class); if (!option.isDefined()) { throw new FileNotFoundException("Cannot find the path to hive-exec.jar"); } File path = new File(option.get()); File[] jars = path.getParentFile().listFiles((dir, name) -> name.startsWith(HIVE_KRYO_REG_JAR_NAME)); if (jars != null && jars.length > 0) { return jars[0].getAbsolutePath(); } throw new FileNotFoundException("Cannot find the " + HIVE_KRYO_REG_JAR_NAME + " jar under " + path.getParent()); }
/**
 * Stops the given worker.
 *
 * <p>For a worker still in launch: its persisted state is updated to Released
 * first, then the task monitor and (if a host was assigned) the launch
 * coordinator are notified — persistence happens before notification. Workers
 * already being returned are ignored; unknown workers are only logged.
 *
 * @param workerNode the registered worker to stop.
 * @return always {@code true}; failures are routed to {@link #onFatalError}.
 */
@Override
public boolean stopWorker(RegisteredMesosWorkerNode workerNode) {
	LOG.info("Stopping worker {}.", workerNode.getResourceID());
	try {
		if (workersInLaunch.containsKey(workerNode.getResourceID())) {
			// update persistent state of worker to Released
			MesosWorkerStore.Worker worker = workersInLaunch.remove(workerNode.getResourceID());
			worker = worker.releaseWorker();
			workerStore.putWorker(worker);
			// Track the worker under its task's resource ID while it is being returned.
			workersBeingReturned.put(extractResourceID(worker.taskID()), worker);
			taskMonitor.tell(new TaskMonitor.TaskGoalStateUpdated(extractGoalState(worker)), selfActor);
			if (worker.hostname().isDefined()) {
				// tell the launch coordinator that the task is being unassigned from the host, for planning purposes
				launchCoordinator.tell(new LaunchCoordinator.Unassign(worker.taskID(), worker.hostname().get()), selfActor);
			}
		} else if (workersBeingReturned.containsKey(workerNode.getResourceID())) {
			LOG.info("Ignoring request to stop worker {} because it is already being stopped.", workerNode.getResourceID());
		} else {
			LOG.warn("Unrecognized worker {}.", workerNode.getResourceID());
		}
	} catch (Exception e) {
		// Any failure to persist or signal the release is escalated as fatal.
		onFatalError(new ResourceManagerException("Unable to release a worker.", e));
	}
	return true;
}
/**
 * Create the Mesos scheduler driver based on this configuration.
 *
 * @param scheduler the scheduler to use.
 * @param implicitAcknowledgements whether to configure the driver for implicit acknowledgements.
 * @return a scheduler driver.
 */
public SchedulerDriver createDriver(Scheduler scheduler, boolean implicitAcknowledgements) {
    // A credential is optional; pass it through only when one is configured.
    if (this.credential().isDefined()) {
        return new MesosSchedulerDriver(scheduler, frameworkInfo.build(), this.masterUrl(),
            implicitAcknowledgements, this.credential().get().build());
    }
    return new MesosSchedulerDriver(scheduler, frameworkInfo.build(), this.masterUrl(),
        implicitAcknowledgements);
}
// NOTE(review): truncated code-search fragment — the statements below appear to be
// non-contiguous excerpts of a Mesos task/container setup routine and are NOT
// compilable as-is (e.g. there is an apparent extra closing parenthesis after
// setName(...), and a dangling .matcher(...) call with no receiver). Do not edit
// or reuse this without the full original source — TODO locate and confirm.
if (taskManagerHostnameOption.isDefined()) { .matcher(taskManagerHostnameOption.get()) .replaceAll(Matcher.quoteReplacement(taskID.getValue())); if (params.bootstrapCommand().isDefined()) { launchCommand.append(params.bootstrapCommand().get()).append(" && "); switch (params.containerType()) { case MESOS: if (params.containerImageName().isDefined()) { containerInfo .setMesos(Protos.ContainerInfo.MesosInfo.newBuilder() .setType(Protos.Image.Type.DOCKER) .setDocker(Protos.Image.Docker.newBuilder() .setName(params.containerImageName().get())))); assert(params.containerImageName().isDefined()); containerInfo .setType(Protos.ContainerInfo.Type.DOCKER)
/**
 * Wraps a Scala Option, handling None as null.
 *
 * @param opt the scala option.
 * @param <T> the type in the Option.
 * @return the value of the option, or null if opt.isDefined is false.
 */
public static <T> T orNull(scala.Option<T> opt) {
    return opt.isDefined() ? opt.get() : null;
}
/**
 * Wraps a Scala Option, handling None as null.
 *
 * @param opt the scala option.
 * @param <T> the type in the Option.
 * @return the value of the option, or null if opt.isDefined is false.
 */
public static <T> T orNull(scala.Option<T> opt) {
    if (opt.isEmpty()) {
        return null;
    }
    return opt.get();
}
/**
 * Wraps a Scala Option, handling None as null.
 *
 * @param opt the scala option.
 * @param <T> the type in the Option.
 * @return the value of the option, or null if opt.isDefined is false.
 */
public static <T> T orNull(scala.Option<T> opt) {
    T value = null;
    if (opt.isDefined()) {
        value = opt.get();
    }
    return value;
}