// Build a broker-id -> rack lookup from the cluster's rack-aware broker metadata in ZooKeeper.
// NOTE(review): this snippet is truncated in this view — the for-loop body and its
// closing brace are not visible, so the loop's effect on rackByBroker cannot be documented here.
Map<Integer, String> rackByBroker = new HashMap<>(); for (BrokerMetadata bm : JavaConversions.seqAsJavaList(AdminUtils.getBrokerMetadatas(zkUtils, RackAwareMode.Enforced$.MODULE$, Option.empty()))) {
private Predicate buildPredicate(Split split, CategoricalValueEncodings categoricalValueEncodings) { if (split == null) { // Left child always applies, but is evaluated second return new True(); } int featureIndex = inputSchema.predictorToFeatureIndex(split.feature()); FieldName fieldName = FieldName.create(inputSchema.getFeatureNames().get(featureIndex)); if (split.featureType().equals(FeatureType.Categorical())) { // Note that categories in MLlib model select the *left* child but the // convention here will be that the predicate selects the *right* child // So the predicate will evaluate "not in" this set // More ugly casting @SuppressWarnings("unchecked") Collection<Double> javaCategories = (Collection<Double>) (Collection<?>) JavaConversions.seqAsJavaList(split.categories()); Set<Integer> negativeEncodings = javaCategories.stream().map(Double::intValue).collect(Collectors.toSet()); Map<Integer,String> encodingToValue = categoricalValueEncodings.getEncodingValueMap(featureIndex); List<String> negativeValues = negativeEncodings.stream().map(encodingToValue::get).collect(Collectors.toList()); String joinedValues = TextUtils.joinPMMLDelimited(negativeValues); return new SimpleSetPredicate(fieldName, SimpleSetPredicate.BooleanOperator.IS_NOT_IN, new Array(Array.Type.STRING, joinedValues)); } else { // For MLlib, left means <= threshold, so right means > return new SimplePredicate(fieldName, SimplePredicate.Operator.GREATER_THAN) .setValue(Double.toString(split.threshold())); } }
// Walk the RDD lineage upward from the reducer RDD, asserting that each stage has
// exactly one dependency and that each RDD's creation site names the expected vertex.
// NOTE(review): fragment — the enclosing test method's signature is not visible here.
Assert.assertTrue(reducerRdd.creationSite().longForm().contains("Reducer 2"));
List<Dependency<?>> rdds = JavaConversions.seqAsJavaList(reducerRdd.dependencies());
Assert.assertEquals(1, rdds.size());
// One hop up: the shuffle RDD feeding the reducer, still attributed to "Reducer 2".
RDD shuffledRdd = rdds.get(0).rdd();
Assert.assertTrue(shuffledRdd.creationSite().longForm().contains("Reducer 2"));
rdds = JavaConversions.seqAsJavaList(shuffledRdd.dependencies());
Assert.assertEquals(1, rdds.size());
// Next hop: the map-side RDD, attributed to "Map 1".
RDD mapRdd = rdds.get(0).rdd();
Assert.assertTrue(mapRdd.creationSite().longForm().contains("Map 1"));
rdds = JavaConversions.seqAsJavaList(mapRdd.dependencies());
Assert.assertEquals(1, rdds.size());
// Root of the lineage: the Hadoop input RDD.
RDD hadoopRdd = rdds.get(0).rdd();
/**
 * Reports whether every topic in {@code deletedTopics} has disappeared from ZooKeeper.
 *
 * @return {@code true} once none of the deleted topics is still listed
 */
@Override
public boolean conditionMet() {
  final Set<String> currentTopics =
      new HashSet<>(scala.collection.JavaConversions.seqAsJavaList(zkUtils.getAllTopics()));
  // removeAll(...) returns true when at least one deleted topic is still present,
  // so the condition is met exactly when nothing was removed.
  final boolean anyStillPresent = currentTopics.removeAll(deletedTopics);
  return !anyStillPresent;
}
}
// Accumulate every topic currently registered in ZooKeeper into allTopics.
// NOTE(review): fragment — the enclosing method and the declaration of allTopics
// are not visible in this view.
allTopics.addAll(scala.collection.JavaConversions.seqAsJavaList(zkUtils.getAllTopics()));
/**
 * Exposes a Scala {@link Seq} as a Java {@link List}.
 *
 * @param sequence the Scala sequence to adapt
 * @param <T> element type
 * @return a Java list view of the given sequence
 */
private <T> List<T> toList(Seq<T> sequence) {
  final List<T> asJava = scala.collection.JavaConversions.seqAsJavaList(sequence);
  return asJava;
}
/**
 * Parses the text of a GC log into its individual {@link GCEvent}s.
 *
 * @param log the raw GC log contents
 * @return the parsed events, in log order
 */
public static List<GCEvent> parseLog(String log) {
  final List<GCEvent> events = JavaConversions.seqAsJavaList(Parser.parseLog(log));
  return events;
}
}
/**
 * Resolves a set of broker hostnames to broker ids by scanning the cluster
 * metadata in ZooKeeper.
 *
 * @param zkUtils handle to the cluster's ZooKeeper state
 * @param brokerHostnameSet hostnames to resolve
 * @param checkPresence when {@code true}, require every hostname to resolve to an id
 * @return ids of the brokers whose PLAINTEXT endpoint host matched a requested hostname
 */
private static Set<Integer> brokerHostnamesToBrokerIds(
    ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
  Set<Integer> matchedIds = Sets.newHashSet();
  for (Broker candidate : JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster())) {
    BrokerEndPoint plaintext = candidate.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
    if (brokerHostnameSet.contains(plaintext.host())) {
      matchedIds.add(candidate.id());
    }
  }
  // One id per requested hostname, or the caller opted out of the presence check.
  Preconditions.checkArgument(!checkPresence || brokerHostnameSet.size() == matchedIds.size(),
      "Some hostnames could not be found! We found: " + matchedIds);
  return matchedIds;
}
/**
 * Creates an RDD from the given sequence with the requested parallelism.
 *
 * @param seq the input elements
 * @param numSlices the number of partitions (degree of parallelism)
 * @param evidence class tag for the element type
 * @param <T> element type
 * @return the newly created RDD
 */
@Override
public <T> RDD<T> parallelize(final Seq<T> seq, final int numSlices, final ClassTag<T> evidence) {
  // Bridge the Scala sequence to a Java list before handing it to JavaRDD.
  final List<T> elements = scala.collection.JavaConversions.seqAsJavaList(seq);
  return JavaRDD.of(this.sparkContext, elements, numSlices).rdd();
}
/**
 * Builds a broker-id -> rack mapping from the cluster's ZooKeeper metadata.
 * The mapping is empty when rack awareness is disabled or no broker reports a rack.
 *
 * @param zkUtils handle to the cluster's ZooKeeper state
 * @return rack assignment keyed by broker id
 */
private Map<Integer, String> getRackAssignment(ZkUtils zkUtils) {
  List<Broker> clusterBrokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
  Map<Integer, String> rackById = Maps.newHashMap();
  if (disableRackAwareness) {
    return rackById;
  }
  for (Broker broker : clusterBrokers) {
    scala.Option<String> maybeRack = broker.rack();
    if (maybeRack.isDefined()) {
      rackById.put(broker.id(), maybeRack.get());
    }
  }
  return rackById;
}
/**
 * Prints the brokers currently registered in ZooKeeper to stdout as a JSON array.
 * Each entry carries id/host/port, plus rack when the broker reports one.
 *
 * @param zkUtils handle to the cluster's ZooKeeper state
 * @throws JSONException if building the JSON payload fails
 */
private static void printCurrentBrokers(ZkUtils zkUtils) throws JSONException {
  JSONArray brokersJson = new JSONArray();
  for (Broker broker : JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster())) {
    BrokerEndPoint plaintext = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
    JSONObject entry = new JSONObject();
    entry.put("id", broker.id());
    entry.put("host", plaintext.host());
    entry.put("port", plaintext.port());
    if (broker.rack().isDefined()) {
      entry.put("rack", broker.rack().get());
    }
    brokersJson.put(entry);
  }
  System.out.println("CURRENT BROKERS:");
  System.out.println(brokersJson.toString());
}
/** * Returns a {@link SimpleConsumer} connected to the given {@link Broker} */ private static SimpleConsumer getSimpleConsumer(final Broker broker) { // BrokerHost, BrokerPort, timeout, buffer size, client id EndPoint endpoint = JavaConversions.seqAsJavaList(broker.endPoints()).get(0); return new SimpleConsumer(endpoint.host(), endpoint.port(), 100000, 64 * 1024, CLIENT_ID); }
/**
 * Connects to ZooKeeper and snapshots the Kafka topics visible at construction time.
 *
 * @param atlasConf Atlas configuration supplying ZooKeeper connection/timeout
 *     settings and the cluster name
 * @param atlasClientV2 Atlas REST client used by this bridge
 * @throws Exception if the ZooKeeper connection string cannot be resolved
 */
public KafkaBridge(Configuration atlasConf, AtlasClientV2 atlasClientV2) throws Exception {
  String zkConnectString = getZKConnection(atlasConf);
  int sessionTimeoutMillis = atlasConf.getInt(ZOOKEEPER_SESSION_TIMEOUT_MS, DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_MS);
  int connectionTimeoutMillis = atlasConf.getInt(ZOOKEEPER_CONNECTION_TIMEOUT_MS, DEFAULT_ZOOKEEPER_CONNECTION_TIMEOUT_MS);
  ZkClient zkClient = new ZkClient(zkConnectString, sessionTimeoutMillis, connectionTimeoutMillis, ZKStringSerializer$.MODULE$);

  this.atlasClientV2 = atlasClientV2;
  this.clusterName = atlasConf.getString(KAFKA_CLUSTER_NAME, DEFAULT_CLUSTER_NAME);
  this.zkUtils = new ZkUtils(zkClient, new ZkConnection(zkConnectString), JaasUtils.isZkSecurityEnabled());
  // Snapshot of all topics taken once at bridge creation; not refreshed afterwards.
  this.availableTopics = scala.collection.JavaConversions.seqAsJavaList(zkUtils.getAllTopics());
}
/**
 * Looks up the declared sort of the {@code n}-th argument of {@code klabel}.
 * Returns the first signature's argument sort that is not {@code K}; otherwise the
 * last applicable sort seen, or {@code null} when no signature has enough arguments.
 *
 * @throws IllegalArgumentException if the label has no signature in the module
 */
private Sort nthArgSort(KLabel klabel, int n) {
  java.util.Set<Tuple2<Seq<Sort>,Sort>> signatures =
      mutable(JavaConversions.mapAsJavaMap(module.signatureFor()).get(klabel));
  if (signatures == null) {
    throw new IllegalArgumentException("Not found signature for label: " + klabel);
  }
  Sort candidate = null;
  for (Tuple2<Seq<Sort>,Sort> signature : signatures) {
    List<Sort> argumentSorts = JavaConversions.seqAsJavaList(signature._1());
    if (n < argumentSorts.size()) {
      candidate = argumentSorts.get(n);
      if (!candidate.equals(Sorts.K())) {
        return candidate;
      }
    }
  }
  return candidate;
}
}
/**
 * Lists the Kafka topics reachable at the configured broker address, each returned
 * as a table name prefixed with the Samza "kafka" system name.
 *
 * @param context execution context supplying the Kafka address configuration
 * @return the topic-derived table names, or {@code null} if the ZooKeeper
 *     connection timed out (the error is recorded in {@code lastErrorMsg})
 */
@Override
public List<String> listTables(ExecutionContext context) {
  /**
   * TODO: currently Shell can only talk to Kafka system, but we should use a general way
   * to connect to different systems.
   */
  lastErrorMsg = "";
  String address = context.getConfigMap().getOrDefault(SAMZA_SQL_SYSTEM_KAFKA_ADDRESS, DEFAULT_SERVER_ADDRESS);
  List<String> tables = null;
  ZkClient zkClient = null;
  try {
    zkClient = new ZkClient(address, DEFAULT_ZOOKEEPER_CLIENT_TIMEOUT);
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(address), false);
    tables = JavaConversions.seqAsJavaList(zkUtils.getAllTopics())
        .stream()
        .map(x -> SAMZA_SYSTEM_KAFKA + "." + x)
        .collect(Collectors.toList());
  } catch (ZkTimeoutException ex) {
    String msg = "listTables failed with exception ";
    lastErrorMsg = msg + ex.toString();
    LOG.error(msg, ex);
  } finally {
    // Fix: the ZooKeeper client was previously never closed, leaking a connection
    // (and its event threads) on every call.
    if (zkClient != null) {
      zkClient.close();
    }
  }
  return tables;
}
/**
 * Returns a copy of the schema restricted to the named fields, preserving the
 * original field order.
 *
 * @param schema the schema to filter
 * @param fieldNames names of the fields to keep
 * @return a new {@link StructType} containing only the requested fields
 */
public static StructType subsetSchema(StructType schema, final List<String> fieldNames) {
  Seq<StructField> keptFields = schema.toTraversable().filter(new AbstractFunction1<StructField, Object>() {
    @Override
    public Object apply(StructField field) {
      return fieldNames.contains(field.name());
    }
  }).toSeq();
  return DataTypes.createStructType(JavaConversions.seqAsJavaList(keptFields));
}
/**
 * Refreshes {@code remoteActors} with communicator selections for every non-local
 * cluster seed node.
 */
private void updateActorListFromCluster() {
  // First pass: collect the addresses of all remote (non-local) seed nodes.
  List<String> remoteAddresses = new ArrayList<String>();
  scala.collection.immutable.List<Address> seedNodes = cluster.settings().SeedNodes().toList();
  for (Address address : JavaConversions.seqAsJavaList(seedNodes)) {
    if (isLocalAddress(address)) {
      continue;
    }
    remoteAddresses.add(address.toString());
  }
  // Second pass: resolve each remote address to the communicator actor on that node.
  for (String path : remoteAddresses) {
    if (isLocalActor(path)) {
      continue;
    }
    path = path + communicator.path().toStringWithoutAddress();
    ActorSelection selection = actorSystem.actorSelection(path);
    if (selection != null) {
      remoteActors.add(selection);
    } else {
      LOG.error("Failed to get actor selection for " + path);
    }
  }
}
/**
 * Maps every row of an async query result to a typed object.
 *
 * @param queryRes future holding the driver's raw query result
 * @param mapper converts one row (plus column names) into a {@code T}
 * @return future list of mapped rows; empty when the result carries no row set
 */
private <T> ComposableFuture<List<T>> _list(final ComposableFuture<QueryResult> queryRes, final ResultSetMapper<T> mapper) {
  return queryRes.map(result -> {
    final List<T> mapped = new ArrayList<>();
    final Option<ResultSet> maybeRows = result.rows();
    if (maybeRows.isDefined()) {
      final ResultSet rowSet = maybeRows.get();
      final List<String> columns = JavaConversions.seqAsJavaList(rowSet.columnNames());
      final Iterator<RowData> it = rowSet.iterator();
      while (it.hasNext()) {
        mapped.add(mapper.map(new TypedRowDataImpl(it.next()), columns));
      }
    }
    return mapped;
  });
}
/**
 * Maps the first row of an async query result to a typed object.
 *
 * @param queryRes future holding the driver's raw query result
 * @param mapper converts one row (plus column names) into a {@code T}
 * @return future of the mapped first row, or {@code null} when there is no row set
 *     or it is empty
 */
private <T> ComposableFuture<T> _get(final ComposableFuture<QueryResult> queryRes, final ResultSetMapper<T> mapper) {
  return queryRes.map(result -> {
    final Option<ResultSet> maybeRows = result.rows();
    if (!maybeRows.isDefined()) {
      return null;
    }
    final ResultSet rowSet = maybeRows.get();
    final Iterator<RowData> it = rowSet.iterator();
    if (!it.hasNext()) {
      return null;
    }
    final List<String> columns = JavaConversions.seqAsJavaList(rowSet.columnNames());
    return mapper.map(new TypedRowDataImpl(it.next()), columns);
  });
}
/**
 * Fetches rack-aware metadata for every broker registered in ZooKeeper.
 *
 * @return broker metadata as reported by Kafka's admin utilities
 */
private List<BrokerMetadata> getBrokerMetadatas() {
  ZkClient zkClient = new ZkClient(getZookeeperConnectString(), 1000, 1000, ZKStringSerializer$.MODULE$);
  try {
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(getZookeeperConnectString()), false);
    return JavaConversions.seqAsJavaList(AdminUtils.getBrokerMetadatas(zkUtils, Enforced$.MODULE$, Option.empty()));
  } finally {
    // Fix: the ZooKeeper client was previously never closed, leaking a connection
    // (and its event threads) on every call.
    zkClient.close();
  }
}