public static SparkConf getSparkConfBasedOn(SparkSession.Builder sparkSessionBuilder) {
  try {
    SparkConf sparkConf = new SparkConf();
    Field options = sparkSessionBuilder.getClass()
        .getDeclaredField("org$apache$spark$sql$SparkSession$Builder$$options");
    options.setAccessible(true);
    Iterator iterator = ((scala.collection.mutable.HashMap) options.get(sparkSessionBuilder)).iterator();
    while (iterator.hasNext()) {
      Tuple2 x = (Tuple2) iterator.next();
      sparkConf.set((String) x._1, (String) x._2);
    }
    return sparkConf;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
HashSet<ServiceInstance> serviceInstances = new HashSet<ServiceInstance>();
while (endpointAddressesIterator.hasNext()) {
  serviceInstances.add(endpointAddressToServiceInstance(endpointAddressesIterator.next()));
}
@Override
public void write(scala.collection.Iterator<Product2<K, V>> records) throws IOException {
  // Keep track of success so we know if we encountered an exception
  // We do this rather than a standard try/catch/re-throw to handle
  // generic throwables.
  boolean success = false;
  try {
    while (records.hasNext()) {
      insertRecordIntoSorter(records.next());
    }
    closeAndWriteOutput();
    success = true;
  } finally {
    if (sorter != null) {
      try {
        sorter.cleanupResources();
      } catch (Exception e) {
        // Only throw this error if we won't be masking another error.
        if (success) {
          throw e;
        } else {
          logger.error("In addition to a failure during writing, we failed during cleanup.", e);
        }
      }
    }
  }
}
long totalBytes = 0;
while (iterator.hasNext()) {
  final MessageAndOffset messageAndOffset = iterator.next();
public Result runBoon() {
  String result = null;
  String error = null;
  long time;
  Iterator<Object> query = null;
  long now = System.currentTimeMillis();
  try {
    if (!optionAsValues) {
      throw new UnsupportedOperationException("Not supported!");
    }
    io.gatling.jsonpath.JsonPath jsonPath = JsonPath$.MODULE$.compile(path).right().get();
    JsonParser jsonParser = new JsonParserCharArray();
    Object jsonModel = jsonParser.parse(json);
    query = jsonPath.query(jsonModel);
  } catch (Exception e) {
    error = getError(e);
  } finally {
    time = System.currentTimeMillis() - now;
    if (query != null) {
      List<Object> res = new ArrayList<Object>();
      while (query.hasNext()) {
        res.add(query.next());
      }
      ObjectMapper mapper = new ObjectMapperImpl();
      result = mapper.toJson(res);
    }
    return new Result("boon", time, result, error);
  }
}
private static void reassignPartitions(KafkaZkClient zkClient, Collection<Broker> brokers, String topic,
                                       int partitionCount, int replicationFactor) {
  scala.collection.mutable.ArrayBuffer<BrokerMetadata> brokersMetadata =
      new scala.collection.mutable.ArrayBuffer<>(brokers.size());
  for (Broker broker : brokers) {
    brokersMetadata.$plus$eq(new BrokerMetadata(broker.id(), broker.rack()));
  }
  scala.collection.Map<Object, Seq<Object>> assignedReplicas =
      AdminUtils.assignReplicasToBrokers(brokersMetadata, partitionCount, replicationFactor, 0, 0);
  scala.collection.immutable.Map<TopicPartition, Seq<Object>> newAssignment = new scala.collection.immutable.HashMap<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = assignedReplicas.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    TopicPartition tp = new TopicPartition(topic, (Integer) scalaTuple._1);
    newAssignment = newAssignment.$plus(new scala.Tuple2<>(tp, scalaTuple._2));
  }
  scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> currentAssignment =
      zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
  String currentAssignmentJson = formatAsReassignmentJson(topic, currentAssignment);
  String newAssignmentJson = formatAsReassignmentJson(topic, assignedReplicas);
  LOG.info("Reassign partitions for topic " + topic);
  LOG.info("Current partition replica assignment " + currentAssignmentJson);
  LOG.info("New partition replica assignment " + newAssignmentJson);
  zkClient.createPartitionReassignment(newAssignment);
}
private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
  scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
      zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
  List<PartitionInfo> partitionInfoList = new ArrayList<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    Integer partition = (Integer) scalaTuple._1();
    scala.Option<Object> leaderOption = zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
    Node leader = leaderOption.isEmpty() ? null : new Node((Integer) leaderOption.get(), "", -1);
    Node[] replicas = new Node[scalaTuple._2().size()];
    for (int i = 0; i < replicas.length; i++) {
      Integer brokerId = (Integer) scalaTuple._2().apply(i);
      replicas[i] = new Node(brokerId, "", -1);
    }
    partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
  }
  return partitionInfoList;
}
final Product2<K, V> record = records.next();
final K key = record._1();
partitionWriters[partitioner.getPartition(key)].write(key, record._2());
private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
  final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
  long startOffset = 0;
  for (int i = 0; i < NUM_PARTITITONS; i++) {
    final long partitionSize = partitionSizesInMergedFile[i];
    if (partitionSize > 0) {
      FileInputStream fin = new FileInputStream(mergedOutputFile);
      fin.getChannel().position(startOffset);
      InputStream in = new LimitedInputStream(fin, partitionSize);
      in = blockManager.serializerManager().wrapForEncryption(in);
      if (conf.getBoolean("spark.shuffle.compress", true)) {
        in = CompressionCodec$.MODULE$.createCodec(conf).compressedInputStream(in);
      }
      DeserializationStream recordsStream = serializer.newInstance().deserializeStream(in);
      Iterator<Tuple2<Object, Object>> records = recordsStream.asKeyValueIterator();
      while (records.hasNext()) {
        Tuple2<Object, Object> record = records.next();
        assertEquals(i, hashPartitioner.getPartition(record._1()));
        recordsList.add(record);
      }
      recordsStream.close();
      startOffset += partitionSize;
    }
  }
  return recordsList;
}
@Override
public boolean next() {
  if (!iterator.hasNext()) {
    return false;
  }
  currentRow = iterator.next();
  return true;
}
protected List<String> group() {
  List<String> groups = new ArrayList<>();
  scala.collection.immutable.List<GroupOverview> list = adminClient.listAllConsumerGroupsFlattened();
  if (list == null) {
    return groups;
  }
  Iterator<GroupOverview> iterator = list.iterator();
  while (iterator.hasNext()) {
    groups.add(iterator.next().groupId());
  }
  return groups;
}
public Iterator<UnsafeRow> sort(Iterator<UnsafeRow> inputIterator) throws IOException {
  while (inputIterator.hasNext()) {
    insertRow(inputIterator.next());
  }
  return sort();
}
@Override
public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
  Iterator<AccumulatorV2<?, ?>> iterator = taskEnd.taskMetrics().accumulators().iterator();
  while (iterator.hasNext()) {
    AccumulatorV2 accumulator = iterator.next();
    if (taskEnd.stageId() == 1
        && accumulator.isRegistered()
        && accumulator.name().isDefined()
        && accumulator.name().get().equals("internal.metrics.shuffle.read.recordsRead")) {
      stageOneShuffleReadTaskRecordsCountMap.put(taskEnd.taskInfo().taskId(), (Long) accumulator.value());
    }
  }
}