@Override
public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    Settings settings = HadoopSettingsManager.loadFrom(job);
    Collection<PartitionDefinition> partitions = RestService.findPartitions(settings, log);

    // wrap each Elasticsearch partition in a Hadoop split; the numSplits hint is
    // ignored since the partition count dictates the number of splits
    EsInputSplit[] splits = new EsInputSplit[partitions.size()];
    int index = 0;
    for (PartitionDefinition part : partitions) {
        splits[index++] = new EsInputSplit(part);
    }
    log.info(String.format("Created [%d] splits", splits.length));
    return splits;
}
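// Illustrative only: a minimal driver exercising getSplits() directly, assuming
// the method above lives on elasticsearch-hadoop's EsInputFormat. The
// "es.resource"/"es.nodes" keys are standard elasticsearch-hadoop settings;
// the index name, host, and driver class itself are hypothetical.
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;

public class SplitInspector {
    @SuppressWarnings("rawtypes")
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        conf.set("es.resource", "my-index/doc");  // hypothetical index/type
        conf.set("es.nodes", "localhost:9200");   // hypothetical node
        EsInputFormat format = new EsInputFormat();
        // the numSplits hint is ignored; the ES partitions drive the split count
        InputSplit[] splits = format.getSplits(conf, 0);
        for (InputSplit split : splits) {
            System.out.println(split);
        }
    }
}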
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;

    // overlay the spout-specific configuration on top of the topology-wide one
    LinkedHashMap copy = new LinkedHashMap(conf);
    copy.putAll(spoutConfig);

    StormSettings settings = new StormSettings(copy);
    InitializationUtils.setValueReaderIfNotSet(settings, JdkValueReader.class, log);

    ackReads = settings.getStormSpoutReliable();
    if (ackReads) {
        // reliable reads: track in-flight tuples so failed ones can be replayed
        inTransitQueue = new LinkedHashMap<Object, Object>();
        replayQueue = new LinkedList<Object[]>();
        retries = new HashMap<Object, Integer>();
        queueSize = settings.getStormSpoutReliableQueueSize();
        tupleRetries = settings.getStormSpoutReliableRetriesPerTuple();
        tupleFailure = settings.getStormSpoutReliableTupleFailureHandling();
    }

    int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
    int currentTask = context.getThisTaskIndex();

    // match the partitions to the tasks of the current topology
    List<PartitionDefinition> partitions = RestService.findPartitions(settings, log);
    List<PartitionDefinition> assigned = RestService.assignPartitions(partitions, currentTask, totalTasks);
    iterator = RestService.multiReader(settings, assigned, log);
}
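// Illustrative topology wiring for the spout above (assumed to be
// elasticsearch-hadoop's EsSpout). The "es.storm.spout.reliable" key matches
// the reliable-read setting consumed in open(); the constructor shape, index
// name, and Storm package (org.apache.storm vs backtype.storm) depend on the
// versions in use and are assumptions here.
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.topology.TopologyBuilder;

public class ReadTopologySketch {
    public static void main(String[] args) {
        Map<String, Object> spoutConfig = new HashMap<String, Object>();
        spoutConfig.put("es.storm.spout.reliable", "true"); // turn on ack-based replay
        TopologyBuilder builder = new TopologyBuilder();
        // parallelism of 2: open() divides the ES partitions across both tasks
        builder.setSpout("es-spout", new EsSpout("my-index/doc", spoutConfig), 2);
        // ... attach bolts and submit the topology as usual
    }
}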
@Override
public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    Settings settings = HadoopSettingsManager.loadFrom(job);
    Collection<PartitionDefinition> partitions = RestService.findPartitions(settings, log);

    // variant that copies the partition's node/shard fields into a flat
    // ShardInputSplit rather than wrapping the PartitionDefinition itself
    ShardInputSplit[] splits = new ShardInputSplit[partitions.size()];
    int index = 0;
    for (PartitionDefinition part : partitions) {
        splits[index++] = new ShardInputSplit(part.nodeIp, part.nodePort, part.nodeId, part.nodeName,
                part.shardId, part.onlyNode, part.serializedMapping, part.serializedSettings);
    }
    log.info(String.format("Created [%d] shard-splits", splits.length));
    return splits;
}
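// A sketch of why the flat fields matter: Hadoop ships mapred splits to tasks
// through Writable serialization, so a split made of plain strings and ints is
// trivial to write and read back. The class below is a hypothetical
// illustration, not the actual ShardInputSplit.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.mapred.InputSplit;

public class FlatSplitSketch implements InputSplit {
    private String nodeIp;
    private int nodePort;
    private String shardId;

    public FlatSplitSketch() {}  // Writable deserialization needs a no-arg constructor

    public FlatSplitSketch(String nodeIp, int nodePort, String shardId) {
        this.nodeIp = nodeIp;
        this.nodePort = nodePort;
        this.shardId = shardId;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(nodeIp);
        out.writeInt(nodePort);
        out.writeUTF(shardId);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        nodeIp = in.readUTF();
        nodePort = in.readInt();
        shardId = in.readUTF();
    }

    @Override
    public long getLength() {
        return 0;  // the shard's byte size is not known up front
    }

    @Override
    public String[] getLocations() {
        return new String[] { nodeIp };  // hint the scheduler toward the shard's node
    }
}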