private void lazyInitWrite() {
    // One-shot initialization of the write client; subsequent calls are no-ops.
    if (initialized) {
        return;
    }
    initialized = true;
    // -1 split index triggers random node selection inside createWriter
    writeClient = RestService.createWriter(clientSettings, -1, 0, LOG).repository;
}
@Override public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) { this.collector = collector; LinkedHashMap copy = new LinkedHashMap(conf); copy.putAll(spoutConfig); StormSettings settings = new StormSettings(copy); InitializationUtils.setValueReaderIfNotSet(settings, JdkValueReader.class, log); ackReads = settings.getStormSpoutReliable(); if (ackReads) { inTransitQueue = new LinkedHashMap<Object, Object>(); replayQueue = new LinkedList<Object[]>(); retries = new HashMap<Object, Integer>(); queueSize = settings.getStormSpoutReliableQueueSize(); tupleRetries = settings.getStormSpoutReliableRetriesPerTuple(); tupleFailure = settings.getStormSpoutReliableTupleFailureHandling(); } int totalTasks = context.getComponentTasks(context.getThisComponentId()).size(); int currentTask = context.getThisTaskIndex(); // match the partitions based on the current topology List<PartitionDefinition> partitions = RestService.findPartitions(settings, log); List<PartitionDefinition> assigned = RestService.assignPartitions(partitions, currentTask, totalTasks); iterator = RestService.multiReader(settings, assigned, log); }
@Test
public void testNoAlias() throws IOException {
    for (EsMajorVersion version : ES_VERSIONS) {
        // An empty alias response must leave the search request untouched.
        Map<String, Object> parsed =
                MAPPER.readValue(getClass().getResourceAsStream("get-aliases-empty-response.json"), TreeMap.class);
        GetAliasesRequestBuilder.Response aliasResponse = new GetAliasesRequestBuilder.Response(parsed);
        Map<String, IndicesAliases.Alias> indexAliases = aliasResponse.getIndices().getAliases("index1");

        SearchRequestBuilder request = new SearchRequestBuilder(version, false);
        RestService.applyAliasMetadata(version, indexAliases, request, "index1", "alias1");

        // neither a filter query nor a routing value should have been applied
        assertNull(request.query());
        assertNull(request.routing());
    }
}
@Test
public void testEmpty() {
    // With no shard/slice information available, partition discovery yields nothing.
    Settings settings = new PropertiesSettings();
    settings.setInternalVersion(EsMajorVersion.LATEST);
    settings.setMaxDocsPerPartition(10000);
    settings.setProperty(ES_RESOURCE_READ, "_all");

    assertEquals(RestService.findShardPartitions(settings, null, Collections.<String, NodeInfo>emptyMap(),
            Collections.<List<Map<String, Object>>>emptyList(), LOGGER).size(), 0);
    assertEquals(RestService.findSlicePartitions(null, settings, null, Collections.<String, NodeInfo>emptyMap(),
            Collections.<List<Map<String, Object>>>emptyList(), LOGGER).size(), 0);
}
repository = initMultiIndices(settings, currentSplit, resource, log); } else { repository = initAliasWrite(response, settings, currentSplit, resource, log); } else { repository = initSingleIndex(settings, currentSplit, resource, log);
String pinAddress = checkLocality(partition.getLocations(), log); if (pinAddress != null) { if (log.isDebugEnabled()) { Map<String, IndicesAliases.Alias> aliases = indicesAliases.getAliases(partition.getIndex()); if (aliases != null && aliases.size() > 0) { requestBuilder = applyAliasMetadata(version, aliases, requestBuilder, partition.getIndex(), indices);
private ScrollQuery getCurrent() {
    if (finished) {
        return null;
    }
    // Walk the remaining partition definitions until a scroll with at least
    // one hit is found, closing empty readers/scrolls along the way.
    while (true) {
        if (currentReader == null) {
            if (!definitionIterator.hasNext()) {
                // no partitions left to read from
                finished = true;
                return null;
            }
            currentReader = RestService.createReader(settings, definitionIterator.next(), log);
        }
        if (currentScroll == null) {
            currentScroll = currentReader.scrollQuery();
        }
        if (currentScroll.hasNext()) {
            return currentScroll;
        }
        // empty scroll - release it together with its reader and move on
        currentScroll.close();
        currentScroll = null;
        currentReader.close();
        currentReader = null;
    }
}
@Override
public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    // numSplits is ignored - the split count is dictated by the discovered ES partitions
    Settings settings = HadoopSettingsManager.loadFrom(job);
    Collection<PartitionDefinition> partitions = RestService.findPartitions(settings, log);

    EsInputSplit[] splits = new EsInputSplit[partitions.size()];
    int slot = 0;
    for (PartitionDefinition partition : partitions) {
        splits[slot++] = new EsInputSplit(partition);
    }
    log.info(String.format("Created [%d] splits", splits.length));
    return splits;
}
public static PartitionWriter createWriter(Settings settings, int currentSplit, int totalSplits, Log log) { Version.logVersion(); InitializationUtils.validateSettings(settings); InitializationUtils.discoverEsVersion(settings, log); InitializationUtils.discoverNodesIfNeeded(settings, log); InitializationUtils.filterNonClientNodesIfNeeded(settings, log); InitializationUtils.filterNonDataNodesIfNeeded(settings, log); List<String> nodes = SettingsUtils.discoveredOrDeclaredNodes(settings); // check invalid splits (applicable when running in non-MR environments) - in this case fall back to Random.. int selectedNode = (currentSplit < 0) ? new Random().nextInt(nodes.size()) : currentSplit % nodes.size(); // select the appropriate nodes first, to spread the load before-hand SettingsUtils.pinNode(settings, nodes.get(selectedNode)); Resource resource = new Resource(settings, false); log.info(String.format("Writing to [%s]", resource)); // single index vs multi indices IndexExtractor iformat = ObjectUtils.instantiate(settings.getMappingIndexExtractorClassName(), settings); iformat.compile(resource.toString()); RestRepository repository = (iformat.hasPattern() ? initMultiIndices(settings, currentSplit, resource, log) : initSingleIndex(settings, currentSplit, resource, log)); return new PartitionWriter(settings, currentSplit, totalSplits, repository); }
@Test
public void testShardPartitions() throws IOException {
    List<List<Map<String, Object>>> shardInfo =
            MAPPER.readValue(getClass().getResourceAsStream("search-shards-response.json"), ArrayList.class);
    List<PartitionDefinition> result =
            RestService.findShardPartitions(null, null, Collections.<String, NodeInfo>emptyMap(), shardInfo, LOGGER);
    Collections.sort(result);

    // the canned response yields 34 partitions, all of them distinct
    assertEquals(result.size(), 34);
    assertEquals(new HashSet(result).size(), 34);
    assertArrayEquals(result.toArray(), EXPECTED_SHARDS_PARTITIONS);
}
List<PartitionDefinition> partitions = RestService.findSlicePartitions(client, settings, null, Collections.<String, NodeInfo>emptyMap(), shards, LOGGER); List<PartitionDefinition> partitions = RestService.findSlicePartitions(client, settings, null, Collections.<String, NodeInfo>emptyMap(), shards, LOGGER); List<PartitionDefinition> partitions = RestService.findSlicePartitions(client, settings, null, Collections.<String, NodeInfo>emptyMap(), shards, LOGGER); List<PartitionDefinition> partitions = RestService.findSlicePartitions(client, settings, null, Collections.<String, NodeInfo>emptyMap(), shards, LOGGER); assertEquals(partitions.size(), 34);
repository = initMultiIndices(settings, currentSplit, resource, log); } else { repository = initAliasWrite(response, settings, currentSplit, resource, log); } else { repository = initSingleIndex(settings, currentSplit, resource, log);
partitions = findSlicePartitions(client.getRestClient(), settings, mapping, nodesMap, shards, log); } else { partitions = findShardPartitions(settings, mapping, nodesMap, shards, log);
String pinAddress = checkLocality(partition.getLocations(), log); if (pinAddress != null) { if (log.isDebugEnabled()) { Map<String, IndicesAliases.Alias> aliases = indicesAliases.getAliases(partition.getIndex()); if (aliases != null && aliases.size() > 0) { requestBuilder = applyAliasMetadata(version, aliases, requestBuilder, partition.getIndex(), indices);
void init(EsInputSplit esSplit, Configuration cfg, Progressable progressable) { // get a copy to override the host/port Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.getPartition().getSerializedSettings()); if (log.isTraceEnabled()) { log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg))); log.trace(String.format("Init shard reader w/ settings %s", settings)); } this.esSplit = esSplit; // initialize mapping/ scroll reader InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log); PartitionDefinition part = esSplit.getPartition(); PartitionReader partitionReader = RestService.createReader(settings, part, log); this.scrollReader = partitionReader.scrollReader; this.client = partitionReader.client; this.queryBuilder = partitionReader.queryBuilder; this.progressable = progressable; // in Hadoop-like envs (Spark) the progressable might be null and thus the heart-beat is not needed if (progressable != null) { beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); } if (log.isDebugEnabled()) { log.debug(String.format("Initializing RecordReader for [%s]", esSplit)); } }
@Override
public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    // the numSplits hint is ignored; one split is created per ES partition
    Settings settings = HadoopSettingsManager.loadFrom(job);
    Collection<PartitionDefinition> partitions = RestService.findPartitions(settings, log);

    EsInputSplit[] splits = new EsInputSplit[partitions.size()];
    int i = 0;
    for (PartitionDefinition part : partitions) {
        splits[i] = new EsInputSplit(part);
        i++;
    }
    log.info(String.format("Created [%d] splits", splits.length));
    return splits;
}
repository = initMultiIndices(settings, currentSplit, resource, log); } else { repository = initAliasWrite(response, settings, currentSplit, resource, log); } else { repository = initSingleIndex(settings, currentSplit, resource, log);
partitions = findSlicePartitions(client.getRestClient(), settings, mapping, nodesMap, shards, log); } else { partitions = findShardPartitions(settings, mapping, nodesMap, shards, log);
protected void init() throws IOException { //int instances = detectNumberOfInstances(cfg); int currentInstance = detectCurrentInstance(cfg); if (log.isTraceEnabled()) { log.trace(String.format("EsRecordWriter instance [%s] initiating discovery of target shard...", currentInstance)); } Settings settings = HadoopSettingsManager.loadFrom(cfg).copy(); if (log.isTraceEnabled()) { log.trace(String.format("Init shard writer from cfg %s", HadoopCfgUtils.asProperties(cfg))); } InitializationUtils.setValueWriterIfNotSet(settings, WritableValueWriter.class, log); InitializationUtils.setBytesConverterIfNeeded(settings, WritableBytesConverter.class, log); InitializationUtils.setFieldExtractorIfNotSet(settings, MapWritableFieldExtractor.class, log); PartitionWriter pw = RestService.createWriter(settings, currentInstance, -1, log); this.repository = pw.repository; if (progressable != null) { this.beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); this.beat.start(); } }
String pinAddress = checkLocality(partition.getLocations(), log); if (pinAddress != null) { if (log.isDebugEnabled()) { Map<String, IndicesAliases.Alias> aliases = indicesAliases.getAliases(partition.getIndex()); if (aliases != null && aliases.size() > 0) { requestBuilder = applyAliasMetadata(version, aliases, requestBuilder, partition.getIndex(), indices);