/**
 * Lazily returns the current, non-exhausted {@link ScrollQuery}, advancing through the
 * partition definitions as needed. Returns {@code null} once every definition has been
 * consumed (and marks the iteration finished). Exhausted scrolls and their readers are
 * closed before moving on, so resources are released eagerly.
 */
private ScrollQuery getCurrent() {
    if (finished) {
        return null;
    }
    while (true) {
        // no active reader - try to open the next partition, or stop for good
        if (currentReader == null) {
            if (!definitionIterator.hasNext()) {
                finished = true;
                return null;
            }
            currentReader = RestService.createReader(settings, definitionIterator.next(), log);
        }
        if (currentScroll == null) {
            currentScroll = currentReader.scrollQuery();
        }
        if (currentScroll.hasNext()) {
            return currentScroll;
        }
        // scroll drained - release it and its reader, then advance to the next definition
        currentScroll.close();
        currentScroll = null;
        currentReader.close();
        currentReader = null;
    }
}
void init(EsInputSplit esSplit, Configuration cfg, Progressable progressable) { // get a copy to override the host/port Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.getPartition().getSerializedSettings()); if (log.isTraceEnabled()) { log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg))); log.trace(String.format("Init shard reader w/ settings %s", settings)); } this.esSplit = esSplit; // initialize mapping/ scroll reader InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log); PartitionDefinition part = esSplit.getPartition(); PartitionReader partitionReader = RestService.createReader(settings, part, log); this.scrollReader = partitionReader.scrollReader; this.client = partitionReader.client; this.queryBuilder = partitionReader.queryBuilder; this.progressable = progressable; // in Hadoop-like envs (Spark) the progressable might be null and thus the heart-beat is not needed if (progressable != null) { beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); } if (log.isDebugEnabled()) { log.debug(String.format("Initializing RecordReader for [%s]", esSplit)); } }
/**
 * Lazily returns the current, non-exhausted {@link ScrollQuery}, advancing through the
 * partition definitions as needed. Returns {@code null} once every definition has been
 * consumed (and marks the iteration finished). Exhausted scrolls and their readers are
 * closed before moving on, so resources are released eagerly.
 */
private ScrollQuery getCurrent() {
    if (finished) {
        return null;
    }
    while (true) {
        // no active reader - try to open the next partition, or stop for good
        if (currentReader == null) {
            if (!definitionIterator.hasNext()) {
                finished = true;
                return null;
            }
            currentReader = RestService.createReader(settings, definitionIterator.next(), log);
        }
        if (currentScroll == null) {
            currentScroll = currentReader.scrollQuery();
        }
        if (currentScroll.hasNext()) {
            return currentScroll;
        }
        // scroll drained - release it and its reader, then advance to the next definition
        currentScroll.close();
        currentScroll = null;
        currentReader.close();
        currentReader = null;
    }
}
void init(EsInputSplit esSplit, Configuration cfg, Progressable progressable) { // get a copy to override the host/port Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.getPartition().getSerializedSettings()); if (log.isTraceEnabled()) { log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg))); log.trace(String.format("Init shard reader w/ settings %s", settings)); } this.esSplit = esSplit; // initialize mapping/ scroll reader InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log); PartitionDefinition part = esSplit.getPartition(); PartitionReader partitionReader = RestService.createReader(settings, part, log); this.scrollReader = partitionReader.scrollReader; this.client = partitionReader.client; this.queryBuilder = partitionReader.queryBuilder; this.progressable = progressable; // in Hadoop-like envs (Spark) the progressable might be null and thus the heart-beat is not needed if (progressable != null) { beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); } if (log.isDebugEnabled()) { log.debug(String.format("Initializing RecordReader for [%s]", esSplit)); } }
void init(ShardInputSplit esSplit, Configuration cfg, Progressable progressable) { // get a copy to override the host/port Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.settings); if (log.isTraceEnabled()) { log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg))); log.trace(String.format("Init shard reader w/ settings %s", esSplit.settings)); } this.esSplit = esSplit; // initialize mapping/ scroll reader InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log); PartitionDefinition part = new PartitionDefinition(esSplit.nodeIp, esSplit.httpPort, esSplit.nodeName, esSplit.nodeId, esSplit.shardId, esSplit.onlyNode, settings.save(), esSplit.mapping); PartitionReader partitionReader = RestService.createReader(settings, part, log); this.scrollReader = partitionReader.scrollReader; this.client = partitionReader.client; this.queryBuilder = partitionReader.queryBuilder; this.progressable = progressable; // in Hadoop-like envs (Spark) the progressable might be null and thus the heart-beat is not needed if (progressable != null) { beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); } if (log.isDebugEnabled()) { log.debug(String.format("Initializing RecordReader for [%s]", esSplit)); } }
/**
 * Lazily returns the current, non-exhausted {@link ScrollQuery}, advancing through the
 * partition definitions as needed. Returns {@code null} once every definition has been
 * consumed (and marks the iteration finished). Exhausted scrolls and their readers are
 * closed before moving on, so resources are released eagerly.
 */
private ScrollQuery getCurrent() {
    if (finished) {
        return null;
    }
    while (true) {
        // no active reader - try to open the next partition, or stop for good
        if (currentReader == null) {
            if (!definitionIterator.hasNext()) {
                finished = true;
                return null;
            }
            currentReader = RestService.createReader(settings, definitionIterator.next(), log);
        }
        if (currentScroll == null) {
            currentScroll = currentReader.scrollQuery();
        }
        if (currentScroll.hasNext()) {
            return currentScroll;
        }
        // scroll drained - release it and its reader, then advance to the next definition
        currentScroll.close();
        currentScroll = null;
        currentReader.close();
        currentReader = null;
    }
}
/**
 * Lazily returns the current, non-exhausted {@link ScrollQuery}, advancing through the
 * partition definitions as needed. Returns {@code null} once every definition has been
 * consumed (and marks the iteration finished). Exhausted scrolls and their readers are
 * closed before moving on, so resources are released eagerly.
 */
private ScrollQuery getCurrent() {
    if (finished) {
        return null;
    }
    while (true) {
        // no active reader - try to open the next partition, or stop for good
        if (currentReader == null) {
            if (!definitionIterator.hasNext()) {
                finished = true;
                return null;
            }
            currentReader = RestService.createReader(settings, definitionIterator.next(), log);
        }
        if (currentScroll == null) {
            currentScroll = currentReader.scrollQuery();
        }
        if (currentScroll.hasNext()) {
            return currentScroll;
        }
        // scroll drained - release it and its reader, then advance to the next definition
        currentScroll.close();
        currentScroll = null;
        currentReader.close();
        currentReader = null;
    }
}
/**
 * Lazily returns the current, non-exhausted {@link ScrollQuery}, advancing through the
 * partition definitions as needed. Returns {@code null} once every definition has been
 * consumed (and marks the iteration finished). Exhausted scrolls and their readers are
 * closed before moving on, so resources are released eagerly.
 */
private ScrollQuery getCurrent() {
    if (finished) {
        return null;
    }
    while (true) {
        // no active reader - try to open the next partition, or stop for good
        if (currentReader == null) {
            if (!definitionIterator.hasNext()) {
                finished = true;
                return null;
            }
            currentReader = RestService.createReader(settings, definitionIterator.next(), log);
        }
        if (currentScroll == null) {
            currentScroll = currentReader.scrollQuery();
        }
        if (currentScroll.hasNext()) {
            return currentScroll;
        }
        // scroll drained - release it and its reader, then advance to the next definition
        currentScroll.close();
        currentScroll = null;
        currentReader.close();
        currentReader = null;
    }
}
void init(EsInputSplit esSplit, Configuration cfg, Progressable progressable) { // get a copy to override the host/port Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.getPartition().getSerializedSettings()); if (log.isTraceEnabled()) { log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg))); log.trace(String.format("Init shard reader w/ settings %s", settings)); } this.esSplit = esSplit; // initialize mapping/ scroll reader InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log); PartitionDefinition part = esSplit.getPartition(); PartitionReader partitionReader = RestService.createReader(settings, part, log); this.scrollReader = partitionReader.scrollReader; this.client = partitionReader.client; this.queryBuilder = partitionReader.queryBuilder; this.progressable = progressable; // in Hadoop-like envs (Spark) the progressable might be null and thus the heart-beat is not needed if (progressable != null) { beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); } if (log.isDebugEnabled()) { log.debug(String.format("Initializing RecordReader for [%s]", esSplit)); } }
void init(EsInputSplit esSplit, Configuration cfg, Progressable progressable) { // get a copy to override the host/port Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.getPartition().getSerializedSettings()); if (log.isTraceEnabled()) { log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg))); log.trace(String.format("Init shard reader w/ settings %s", settings)); } this.esSplit = esSplit; // initialize mapping/ scroll reader InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log); PartitionDefinition part = esSplit.getPartition(); PartitionReader partitionReader = RestService.createReader(settings, part, log); this.scrollReader = partitionReader.scrollReader; this.client = partitionReader.client; this.queryBuilder = partitionReader.queryBuilder; this.progressable = progressable; // in Hadoop-like envs (Spark) the progressable might be null and thus the heart-beat is not needed if (progressable != null) { beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log); } if (log.isDebugEnabled()) { log.debug(String.format("Initializing RecordReader for [%s]", esSplit)); } }