@Override
public boolean next(K key, V value) throws IOException {
    if (scrollQuery == null) {
        if (beat != null) {
            beat.start();
        }
        scrollQuery = queryBuilder.build(client, scrollReader);
        size = scrollQuery.getSize();

        if (log.isTraceEnabled()) {
            log.trace(String.format("Received scroll [%s], size [%d] for query [%s]", scrollQuery, size, queryBuilder));
        }
    }

    boolean hasNext = scrollQuery.hasNext();

    if (!hasNext) {
        return false;
    }

    Object[] next = scrollQuery.next();

    // NB: the assignments below are not strictly needed since the setter
    // methods overwrite the Writable contents in place; they are kept for
    // consistency
    currentKey = setCurrentKey(key, next[0]);
    currentValue = setCurrentValue(value, next[1]);

    // keep on counting
    read++;
    return true;
}
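// A minimal sketch (not from this file) of how the RecordReader above is
// typically driven through the old org.apache.hadoop.mapred API. The
// Text/MapWritable key/value types and the "es.nodes"/"es.resource" values
// are assumptions for illustration only.
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.elasticsearch.hadoop.mr.EsInputFormat;

public class ScrollReadSketch {
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        conf.set("es.nodes", "localhost:9200");      // placeholder cluster address
        conf.set("es.resource", "my-index/my-type"); // placeholder index/type

        EsInputFormat<Text, MapWritable> format = new EsInputFormat<Text, MapWritable>();
        InputSplit[] splits = format.getSplits(conf, 1);
        RecordReader<Text, MapWritable> reader = format.getRecordReader(splits[0], conf, Reporter.NULL);

        // The caller reuses a single key/value pair; next(...) overwrites
        // their contents in place, as the NB comment above points out.
        Text key = reader.createKey();
        MapWritable value = reader.createValue();
        while (reader.next(key, value)) {
            System.out.println(key + " -> " + value);
        }
        reader.close();
    }
}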
protected void init() throws IOException {
    //int instances = detectNumberOfInstances(cfg);
    int currentInstance = detectCurrentInstance(cfg);

    if (log.isTraceEnabled()) {
        log.trace(String.format("EsRecordWriter instance [%s] initiating discovery of target shard...", currentInstance));
    }

    Settings settings = HadoopSettingsManager.loadFrom(cfg).copy();

    if (log.isTraceEnabled()) {
        log.trace(String.format("Init shard writer from cfg %s", HadoopCfgUtils.asProperties(cfg)));
    }

    // wire the Writable-specific serialization defaults, unless the user
    // has already configured alternatives
    InitializationUtils.setValueWriterIfNotSet(settings, WritableValueWriter.class, log);
    InitializationUtils.setBytesConverterIfNeeded(settings, WritableBytesConverter.class, log);
    InitializationUtils.setFieldExtractorIfNotSet(settings, MapWritableFieldExtractor.class, log);

    // discover the target shard for this task instance and bind its repository
    PartitionWriter pw = RestService.createWriter(settings, currentInstance, -1, log);
    this.repository = pw.repository;

    // report progress periodically so Hadoop does not kill long-running writes
    if (progressable != null) {
        this.beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log);
        this.beat.start();
    }
}
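// A minimal sketch (not from this file) of the job configuration that feeds
// the cfg consumed by init() above, assuming the standard elasticsearch-hadoop
// MapReduce setup with EsOutputFormat. The node address and index/type are
// placeholders for illustration only.
import org.apache.hadoop.mapred.JobConf;
import org.elasticsearch.hadoop.mr.EsOutputFormat;

public class ShardWriteSketch {
    public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setOutputFormat(EsOutputFormat.class);
        // Speculative execution would spawn duplicate writers racing against
        // each other, so it is commonly disabled for Elasticsearch writes.
        conf.setSpeculativeExecution(false);
        conf.set("es.nodes", "localhost:9200");      // placeholder cluster address
        conf.set("es.resource", "my-index/my-type"); // placeholder index/type
    }
}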