// Old (mapred) API entry point - report the split being processed, then delegate to the shared init.
public EsInputRecordReader(org.apache.hadoop.mapred.InputSplit split, Configuration job, Reporter reporter) {
    reporter.setStatus(split.toString());
    init((EsInputSplit) split, job, reporter);
}
@Override
public boolean next(K key, V value) throws IOException {
    // lazily create the scroll query on the first call
    if (scrollQuery == null) {
        if (beat != null) {
            beat.start();
        }
        scrollQuery = queryBuilder.build(client, scrollReader);
        size = scrollQuery.getSize();

        if (log.isTraceEnabled()) {
            log.trace(String.format("Received scroll [%s], size [%d] for query [%s]", scrollQuery, size, queryBuilder));
        }
    }

    boolean hasNext = scrollQuery.hasNext();
    if (!hasNext) {
        return false;
    }

    Object[] next = scrollQuery.next();

    // NB: the assignments are not strictly needed since the methods overwrite
    // the writable content in place; they are kept for consistency
    currentKey = setCurrentKey(key, next[0]);
    currentValue = setCurrentValue(value, next[1]);

    // keep on counting
    read++;
    return true;
}
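// setCurrentKey/setCurrentValue are used above but not shown in this excerpt. A minimal
// sketch of their assumed shape (hypothetical signatures, not confirmed here): abstract
// hooks that let the typed subclass copy the raw scroll hit into the Hadoop key/value objects.
protected abstract K setCurrentKey(K hadoopKey, Object object);

protected abstract V setCurrentValue(V hadoopValue, Object object);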
@Override
public float getProgress() {
    return size == 0 ? 0 : ((float) getPos()) / size;
}
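// getPos() is called by getProgress() above but is not part of this excerpt. A minimal
// sketch, assuming the position is simply the running count of documents returned so far
// (the `read` counter incremented in next()):
@Override
public long getPos() throws IOException {
    return read;
}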
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
    // new (mapreduce) API - wrap the context through CompatHandler to stay compatible across Hadoop versions
    org.elasticsearch.hadoop.mr.compat.TaskAttemptContext compatContext = CompatHandler.taskAttemptContext(context);
    compatContext.setStatus(split.toString());
    init((EsInputSplit) split, compatContext.getConfiguration(), compatContext);
}
@Override
public boolean nextKeyValue() throws IOException {
    // new API call routed to the old API;
    // under the new API, always create new objects since consumers can (and sometimes will) modify them
    currentKey = createKey();
    currentValue = createValue();
    return next(currentKey, currentValue);
}
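// createKey()/createValue() and the new-API accessors are not shown in this excerpt.
// A hedged sketch of the assumed companions: the factories are abstract so that typed
// subclasses choose the concrete writables, and the accessors hand back the objects
// populated by next()/nextKeyValue().
public abstract K createKey();

public abstract V createValue();

@Override
public K getCurrentKey() throws IOException {
    return currentKey;
}

@Override
public V getCurrentValue() throws IOException {
    return currentValue;
}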