/**
 * Central helper that copies the optional, table-level settings (fetched columns, iterators,
 * sampler configuration) plus the requested log level onto a split. Keeping these calls in one
 * place makes it harder for a future refactor to miss one of the optional setters.
 */
public static void updateSplit(org.apache.accumulo.core.client.mapreduce.RangeInputSplit split,
    org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig, Level logLevel) {
  split.setLogLevel(logLevel);
  split.setFetchedColumns(tableConfig.getFetchedColumns());
  split.setIterators(tableConfig.getIterators());
  split.setSamplerConfiguration(tableConfig.getSamplerConfiguration());
}
@Override
protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
  // Iterators are looked up per table from this job's input-table configuration.
  final List<IteratorSetting> iterators =
      getInputTableConfig(context, tableName).getIterators();
  return iterators;
}
};
/**
 * Copy constructor: duplicates another split's range, table identity, and host locations.
 *
 * @param split
 *          the split to copy from
 * @throws IOException
 *           if retrieving the source split's locations fails
 */
public RangeInputSplit(RangeInputSplit split) throws IOException {
  this.range = split.getRange();
  setTableName(split.getTableName());
  setTableId(split.getTableId());
  setLocations(split.getLocations());
}
/**
 * Use {@link #getTableName} instead.
 *
 * @return the table name, as returned by {@link #getTableName}
 * @deprecated since 1.6.1, use getTableName() instead.
 */
@Deprecated
public String getTable() {
  return getTableName();
}
/**
 * Prepares this record reader for the given split, then wraps the underlying scanner iterator so
 * records can be surfaced one row at a time.
 */
@Override
public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
  super.initialize(inSplit, attempt);
  rowIterator = new RowIterator(scannerIterator);
  // Reset the current key/value pair for the new split.
  currentV = null;
  currentK = new Text();
}
/**
 * Builds a {@link org.apache.accumulo.core.client.ZooKeeperInstance} from this split's instance
 * name and ZooKeeper hosts, layered on the supplied client configuration.
 *
 * @return the instance, or {@code null} when either the instance name or the ZooKeeper hosts are
 *         unset on this split
 */
public org.apache.accumulo.core.client.Instance getInstance(
    org.apache.accumulo.core.client.ClientConfiguration base) {
  if (instanceName == null || zooKeepers == null) {
    return null;
  }
  return new org.apache.accumulo.core.client.ZooKeeperInstance(
      base.withInstance(getInstanceName()).withZkHosts(getZooKeepers()));
}
/**
 * Reports read progress for this split. A null current key after at least one key has been read
 * means the iterator is exhausted, so completion (1.0) is reported; otherwise progress is
 * delegated to the split.
 */
@Override
public float getProgress() throws IOException {
  boolean exhausted = currentKey == null && numKeysRead > 0;
  return exhausted ? 1.0f : split.getProgress(currentKey);
}
/**
 * Creates a batch scan config object out of a previously serialized batch scan config object.
 *
 * @param input
 *          the data input of the serialized batch scan config
 * @throws IOException
 *           if reading the serialized form fails
 */
public InputTableConfig(DataInput input) throws IOException {
  // NOTE(review): readFields is an overridable method invoked from a constructor; a subclass
  // override would run before the subclass's own state is initialized — confirm intentional.
  readFields(input);
}
/**
 * Builds a split covering {@code range} of the named table, hosted at the given locations.
 */
protected RangeInputSplit(String table, String tableId, Range range, String[] locations) {
  this.tableName = table;
  this.tableId = tableId;
  this.range = range;
  setLocations(locations);
}
@Override
protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
  // tableName is ignored here; the iterators are read from the job-wide configuration.
  return getIterators(context);
}
/**
 * Use {@link #setTableName} instead.
 *
 * @param table
 *          the table name to set, forwarded to {@link #setTableName}
 * @deprecated since 1.6.1, use setTableName() instead.
 */
@Deprecated
public void setTable(String table) {
  setTableName(table);
}
/**
 * Creates the record writer that applies {@link Mutation}s for this task attempt.
 *
 * @throws IOException
 *           wrapping any failure raised while constructing the writer
 */
@Override
public RecordWriter<Text,Mutation> getRecordWriter(TaskAttemptContext attempt) throws IOException {
  final AccumuloRecordWriter writer;
  try {
    writer = new AccumuloRecordWriter(attempt);
  } catch (Exception e) {
    // Surface construction failures to the framework as IOException, preserving the cause.
    throw new IOException(e);
  }
  return writer;
}
/**
 * Configures the iterators on a scanner for the given table name.
 *
 * @param context
 *          the Hadoop context for the configured job
 * @param scanner
 *          the scanner for which to configure the iterators
 * @param tableName
 *          the table name for which the scanner is configured
 * @since 1.6.0
 * @deprecated since 1.7.0; Use {@link #contextIterators} instead.
 */
@Deprecated
protected void setupIterators(TaskAttemptContext context, Scanner scanner, String tableName,
    RangeInputSplit split) {
  // Widen Scanner to ScannerBase and delegate to the shared overload.
  setupIterators(context, (ScannerBase) scanner, tableName, split);
}
/**
 * Renders this split as its ranges followed by the superclass's description.
 */
@Override
public String toString() {
  return new StringBuilder(256)
      .append("BatchInputSplit:")
      .append(" Ranges: ").append(Arrays.asList(ranges))
      .append(super.toString())
      .toString();
}
/**
 * Gets the serialized token from either the configuration or the token file.
 *
 * @return the serialized bytes of the job's authentication token
 * @since 1.5.0
 * @deprecated since 1.6.0; Use {@link #getAuthenticationToken(JobContext)} instead.
 */
@Deprecated
protected static byte[] getToken(JobContext context) {
  return AuthenticationTokenSerializer.serialize(getAuthenticationToken(context));
}
/**
 * Initialize a scanner over the given input split using this task attempt configuration.
 *
 * @deprecated since 1.7.0; Use {@link #contextIterators} instead.
 */
@Deprecated
protected void setupIterators(TaskAttemptContext context, Scanner scanner,
    org.apache.accumulo.core.client.mapreduce.RangeInputSplit split) {
  // tableName is passed as null; it is ignored by the eventual call to #contextIterators.
  setupIterators(context, scanner, null, split);
}
}
/**
 * Gets the serialized token from either the configuration or the token file.
 *
 * @return the serialized bytes of the job's authentication token
 * @since 1.5.0
 * @deprecated since 1.6.0; Use {@link #getAuthenticationToken(JobContext)} instead.
 */
@Deprecated
protected static byte[] getToken(JobContext job) {
  return AuthenticationToken.AuthenticationTokenSerializer.serialize(getAuthenticationToken(job));
}
/**
 * Builds an instance using the default client configuration.
 *
 * @see #getInstance(org.apache.accumulo.core.client.ClientConfiguration)
 * @deprecated since 1.7.0, use getInstance(ClientConfiguration) instead.
 */
@Deprecated
public org.apache.accumulo.core.client.Instance getInstance() {
  return getInstance(org.apache.accumulo.core.client.ClientConfiguration.loadDefault());
}
/**
 * Reports read progress for this split. When keys have been read and the current key is
 * exhausted, completion (1.0) is reported; otherwise progress is delegated to the base split.
 */
@Override
public float getProgress() throws IOException {
  if (currentKey == null && numKeysRead > 0) {
    return 1.0f;
  }
  return baseSplit.getProgress(currentKey);
}
/**
 * Apply the configured iterators from the configuration to the scanner.
 *
 * @param context
 *          the Hadoop context for the configured job
 * @param scanner
 *          the scanner to configure
 * @deprecated since 1.7.0; Use {@link #contextIterators} instead.
 */
@Deprecated
protected void setupIterators(TaskAttemptContext context, Scanner scanner) {
  // tableName is given as null as it will be ignored in eventual call to #contextIterators
  setupIterators(context, scanner, null, null);
}