Authorizations authorizations = baseSplit.getAuths(); if (null == authorizations) { authorizations = getScanAuthorizations(job); String table = baseSplit.getTableName(); job, baseSplit.getTableName()); scanner = client.createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads); setupIterators(job, scanner, baseSplit.getTableName(), baseSplit); if (classLoaderContext != null) { scanner.setClassLoaderContext(classLoaderContext); Boolean isOffline = baseSplit.isOffline(); if (isOffline == null) { isOffline = tableConfig.isOfflineScan(); Boolean isIsolated = baseSplit.isIsolatedScan(); if (isIsolated == null) { isIsolated = tableConfig.shouldUseIsolatedScanners(); Boolean usesLocalIterators = baseSplit.usesLocalIterators(); if (usesLocalIterators == null) { usesLocalIterators = tableConfig.shouldUseLocalIterators(); scanner = new OfflineScanner(client, Table.ID.of(baseSplit.getTableId()), authorizations); } else {
if (autoAdjust) { RangeInputSplit split = new RangeInputSplit(tableName, tableId.canonicalID(), ke.clip(r), new String[] {location}); org.apache.accumulo.core.clientImpl.mapreduce.SplitUtils.updateSplit(split, tableConfig, logLevel); split.setOffline(tableConfig.isOfflineScan()); split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners()); split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators()); splits.add(split); } else { RangeInputSplit split = new RangeInputSplit(tableName, tableId.canonicalID(), entry.getKey(), entry.getValue().toArray(new String[0])); org.apache.accumulo.core.clientImpl.mapreduce.SplitUtils.updateSplit(split, tableConfig, logLevel); split.setOffline(tableConfig.isOfflineScan()); split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners()); split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators());
/**
 * Configures the iterators on a scanner for the given table name.
 *
 * <p>Iterator settings carried by the split take precedence; when the split is absent or has
 * none, the settings are read from the job configuration instead.
 *
 * @param job
 *          the Hadoop job configuration
 * @param scanner
 *          the scanner for which to configure the iterators
 * @param tableName
 *          the table name for which the scanner is configured
 * @param split
 *          the split being processed; may be null or may carry its own iterator settings
 * @since 1.7.0
 */
private void setupIterators(JobConf job, ScannerBase scanner, String tableName,
    org.apache.accumulo.core.client.mapreduce.RangeInputSplit split) {
  // Prefer the split's iterators; fall back to the job configuration when the split is
  // absent or carries none. (Flattened from the original nested null checks.)
  List<IteratorSetting> iterators = (split == null) ? null : split.getIterators();
  if (iterators == null) {
    iterators = jobIterators(job, tableName);
  }
  for (IteratorSetting iterator : iterators) {
    scanner.addScanIterator(iterator);
  }
}
/**
 * Copy constructor that duplicates the core identifying fields of another split.
 *
 * <p>NOTE(review): only the range, locations, table name, and table id are copied; other
 * split state (auths, iterators, log level, etc., if present on this class) is not carried
 * over — confirm this is intentional.
 *
 * @param split the split to copy from
 * @throws IOException if retrieving the source split's locations fails
 */
public RangeInputSplit(RangeInputSplit split) throws IOException {
  this.range = split.getRange();
  this.setLocations(split.getLocations());
  this.setTableName(split.getTableName());
  this.setTableId(split.getTableId());
}
/**
 * Builds a {@code ZooKeeperInstance} from this split's stored connection details.
 *
 * @param base the client configuration to derive the instance from
 * @return the instance, or null when either the instance name or the ZooKeeper hosts are
 *         unset on this split
 */
public org.apache.accumulo.core.client.Instance getInstance(
    org.apache.accumulo.core.client.ClientConfiguration base) {
  // Both pieces of connection information are required to construct an instance.
  if (instanceName == null || zooKeepers == null) {
    return null;
  }
  return new org.apache.accumulo.core.client.ZooKeeperInstance(
      base.withInstance(getInstanceName()).withZkHosts(getZooKeepers()));
}
log.debug("Initializing input split: " + baseSplit.toString()); Instance instance = baseSplit.getInstance(getClientConfiguration(job)); if (null == instance) { instance = getInstance(job); String principal = baseSplit.getPrincipal(); if (null == principal) { principal = getPrincipal(job); AuthenticationToken token = baseSplit.getToken(); if (null == token) { token = getAuthenticationToken(job); Authorizations authorizations = baseSplit.getAuths(); if (null == authorizations) { authorizations = getScanAuthorizations(job); String table = baseSplit.getTableName(); InputTableConfig tableConfig = getInputTableConfig(job, baseSplit.getTableName()); .createBatchScanner(baseSplit.getTableName(), authorizations, scanThreads); setupIterators(job, scanner, baseSplit.getTableName(), baseSplit); if (null != classLoaderContext) { scanner.setClassLoaderContext(classLoaderContext); Boolean isOffline = baseSplit.isOffline(); if (null == isOffline) {
Assert.assertEquals(getAdminPrincipal(), risplit.getPrincipal()); Assert.assertEquals(table, risplit.getTableName()); Assert.assertEquals(getAdminToken(), risplit.getToken()); Assert.assertEquals(auths, risplit.getAuths()); Assert.assertEquals(getConnector().getInstance().getInstanceName(), risplit.getInstanceName()); Assert.assertEquals(isolated, risplit.isIsolatedScan()); Assert.assertEquals(localIters, risplit.usesLocalIterators()); Assert.assertEquals(fetchColumns, risplit.getFetchedColumns()); Assert.assertEquals(level, risplit.getLogLevel());
/**
 * Creates a Scanner over the given table, restricted to the range of the current input split.
 *
 * @param context the task context holding the current input split
 * @param tableName the table to scan
 * @param config the job configuration used to build the scanner
 * @return a scanner limited to the split's range
 * @throws IOException if the scanner cannot be created
 */
private static Scanner setupScanner(final Context context, final String tableName,
    final Configuration config) throws IOException {
  final RangeInputSplit inputSplit = (RangeInputSplit) context.getInputSplit();
  final Scanner rangedScanner = AccumuloRyaUtils.getScanner(tableName, config);
  // Limit the scan to just the portion of the table covered by this split.
  rangedScanner.setRange(inputSplit.getRange());
  return rangedScanner;
}
final Configuration conf = context.getConfiguration(); split = (RangeInputSplit) context.getInputSplit(); final Range range = split.getRange(); parentTableName = split.getTableName(); parentTablePrefix = conf.get(MRUtils.TABLE_PREFIX_PROPERTY); for (final TABLE_LAYOUT layout : TABLE_LAYOUT.values()) {
(org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split; Level level = accSplit.getLogLevel(); if (level != null) { log.setLevel(level);
/**
 * Returns the table name for this split.
 *
 * <p>Use {@link #getTableName} instead.
 *
 * @return the table name
 * @deprecated since 1.6.1, use getTableName() instead.
 */
@Deprecated
public String getTable() {
  return getTableName();
}
/**
 * Reconstructs an {@code Instance} from this split's stored connection information.
 *
 * <p>Returns a mock instance when the split records one; otherwise builds a
 * {@code ZooKeeperInstance}. Returns null when required connection details are missing.
 *
 * @param base the base client configuration
 * @return the instance, a mock instance, or null when details are incomplete
 */
public Instance getInstance(ClientConfiguration base) {
  if (instanceName == null) {
    return null;
  }
  // Mock instances have no ZooKeeper quorum, so this check must precede the zooKeepers guard.
  if (isMockInstance()) {
    return DeprecationUtil.makeMockInstance(getInstanceName());
  }
  if (zooKeepers == null) {
    return null;
  }
  return new ZooKeeperInstance(base.withInstance(getInstanceName()).withZkHosts(getZooKeepers()));
}
@Override public List<InputSplit> getSplits(JobContext context) throws IOException { List<InputSplit> oldSplits = super.getSplits(context); List<InputSplit> newSplits = new ArrayList<>(oldSplits.size()); // Copy only the necessary information for (InputSplit oldSplit : oldSplits) { // @formatter:off org.apache.accumulo.core.client.mapreduce.RangeInputSplit newSplit = new org.apache.accumulo.core.client.mapreduce.RangeInputSplit( (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) oldSplit); // @formatter:on newSplits.add(newSplit); } return newSplits; } }
log.debug("Initializing input split: " + split.toString()); Instance instance = split.getInstance(getClientConfiguration(attempt)); if (null == instance) { instance = getInstance(attempt); String principal = split.getPrincipal(); if (null == principal) { principal = getPrincipal(attempt); AuthenticationToken token = split.getToken(); if (null == token) { token = getAuthenticationToken(attempt); Authorizations authorizations = split.getAuths(); if (null == authorizations) { authorizations = getScanAuthorizations(attempt); String table = split.getTableName(); InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName()); scanner = instance.getConnector(principal, token).createBatchScanner(split.getTableName(), authorizations, scanThreads); setupIterators(attempt, scanner, split.getTableName(), split); if (null != classLoaderContext) { scanner.setClassLoaderContext(classLoaderContext); Boolean isOffline = split.isOffline();
/**
 * Copy constructor that duplicates the core identifying fields of another split.
 *
 * <p>NOTE(review): only the range, locations, table name, and table id are copied; other
 * split state (auths, iterators, log level, etc., if present on this class) is not carried
 * over — confirm this is intentional.
 *
 * @param split the split to copy from
 * @throws IOException if retrieving the source split's locations fails
 */
public RangeInputSplit(RangeInputSplit split) throws IOException {
  this.range = split.getRange();
  this.setLocations(split.getLocations());
  this.setTableName(split.getTableName());
  this.setTableId(split.getTableId());
}
/**
 * Sets up a Fluo snapshot scan over the key range of the given input split.
 *
 * <p>Rebuilds the Fluo environment from the serialized properties stored in the job
 * configuration, opens a transaction at the configured timestamp, and prepares an iterator
 * over the requested column families restricted to the split's range.
 *
 * @param split the input split; expected to be an Accumulo {@code RangeInputSplit}
 * @param context the task attempt context carrying the serialized Fluo configuration
 * @throws IOException if any part of the setup fails (all failures are wrapped)
 * @throws InterruptedException declared by the interface; not thrown directly here
 */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  try {
    // Rehydrate the Fluo configuration the job driver serialized into the Hadoop config.
    ByteArrayInputStream bais = new ByteArrayInputStream(
        context.getConfiguration().get(PROPS_CONF_KEY).getBytes(StandardCharsets.UTF_8));
    env = new Environment(new FluoConfiguration(bais));
    // Open a transaction pinned to the driver-chosen timestamp (-1 when absent).
    ti = new TransactionImpl(env, context.getConfiguration().getLong(TIMESTAMP_CONF_KEY, -1));
    // TODO this uses non public Accumulo API!
    RangeInputSplit ris = (RangeInputSplit) split;
    Span span = SpanUtil.toSpan(ris.getRange());
    // Restrict the scan to the column families requested in the job configuration.
    HashSet<Column> columns = new HashSet<>();
    for (String fam : context.getConfiguration().getStrings(FAMS_CONF_KEY, new String[0])) {
      columns.add(new Column(fam));
    }
    cellIterator = ti.scanner().over(span).fetch(columns).build().iterator();
  } catch (Exception e) {
    // Wrap all failures (including runtime ones) in IOException per the RecordReader contract.
    throw new IOException(e);
  }
}
(org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split; Level level = accSplit.getLogLevel(); if (level != null) { log.setLevel(level);
/**
 * Returns the table name for this split.
 *
 * <p>Use {@link #getTableName} instead.
 *
 * @return the table name
 * @deprecated since 1.6.1, use getTableName() instead.
 */
@Deprecated
public String getTable() {
  return getTableName();
}
String table = split.getTableName(); InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName()); scanner = client.createBatchScanner(split.getTableName(), authorizations, scanThreads); setupIterators(attempt, scanner, split.getTableName(), split); if (classLoaderContext != null) { scanner.setClassLoaderContext(classLoaderContext); Boolean isOffline = split.isOffline(); if (isOffline == null) { isOffline = tableConfig.isOfflineScan(); Boolean isIsolated = split.isIsolatedScan(); if (isIsolated == null) { isIsolated = tableConfig.shouldUseIsolatedScanners(); Boolean usesLocalIterators = split.usesLocalIterators(); if (usesLocalIterators == null) { usesLocalIterators = tableConfig.shouldUseLocalIterators(); scanner = new OfflineScanner(client, Table.ID.of(split.getTableId()), authorizations); } else { scanner = new ScannerImpl(client, Table.ID.of(split.getTableId()), authorizations); setupIterators(attempt, scanner, split.getTableName(), split); } catch (Exception e) { throw new IOException(e);
/**
 * Copy constructor that duplicates the core identifying fields of another split.
 *
 * <p>NOTE(review): only the range, locations, table name, and table id are copied; other
 * split state (auths, iterators, log level, etc., if present on this class) is not carried
 * over — confirm this is intentional.
 *
 * @param split the split to copy from
 * @throws IOException if retrieving the source split's locations fails
 */
public RangeInputSplit(RangeInputSplit split) throws IOException {
  this.range = split.getRange();
  this.setLocations(split.getLocations());
  this.setTableName(split.getTableName());
  this.setTableId(split.getTableId());
}