/**
 * Configures a {@link org.apache.accumulo.core.client.ZooKeeperInstance} for this job.
 *
 * @param job
 *          the Hadoop job instance to be configured
 * @param instanceName
 *          the Accumulo instance name
 * @param zooKeepers
 *          a comma-separated list of zookeeper servers
 * @since 1.5.0
 * @deprecated since 1.6.0; use
 *             {@link #setZooKeeperInstance(Job, org.apache.accumulo.core.client.ClientConfiguration)}
 *             instead.
 */
@Deprecated
public static void setZooKeeperInstance(Job job, String instanceName, String zooKeepers) {
  // Delegates to the ClientConfiguration-based overload, building a config
  // that carries only the instance name and ZooKeeper hosts.
  setZooKeeperInstance(job, org.apache.accumulo.core.client.ClientConfiguration.create()
      .withInstance(instanceName).withZkHosts(zooKeepers));
}
// NOTE(review): incomplete fragment — the try and while blocks opened here are
// closed outside this view, and tableConfig/binnedRanges/tableId/ranges are
// declared elsewhere. Appears to re-bin offline-table ranges until
// binOfflineTable returns a non-null result — confirm against the full method.
Level logLevel = getLogLevel(job); log.setLevel(logLevel); validateOptions(job); Map<String,InputTableConfig> tableConfigs = getInputTableConfigs(job); try { if (tableConfig.isOfflineScan()) { binnedRanges = binOfflineTable(job, tableId, ranges); while (binnedRanges == null) { binnedRanges = binOfflineTable(job, tableId, ranges);
// Fragment of a record-reader initialize(): resolves per-split scan settings.
// (split/attempt are parameters declared outside this view.)
log.debug("Initializing input split: " + split);
// Scan authorizations configured on the job for this attempt.
Authorizations authorizations = getScanAuthorizations(attempt);
// Optional classloader context name used when resolving iterator classes.
String classLoaderContext = getClassLoaderContext(attempt);
String table = split.getTableName();
// Per-table configuration (ranges, iterators, etc.) for this split's table.
InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName());
@Override protected void setupAccumuloInput(final Job job) throws AccumuloSecurityException { // set up accumulo input if (!hdfsInput) { job.setInputFormatClass(AccumuloInputFormat.class); } else { job.setInputFormatClass(AccumuloHDFSFileInputFormat.class); } AbstractInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd)); InputFormatBase.setInputTableName(job, RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix)); AbstractInputFormat.setScanAuthorizations(job, authorizations); if (!mock) { AbstractInputFormat.setZooKeeperInstance(job, new ClientConfiguration().withInstance(instance).withZkHosts(zk)); } else { AbstractInputFormat.setMockInstance(job, instance); } if (ttl != null) { final IteratorSetting setting = new IteratorSetting(1, "fi", AgeOffFilter.class); AgeOffFilter.setTTL(setting, Long.valueOf(ttl)); InputFormatBase.addIterator(job, setting); } for (final IteratorSetting iteratorSetting : AccumuloRyaUtils.COMMON_REG_EX_FILTER_SETTINGS) { InputFormatBase.addIterator(job, iteratorSetting); } }
// NOTE(review): incomplete fragment — the for/try/while blocks opened here are
// closed outside this view; tableConfig, binnedRanges, ranges and tl come from
// the surrounding method. Looks like split computation: offline tables are
// binned via binOfflineTable, online tables via TabletLocator.binRanges using
// a ClientContext built from the job's principal/token — confirm in full file.
Level logLevel = getLogLevel(context); log.setLevel(logLevel); validateOptions(context); Random random = new Random(); LinkedList<InputSplit> splits = new LinkedList<>(); Map<String,InputTableConfig> tableConfigs = getInputTableConfigs(context); for (Map.Entry<String,InputTableConfig> tableConfigEntry : tableConfigs.entrySet()) { Instance instance = getInstance(context); String tableId; Authorizations auths = getScanAuthorizations(context); String principal = getPrincipal(context); AuthenticationToken token = getAuthenticationToken(context); try { if (tableConfig.isOfflineScan()) { binnedRanges = binOfflineTable(context, tableId, ranges); while (binnedRanges == null) { binnedRanges = binOfflineTable(context, tableId, ranges); ClientContext clientContext = new ClientContext(getInstance(context), new Credentials(getPrincipal(context), getAuthenticationToken(context)), getClientConfiguration(context)); while (!tl.binRanges(clientContext, ranges, binnedRanges).isEmpty()) { if (!DeprecationUtil.isMockInstance(instance)) {
// Configures Accumulo MapReduce input: connector credentials, source table,
// ZooKeeper instance, and scan authorizations, all taken from the
// accumulo/ingest option objects declared outside this view.
AbstractInputFormat.setConnectorInfo(
    job, accumuloOptions.getUser(), new PasswordToken(accumuloOptions.getPassword()));
InputFormatBase.setInputTableName(job, ingestOptions.getQualifiedTableName());
AbstractInputFormat.setZooKeeperInstance(
    job,
    new ClientConfiguration().withInstance(accumuloOptions.getInstance()).withZkHosts(
        accumuloOptions.getZookeeper()));
AbstractInputFormat.setScanAuthorizations(
    job, new Authorizations(ingestOptions.getVisibilityOptions().getVisibility()));
// NOTE(review): torn fragment — an if/else and several statements are cut mid-body
// (the stray "authorizations); } else {" belongs to code outside this view).
// Appears to resolve instance/principal/token for a split, falling back to the
// job-level configuration when the split does not carry its own — confirm
// against the complete initialize() method before relying on this reading.
log.debug("Initializing input split: " + split.toString()); Instance instance = split.getInstance(getClientConfiguration(attempt)); if (null == instance) { instance = getInstance(attempt); principal = getPrincipal(attempt); token = getAuthenticationToken(attempt); authorizations = getScanAuthorizations(attempt); String classLoaderContext = getClassLoaderContext(attempt); String table = split.getTableName(); InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName()); authorizations); } else { ClientConfiguration clientConf = getClientConfiguration(attempt); ClientContext context = new ClientContext(instance, new Credentials(principal, token), clientConf);
// NOTE(review): torn fragment — the bare "throw new IllegalStateException(e)"
// sits outside any visible try/catch, so statements between "AccumuloClient
// client;" and the throw were elided. Builds a ClientContext from the job's
// ClientInfo and resolves per-split scan settings — verify in the full method.
log.debug("Initializing input split: " + split); ClientInfo info = getClientInfo(attempt); ClientContext context = new ClientContext(info); AccumuloClient client; throw new IllegalStateException(e); Authorizations authorizations = getScanAuthorizations(attempt); String classLoaderContext = getClassLoaderContext(attempt); String table = split.getTableName(); InputTableConfig tableConfig = getInputTableConfig(attempt, split.getTableName());
public void setAuthorizations(String auths) {
    // A null or empty string means "scan with no authorizations": use the
    // shared empty set instead of parsing an empty Authorizations string.
    final Authorizations scanAuths =
            (auths == null || auths.isEmpty()) ? Authorizations.EMPTY : new Authorizations(auths);
    AbstractInputFormat.setScanAuthorizations(job, scanAuths);
}
@Override
protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
    // The iterators for this table come from its per-table input configuration.
    InputTableConfig tableConfig = getInputTableConfig(context, tableName);
    return tableConfig.getIterators();
}
};
// Fragment: the closing braces of the else-branch lie outside this view.
// Registers credentials and authorizations, then selects a real ZooKeeper
// instance or an in-memory mock instance (tests).
AbstractInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
AbstractInputFormat.setScanAuthorizations(job, authorizations);
if (!mock) {
    // Real cluster: connect through ZooKeeper.
    AbstractInputFormat.setZooKeeperInstance(job,
        new ClientConfiguration().withInstance(instance).withZkHosts(zk));
} else {
    // Tests: in-memory mock instance.
    AbstractInputFormat.setMockInstance(job, instance);
@Override
protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
    // Look up the per-table configuration and hand back its iterator settings.
    return getInputTableConfig(context, tableName)
            .getIterators();
}
};
public AccumuloConfiguration(Instance instance, String accumuloUser,
        String accumuloPassword, boolean isMock)
        throws AccumuloSecurityException, IOException {
    // NOTE: new Job(new Configuration()) does not work in the Scala shell due to
    // the toString method's implementation; override toString there to make it
    // work. These fields are kept for lazy initialization of the connector.
    this.zkInstance = instance;
    this.accumuloUser = accumuloUser;
    this.accumuloPassword = accumuloPassword;
    this.job = new Job(new Configuration());

    // Register credentials on both the input and the output side of the job,
    // with an empty authorization set for scans.
    AbstractInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AccumuloOutputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AbstractInputFormat.setScanAuthorizations(job, new Authorizations());

    if (isMock) {
        // In-memory mock instance (tests): no ZooKeeper involved.
        AbstractInputFormat.setMockInstance(job, instance.getInstanceName());
        AccumuloOutputFormat.setMockInstance(job, instance.getInstanceName());
    } else {
        // Real cluster: point both formats at the instance via ZooKeeper.
        // (withInstance/withZkHosts mutate and return the same config object.)
        this.clientConfig = new ClientConfiguration()
                .withInstance(instance.getInstanceName())
                .withZkHosts(instance.getZooKeepers());
        AbstractInputFormat.setZooKeeperInstance(job, this.clientConfig);
        AccumuloOutputFormat.setZooKeeperInstance(job, this.clientConfig);
    }
}
// NOTE(review): incomplete fragment — the for/try/while blocks opened here are
// closed outside this view; tableConfig shadows tableConfigEntry.getValue() and
// binnedRanges/ranges are declared elsewhere. Newer variant of split
// computation (SecureRandom, Table.ID, ClientInfo-based ClientContext) —
// confirm details against the complete getSplits() method.
Level logLevel = getLogLevel(context); log.setLevel(logLevel); validateOptions(context); Random random = new SecureRandom(); LinkedList<InputSplit> splits = new LinkedList<>(); Map<String,InputTableConfig> tableConfigs = getInputTableConfigs(context); for (Map.Entry<String,InputTableConfig> tableConfigEntry : tableConfigs.entrySet()) { InputTableConfig tableConfig = tableConfigEntry.getValue(); ClientContext clientContext = new ClientContext(getClientInfo(context)); Table.ID tableId; try { if (tableConfig.isOfflineScan()) { binnedRanges = binOfflineTable(context, tableId, ranges); while (binnedRanges == null) { binnedRanges = binOfflineTable(context, tableId, ranges);
/**
 * Configures a {@link org.apache.accumulo.core.client.ZooKeeperInstance} for this job.
 *
 * @param job
 *          the Hadoop job instance to be configured
 * @param instanceName
 *          the Accumulo instance name
 * @param zooKeepers
 *          a comma-separated list of zookeeper servers
 * @since 1.5.0
 * @deprecated since 1.6.0; Use {@link #setZooKeeperInstance(Job, ClientConfiguration)} instead.
 */
@Deprecated
public static void setZooKeeperInstance(Job job, String instanceName, String zooKeepers) {
  // Build a client configuration carrying just the instance name and the
  // ZooKeeper hosts, then delegate to the non-deprecated overload.
  ClientConfiguration clientConfig =
      new ClientConfiguration().withInstance(instanceName).withZkHosts(zooKeepers);
  setZooKeeperInstance(job, clientConfig);
}
@Override
protected List<IteratorSetting> contextIterators(TaskAttemptContext context, String tableName) {
    // Delegate to the per-table input configuration for this table's iterators.
    final InputTableConfig config = getInputTableConfig(context, tableName);
    return config.getIterators();
}
};
// Fragment: the closing braces of the else-branch lie outside this view.
// Configures HDFS-file-based Accumulo input: credentials, the Rya table
// derived from the layout + prefix, authorizations, and a real or mock
// instance depending on the mock flag.
job.setInputFormatClass(AccumuloHDFSFileInputFormat.class);
AbstractInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
InputFormatBase.setInputTableName(job,
    RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix));
AbstractInputFormat.setScanAuthorizations(job, authorizations);
if (!mock) {
    // Real cluster: connect through ZooKeeper.
    AbstractInputFormat.setZooKeeperInstance(job,
        new ClientConfiguration().withInstance(instance).withZkHosts(zk));
} else {
    // Tests: in-memory mock instance.
    AbstractInputFormat.setMockInstance(job, instance);