/**
 * Opens a connection to HBase and binds {@code super.table} to {@code tableName}.
 * Lazily creates a default configuration if none was supplied.
 *
 * @throws RuntimeException if the table does not exist or the connection cannot be created
 */
private void connectToTable() {
  if (this.conf == null) {
    this.conf = HBaseConfiguration.create();
  }
  Connection conn = null;
  try {
    conn = ConnectionFactory.createConnection(conf);
    super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
  } catch (TableNotFoundException tnfe) {
    closeConnectionQuietly(conn); // don't leak the connection on the failure path
    LOG.error("The table " + tableName + " not found ", tnfe);
    throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
  } catch (IOException ioe) {
    closeConnectionQuietly(conn);
    LOG.error("Exception while creating connection to HBase.", ioe);
    throw new RuntimeException("Cannot create connection to HBase.", ioe);
  }
}

/** Best-effort close of a possibly-null connection; the primary exception is already propagating. */
private static void closeConnectionQuietly(Connection conn) {
  if (conn != null) {
    try {
      conn.close();
    } catch (IOException ignored) {
      // cleanup failure is secondary to the exception being thrown by the caller
    }
  }
}
/**
 * Copies every property defined by hbase-default.xml / hbase-site.xml into
 * {@code newJobProperties}, but only for keys the job configuration does not
 * already define — existing job settings always win.
 *
 * @param jobConf          job configuration whose keys take precedence
 * @param newJobProperties destination map for the missing HBase properties
 */
private void addHBaseResources(Configuration jobConf, Map<String, String> newJobProperties) {
  // Start from an empty Configuration so only the HBase resource files contribute.
  Configuration hbaseDefaults = new Configuration(false);
  HBaseConfiguration.addHbaseResources(hbaseDefaults);
  for (Entry<String, String> property : hbaseDefaults) {
    String key = property.getKey();
    if (jobConf.get(key) == null) {
      newJobProperties.put(key, property.getValue());
    }
  }
}
// Layer the full HBase resource set (hbase-default/hbase-site) on top of conf,
// preserving any values conf already holds.
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
// Tell TableInputFormat which table to read and which serialized Scan to use.
conf.set(TableInputFormat.INPUT_TABLE, table);
conf.set(TableInputFormat.SCAN, convertScanToString(scan));
// Append the HBase serialization codecs after whatever "io.serializations"
// already contains, so existing serializers keep working.
conf.setStrings("io.serializations", conf.get("io.serializations"),
    MutationSerialization.class.getName(), ResultSerialization.class.getName(),
    CellSerialization.class.getName());
/**
 * Builds a {@link Configuration} for a (possibly remote) cluster: starts from a
 * copy of {@code baseConf}, applies the ZooKeeper cluster key if given, then
 * merges in any properties carrying {@code overridePrefix} (with the prefix
 * stripped by {@link #subset(Configuration, String)}).
 *
 * @param baseConf       base configuration, possibly containing prefixed overrides
 * @param clusterKey     ZooKeeper quorum cluster key to apply, or {@code null}/empty for none
 * @param overridePrefix property-key prefix selecting override properties,
 *                       or {@code null}/empty for none
 * @return the merged configuration
 */
public static Configuration createClusterConf(Configuration baseConf, String clusterKey,
    String overridePrefix) throws IOException {
  Configuration clusterConf = HBaseConfiguration.create(baseConf);
  boolean hasClusterKey = clusterKey != null && !clusterKey.isEmpty();
  if (hasClusterKey) {
    applyClusterKeyToConf(clusterConf, clusterKey);
  }
  boolean hasOverrides = overridePrefix != null && !overridePrefix.isEmpty();
  if (hasOverrides) {
    // Prefixed properties win over anything already in the cluster config.
    HBaseConfiguration.merge(clusterConf, HBaseConfiguration.subset(clusterConf, overridePrefix));
  }
  return clusterConf;
}
// Layer the standard HBase resources over conf without losing existing entries.
HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
// Serialize the single Scan and register it as MultiTableInputFormat's scan list.
List<String> scanStrings = new ArrayList<>();
scanStrings.add(convertScanToString(scan));
job.getConfiguration().setStrings(MultiTableInputFormat.SCANS,
    scanStrings.toArray(new String[scanStrings.size()]));
// Build a client config wired to a custom connection implementation with a very
// small pause and a meta-operation timeout below that pause.
Configuration localConfig = HBaseConfiguration.create(this.conf);
localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
int pause = 10;
localConfig.setInt("hbase.client.pause", pause);
localConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
// NOTE(review): timeout (pause - 1) is deliberately smaller than the pause —
// presumably to force the SocketTimeoutException caught below; confirm intent.
localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1);
Connection connection = ConnectionFactory.createConnection(localConfig);
Table table = connection.getTable(TableName.META_TABLE_NAME);
Throwable t = null;
try {
  // Any meta read will do; it is expected to exceed the tiny timeout above.
  table.exists(new Get(Bytes.toBytes("abc")));
} catch (SocketTimeoutException e) {
  // Expected path: release resources once the timeout fires.
  // (Fragment is truncated here — the rest of the try/catch is outside this view.)
  table.close();
  connection.close();
static List<TableCoprocessorAttribute> getTableCoprocessorAttrsFromSchema(Configuration conf, TableDescriptor htd) { return htd.getCoprocessorDescriptors().stream().map(cp -> { Path path = cp.getJarPath().map(p -> new Path(p)).orElse(null); Configuration ourConf; if (!cp.getProperties().isEmpty()) { // do an explicit deep copy of the passed configuration ourConf = new Configuration(false); HBaseConfiguration.merge(ourConf, conf); cp.getProperties().forEach((k, v) -> ourConf.set(k, v)); } else { ourConf = conf; } return new TableCoprocessorAttribute(path, cp.getClassName(), cp.getPriority(), ourConf); }).collect(Collectors.toList()); }
@BeforeClass public static void setUpBeforeClass() throws Exception {
  // Cluster 1: v3 HFiles, its own ZK chroot, and replication-source settings
  // tuned so replication activity shows up quickly in the test.
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  // Cluster 2 inherits cluster 1's settings but uses a separate ZK chroot.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  // Globally-scoped family so edits replicate between the two clusters.
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  // NOTE(review): the inner try-with-resources re-declares `conn`/`admin`,
  // which shadows the outer resources and will not compile in Java — the inner
  // pair should use distinct names. Fragment appears truncated (closing braces
  // are outside this view), so the code is left unchanged here.
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    try (Connection conn = ConnectionFactory.createConnection(conf2);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
      htable1 = utility1.getConnection().getTable(TABLE_NAME);
      htable2 = utility2.getConnection().getTable(TABLE_NAME);
public static void main(String[] args) throws Exception { Configuration config = HBaseConfiguration.create(); if (args.length > 0) { config.set("hbase.rootdir", args[0]); } HTable table = new HTable(config, "WordCount"); for (String word : WordSpout.words) { Get get = new Get(Bytes.toBytes(word)); Result result = table.get(get); byte[] countBytes = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("count")); byte[] wordBytes = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("word")); String wordStr = Bytes.toString(wordBytes); System.out.println(wordStr); long count = Bytes.toLong(countBytes); System.out.println("Word: '" + wordStr + "', Count: " + count); } } }
int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
// NOTE(review): this `throw` is detached from any visible try/catch — the
// enclosing control flow is outside this fragment; left unchanged.
throw new IOException(e);
// Run the incremental load with table auto-creation disabled, using a private
// copy of conf so the caller's configuration is not mutated.
Configuration newConf = HBaseConfiguration.create(conf);
newConf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no");
loadHFiles.setConf(newConf);
TableName tableName = TableName.valueOf(tableNameString);
Table table = this.connection.getTable(tableName);
try (RegionLocator locator = connection.getRegionLocator(tableName)) {
@Test public void testSubset() throws Exception { Configuration conf = HBaseConfiguration.create(); // subset is used in TableMapReduceUtil#initCredentials to support different security // configurations between source and destination clusters, so we'll use that as an example String prefix = "hbase.mapred.output."; conf.set("hbase.security.authentication", "kerberos"); conf.set("hbase.regionserver.kerberos.principal", "hbasesource"); HBaseConfiguration.setWithPrefix(conf, prefix, ImmutableMap.of( "hbase.regionserver.kerberos.principal", "hbasedest", "", "shouldbemissing") .entrySet()); Configuration subsetConf = HBaseConfiguration.subset(conf, prefix); assertNull(subsetConf.get(prefix + "hbase.regionserver.kerberos.principal")); assertEquals("hbasedest", subsetConf.get("hbase.regionserver.kerberos.principal")); assertNull(subsetConf.get("hbase.security.authentication")); assertNull(subsetConf.get("")); Configuration mergedConf = HBaseConfiguration.create(conf); HBaseConfiguration.merge(mergedConf, subsetConf); assertEquals("hbasedest", mergedConf.get("hbase.regionserver.kerberos.principal")); assertEquals("kerberos", mergedConf.get("hbase.security.authentication")); assertEquals("shouldbemissing", mergedConf.get(prefix)); }
// Create a 3-region table spanning "aaaa".."zzzz" with a single family.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes
    .toBytes(tableName)));
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc, Bytes.toBytes("aaaa"), Bytes.toBytes("zzzz"), 3);
// Aggressive client timeouts with fast-fail mode enabled.
conf.setLong(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 100);
conf.setInt(HConstants.HBASE_CLIENT_PAUSE, 500);
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
conf.setBoolean(HConstants.HBASE_CLIENT_FAST_FAIL_MODE_ENABLED, true);
// NOTE(review): these class literals are dangling arguments — the call they
// belong to is outside this fragment; left unchanged.
CallQueueTooBigPffeInterceptor.class, PreemptiveFastFailInterceptor.class);
final Connection connection = ConnectionFactory.createConnection(conf);
// Shrink the call queue to zero so every call hits CallQueueTooBig, then
// restore a sane length and push the new config to the region server.
Configuration newConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
newConf.setInt("hbase.ipc.server.max.callqueue.length", 0);
srs.onConfigurationChange(newConf);
CallQueueTooBigPffeInterceptor.numCallQueueTooBig.get());
newConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
newConf.setInt("hbase.ipc.server.max.callqueue.length", 250);
srs.onConfigurationChange(newConf);
/**
 * Creates a record writer backed by a {@link BufferedMutator} for the HBase
 * table named in the job configuration.
 *
 * @param jobConf job configuration carrying {@code HBaseSerDe.HBASE_TABLE_NAME}
 *                and the WAL-enabled flag
 * @return a writer that owns both the mutator and its connection
 * @throws IOException if the connection or mutator cannot be created
 */
private MyRecordWriter getMyRecordWriter(JobConf jobConf) throws IOException {
  String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
  jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
  final boolean walEnabled = HiveConf.getBoolVar(
      jobConf, HiveConf.ConfVars.HIVE_HBASE_WAL_ENABLED);
  final Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create(jobConf));
  try {
    final BufferedMutator table = conn.getBufferedMutator(TableName.valueOf(hbaseTableName));
    return new MyRecordWriter(table, conn, walEnabled);
  } catch (IOException | RuntimeException e) {
    // Don't leak the connection when the mutator can't be created.
    conn.close();
    throw e;
  }
}
// Spin up a mini cluster and a pre-split table for the test.
Configuration conf = HBaseConfiguration.create();
TEST_UTIL = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(regionServerNum);
final TableName table = TableName.valueOf(name.getMethodName());
byte [] family = Bytes.toBytes("family");
LOG.info("Creating table with " + regionNum + " regions");
Table ht = TEST_UTIL.createMultiRegionTable(table, family, regionNum);
// Count the actual regions via the start keys of the region locator.
int numRegions = -1;
try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
  numRegions = r.getStartKeys().length;
  // NOTE(review): fragment is truncated — the try block's closing brace is
  // outside this view; close/shutdown appear inside the try as shown.
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); HBaseHelper helper = HBaseHelper.getHelper(conf); if (!helper.existsTable("testtable")) { helper.createTable("testtable", "colfam1"); } Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(TableName.valueOf("testtable")); // vv GetCloneExample Get get1 = new Get(Bytes.toBytes("row1")); get1.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1")); Get get2 = new Get(get1); Result result = table.get(get2); System.out.println("Result : " + result); // ^^ GetCloneExample table.close(); connection.close(); helper.close(); } }
/**
 * Builds a Thrift-backed HBase {@link Connection} against localhost.
 *
 * @param port    Thrift server port
 * @param useHttp when true, use the HTTP client builder and an http:// host
 * @return the Thrift connection
 * @throws IOException if the connection cannot be created
 */
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, ThriftConnection.class.getName());
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
/**
 * One-time test setup: configures a dummy replication source with fast
 * failover/retry timing, then starts a mini ZooKeeper cluster and wires up
 * replication.
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  // Shorten failover sleep and cap retries so the test completes quickly.
  conf.setLong("replication.sleep.before.failover", 2000);
  conf.setInt("replication.source.maxretriesmultiplier", 10);
  // Replace the real replication source with a test dummy.
  conf.set("replication.replicationsource.implementation",
      ReplicationSourceDummy.class.getCanonicalName());
  utility = new HBaseTestingUtility(conf);
  utility.startMiniZKCluster();
  setupZkAndReplication();
}
/**
 * Starts the HBase client by opening a connection against the configured
 * ZooKeeper quorum address and client port.
 *
 * @throws IOException if the connection cannot be established
 */
public void start() throws IOException {
  final Configuration hbaseConfig = HBaseConfiguration.create();
  hbaseConfig.set("hbase.zookeeper.property.clientPort", String.valueOf(zookeeperPort));
  hbaseConfig.set("hbase.zookeeper.quorum", zookeeperAddress);
  connection = ConnectionFactory.createConnection(hbaseConfig);
  logger.info("HBase client started.");
}