private HTableDescriptor createHtd(boolean isStripe) throws Exception { HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY)); String noSplitsPolicy = DisabledRegionSplitPolicy.class.getName(); htd.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, noSplitsPolicy); if (isStripe) { htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName()); if (initialStripeCount != null) { htd.setConfiguration( StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialStripeCount.toString()); htd.setConfiguration( HStore.BLOCKING_STOREFILES_KEY, Long.toString(10 * initialStripeCount)); } else { htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "500"); } if (splitSize != null) { htd.setConfiguration(StripeStoreConfig.SIZE_TO_SPLIT_KEY, splitSize.toString()); } if (splitParts != null) { htd.setConfiguration(StripeStoreConfig.SPLIT_PARTS_KEY, splitParts.toString()); } } else { htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "10"); // default } return htd; }
@Override protected void initTable() throws IOException { // Do the same as the LoadTestTool does, but with different table configuration. HTableDescriptor htd = new HTableDescriptor(getTablename()); htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName()); htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "100"); HColumnDescriptor hcd = new HColumnDescriptor(HFileTestUtil.DEFAULT_COLUMN_FAMILY); HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), htd, hcd); }
@Before public void setUp() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); htd.setOwner(USER_OWNER); HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY1); hcd.setMaxVersions(10); htd.addFamily(hcd); hcd = new HColumnDescriptor(TEST_FAMILY2); hcd.setMaxVersions(10); htd.addFamily(hcd); // Enable backwards compatible early termination behavior in the HTD. We // want to confirm that the per-table configuration is properly picked up. htd.setConfiguration(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, "true"); admin.createTable(htd); TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE.getTableName()); }
private void createTableWithNonDefaultProperties() throws Exception { final long startTime = System.currentTimeMillis(); final String sourceTableNameAsString = STRING_TABLE_NAME + startTime; originalTableName = TableName.valueOf(sourceTableNameAsString); // enable replication on a column family HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM); HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM); HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM); HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM); maxVersionsColumn.setMaxVersions(MAX_VERSIONS); bloomFilterColumn.setBloomFilterType(BLOOM_TYPE); dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE); blockSizeColumn.setBlocksize(BLOCK_SIZE); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString)); htd.addFamily(maxVersionsColumn); htd.addFamily(bloomFilterColumn); htd.addFamily(dataBlockColumn); htd.addFamily(blockSizeColumn); htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE); htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE); assertTrue(htd.getConfiguration().size() > 0); admin.createTable(htd); Table original = UTIL.getConnection().getTable(originalTableName); originalTableName = TableName.valueOf(sourceTableNameAsString); originalTableDescriptor = admin.getTableDescriptor(originalTableName); originalTableDescription = originalTableDescriptor.toStringCustomizedValues(); original.close(); }
/**
 * Verifies that a configuration key/value pair can be set on, read back from, and
 * removed from an HTableDescriptor.
 */
@Test
public void testAddGetRemoveConfiguration() throws Exception {
  final String key = "Some";
  final String value = "value";
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  desc.setConfiguration(key, value);
  assertEquals(value, desc.getConfigurationValue(key));
  desc.removeConfiguration(key);
  // Once removed, the key must no longer resolve to a value.
  assertEquals(null, desc.getConfigurationValue(key));
}
/**
 * (Re)creates the test table, pre-split with one hex-string split per live region
 * server. On a minicluster the memstore flush size is lowered so flushes still happen.
 *
 * @param htd descriptor of the table to create
 */
protected void createTable(HTableDescriptor htd) throws Exception {
  deleteTable();
  if (util.getHBaseClusterInterface() instanceof MiniHBaseCluster) {
    LOG.warn("Test does not make a lot of sense for minicluster. Will set flush size low.");
    htd.setConfiguration(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, "1048576");
  }
  int liveServers =
      util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size();
  byte[][] splits = new RegionSplitter.HexStringSplit().split(liveServers);
  util.getAdmin().createTable(htd, splits);
}
// Create a table with a coprocessor that blocks multi-mutations, and a shortened
// row-lock wait (5s) so lock contention in the test surfaces quickly.
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addCoprocessor(WaitingForMultiMutationsObserver.class.getName());
desc.setConfiguration("hbase.rowlock.wait.duration", String.valueOf(5000));
desc.addFamily(new HColumnDescriptor(FAMILY));
TEST_UTIL.getAdmin().createTable(desc);
// Register both the multi-row mutation endpoint and the blocking observer, and
// shorten the row-lock wait to 5s so the blocked mutation times out quickly.
desc.addCoprocessor(MultiRowMutationEndpoint.class.getName());
desc.addCoprocessor(WaitingForMultiMutationsObserver.class.getName());
desc.setConfiguration("hbase.rowlock.wait.duration", String.valueOf(5000));
desc.addFamily(new HColumnDescriptor(FAMILY));
TEST_UTIL.getAdmin().createTable(desc);
// Table whose maximum cell size is capped at 10 KB via per-table configuration,
// so the test can exercise rejection of oversized cells.
final TableName tableName = TableName.valueOf("testCellSizeLimit");
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.setConfiguration(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(10 * 1024)); // 10K
HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
htd.addFamily(fam);
// Remove the third family and turn off table sanity checks — presumably so a
// descriptor the master would otherwise reject is still accepted; NOTE(review):
// confirm against the surrounding test.
htd2.removeFamily(Bytes.toBytes(cf3));
htd2.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
// With sanity checks disabled via per-table configuration, the descriptor is
// expected to pass the legality check.
htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
checkTableIsLegal(htd);
/**
 * Round-trips a configuration entry through an HTableDescriptor: set it, read it back,
 * remove it, and confirm it is gone.
 */
@Test
public void testAddGetRemoveConfiguration() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  String configKey = "Some";
  String configValue = "value";
  desc.setConfiguration(configKey, configValue);
  assertEquals(configValue, desc.getConfigurationValue(configKey));
  desc.removeConfiguration(configKey);
  // Removal must leave no value behind for the key.
  assertEquals(null, desc.getConfigurationValue(configKey));
}
/**
 * Enable or disable the memstore replication from the primary region to the replicas.
 * The replication will be used only for meta operations (e.g. flush, compaction, ...)
 *
 * @param memstoreReplication true if the new data written to the primary region
 *                            should be replicated.
 *                            false if the secondaries can tolerate having new
 *                            data only when the primary flushes the memstore.
 * @return this descriptor, to allow call chaining
 */
public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
  setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
  // If the memstore replication is setup, we do not have to wait for observing a flush event
  // from primary before starting to serve reads, because gaps from replication is not applicable
  setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
      Boolean.toString(memstoreReplication));
  return this;
}
/**
 * Registers a server-side operation on the table descriptor under a namespaced
 * configuration key prefix: its class id, priority, applicable scopes, and any
 * extra options.
 *
 * Fix vs. the original: the options prefix was built with
 * {@code String.format(basePrefix + ... + ".")}, passing a dynamically-built string
 * as a format string with no arguments — a no-op at best, and it throws
 * {@code IllegalFormatException} if any component contains a '%'. Plain
 * concatenation is used instead.
 *
 * @param desc               table descriptor to mutate
 * @param namespace          namespace component of the config-safe key prefix
 * @param qualifier          table qualifier component of the prefix
 * @param priority           operation priority, stored as a decimal string
 * @param serverOpName       name of the server-side operation
 * @param operationClassName fully-qualified class implementing the operation
 * @param scopes             scopes the operation applies to (stored comma-joined)
 * @param properties         additional options, stored under the options prefix
 */
private static void addConfig(
    final HTableDescriptor desc,
    final String namespace,
    final String qualifier,
    final int priority,
    final String serverOpName,
    final String operationClassName,
    final ImmutableSet<ServerOpScope> scopes,
    final Map<String, String> properties) {
  final String basePrefix =
      ServerSideOperationUtils.SERVER_OP_PREFIX + "."
          + HBaseUtils.writeTableNameAsConfigSafe(namespace) + "."
          + HBaseUtils.writeTableNameAsConfigSafe(qualifier) + "."
          + serverOpName + ".";
  desc.setConfiguration(
      basePrefix + ServerSideOperationUtils.SERVER_OP_CLASS_KEY,
      ByteArrayUtils.byteArrayToString(URLClassloaderUtils.toClassId(operationClassName)));
  desc.setConfiguration(
      basePrefix + ServerSideOperationUtils.SERVER_OP_PRIORITY_KEY,
      Integer.toString(priority));
  desc.setConfiguration(
      basePrefix + ServerSideOperationUtils.SERVER_OP_SCOPES_KEY,
      scopes.stream().map(ServerOpScope::name).collect(Collectors.joining(",")));
  final String optionsPrefix =
      basePrefix + ServerSideOperationUtils.SERVER_OP_OPTIONS_PREFIX + ".";
  for (final Entry<String, String> e : properties.entrySet()) {
    desc.setConfiguration(optionsPrefix + e.getKey(), e.getValue());
  }
}
private HTableDescriptor createHtd(boolean isStripe) throws Exception { HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY)); String noSplitsPolicy = DisabledRegionSplitPolicy.class.getName(); htd.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, noSplitsPolicy); if (isStripe) { htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName()); if (initialStripeCount != null) { htd.setConfiguration( StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialStripeCount.toString()); htd.setConfiguration( HStore.BLOCKING_STOREFILES_KEY, Long.toString(10 * initialStripeCount)); } else { htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "500"); } if (splitSize != null) { htd.setConfiguration(StripeStoreConfig.SIZE_TO_SPLIT_KEY, splitSize.toString()); } if (splitParts != null) { htd.setConfiguration(StripeStoreConfig.SPLIT_PARTS_KEY, splitParts.toString()); } } else { htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "10"); // default } return htd; }
@Override protected void initTable() throws IOException { // Do the same as the LoadTestTool does, but with different table configuration. HTableDescriptor htd = new HTableDescriptor(getTablename()); htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName()); htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "100"); HColumnDescriptor hcd = new HColumnDescriptor(HFileTestUtil.DEFAULT_COLUMN_FAMILY); HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), htd, hcd); }
private void createTableWithNonDefaultProperties() throws Exception { final long startTime = System.currentTimeMillis(); final String sourceTableNameAsString = STRING_TABLE_NAME + startTime; originalTableName = TableName.valueOf(sourceTableNameAsString); // enable replication on a column family HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM); HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM); HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM); HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM); maxVersionsColumn.setMaxVersions(MAX_VERSIONS); bloomFilterColumn.setBloomFilterType(BLOOM_TYPE); dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE); blockSizeColumn.setBlocksize(BLOCK_SIZE); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString)); htd.addFamily(maxVersionsColumn); htd.addFamily(bloomFilterColumn); htd.addFamily(dataBlockColumn); htd.addFamily(blockSizeColumn); htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE); htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE); assertTrue(htd.getConfiguration().size() > 0); admin.createTable(htd); Table original = UTIL.getConnection().getTable(originalTableName); originalTableName = TableName.valueOf(sourceTableNameAsString); originalTableDescriptor = admin.getTableDescriptor(originalTableName); originalTableDescription = originalTableDescriptor.toStringCustomizedValues(); original.close(); }
@Before public void setUp() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); htd.setOwner(USER_OWNER); HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY1); hcd.setMaxVersions(10); htd.addFamily(hcd); hcd = new HColumnDescriptor(TEST_FAMILY2); hcd.setMaxVersions(10); htd.addFamily(hcd); // Enable backwards compatible early termination behavior in the HTD. We // want to confirm that the per-table configuration is properly picked up. htd.setConfiguration(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, "true"); admin.createTable(htd); TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE.getTableName()); }
/**
 * Converts a protobuf table schema into an {@link HTableDescriptor}.
 *
 * @param ts A pb TableSchema instance.
 * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
 */
public static HTableDescriptor convert(final TableSchema ts) {
  List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
  HColumnDescriptor[] families = new HColumnDescriptor[list.size()];
  for (int i = 0; i < families.length; i++) {
    families[i] = HColumnDescriptor.convert(list.get(i));
  }
  HTableDescriptor htd =
      new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()), families);
  // Restore the raw byte attributes, then the string configuration entries.
  for (BytesBytesPair attr : ts.getAttributesList()) {
    htd.setValue(attr.getFirst().toByteArray(), attr.getSecond().toByteArray());
  }
  for (NameStringPair conf : ts.getConfigurationList()) {
    htd.setConfiguration(conf.getName(), conf.getValue());
  }
  return htd;
}
/**
 * Replaces any existing test table with a fresh one, pre-split by hex-string keys into
 * one region per live server; lowers the memstore flush size when running against a
 * minicluster so the test still produces flushes.
 *
 * @param htd descriptor for the table to create
 */
protected void createTable(HTableDescriptor htd) throws Exception {
  deleteTable();
  boolean isMini = util.getHBaseClusterInterface() instanceof MiniHBaseCluster;
  if (isMini) {
    LOG.warn("Test does not make a lot of sense for minicluster. Will set flush size low.");
    htd.setConfiguration(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, "1048576");
  }
  int regionCount =
      util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size();
  util.getAdmin().createTable(htd, new RegionSplitter.HexStringSplit().split(regionCount));
}