@Override
public void setCompression(HColumnDescriptor cd, String algo) {
  cd.setCompressionType(Compression.Algorithm.valueOf(algo));
}
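A minimal, hypothetical usage sketch for the override above; the family name "cf" is a placeholder and not from the original code. Compression.Algorithm.valueOf is case-sensitive and accepts only the exact enum constant names, e.g. "NONE", "GZ", "LZO", "SNAPPY", "LZ4".

// Hypothetical caller (not part of the original snippet); an unknown or
// lower-cased name makes Algorithm.valueOf throw IllegalArgumentException.
HColumnDescriptor cd = new HColumnDescriptor(Bytes.toBytes("cf"));
setCompression(cd, "GZ");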
protected HTableDescriptor getTableDescriptor() {
  if (TABLE_DESCRIPTOR == null) {
    TABLE_DESCRIPTOR = new HTableDescriptor(tableName);
    HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
    family.setDataBlockEncoding(blockEncoding);
    family.setCompressionType(compression);
    if (inMemoryCF) {
      family.setInMemory(true);
    }
    TABLE_DESCRIPTOR.addFamily(family);
  }
  return TABLE_DESCRIPTOR;
}
private void setupMockColumnFamiliesForCompression(Table table,
    Map<String, Compression.Algorithm> familyToCompression) throws IOException {
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setCompressionType(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
case "snappy": { logger.info("hbase will use snappy to compress data"); cf.setCompressionType(Algorithm.SNAPPY); break; cf.setCompressionType(Algorithm.LZO); break; case "gzip": { logger.info("hbase will use gzip to compress data"); cf.setCompressionType(Algorithm.GZ); break; cf.setCompressionType(Algorithm.LZ4); break; default: { logger.info("hbase will not use any compression algorithm to compress data"); cf.setCompressionType(Algorithm.NONE);
private static void create(Admin admin, TableName tableName, byte[]... families)
    throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    HColumnDescriptor colDesc = new HColumnDescriptor(family);
    colDesc.setMaxVersions(1);
    colDesc.setCompressionType(Algorithm.GZ);
    desc.addFamily(colDesc);
  }
  try {
    admin.createTable(desc);
  } catch (TableExistsException tee) {
    // Ignore: the table already exists.
  }
}
@Test
public void testBlocksScanned() throws Exception {
  byte[] tableName = Bytes.toBytes("TestBlocksScanned");
  HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));
  table.addFamily(new HColumnDescriptor(FAMILY)
      .setMaxVersions(10)
      .setBlockCacheEnabled(true)
      .setBlocksize(BLOCK_SIZE)
      .setCompressionType(Compression.Algorithm.NONE));
  _testBlocksScanned(table);
}
@Test
public void testBlocksScannedWithEncoding() throws Exception {
  byte[] tableName = Bytes.toBytes("TestBlocksScannedWithEncoding");
  HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));
  table.addFamily(new HColumnDescriptor(FAMILY)
      .setMaxVersions(10)
      .setBlockCacheEnabled(true)
      .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
      .setBlocksize(BLOCK_SIZE)
      .setCompressionType(Compression.Algorithm.NONE));
  _testBlocksScanned(table);
}
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument if the column name is empty
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in) throws IllegalArgument {
  Compression.Algorithm comp =
      Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT));
  BloomType bt = BloomType.valueOf(in.bloomFilterType);
  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte[] parsedName = CellUtil.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return col;
}
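A hypothetical caller of the conversion above. The field assignments assume the standard Thrift-generated ColumnDescriptor fields (public in generated code) plus a java.nio.ByteBuffer import; the column family name and values are illustrative only.

// Hypothetical usage sketch, not from the original snippet.
ColumnDescriptor in = new ColumnDescriptor();
in.name = ByteBuffer.wrap(Bytes.toBytes("cf:"));
in.maxVersions = 3;
in.compression = "GZ";        // lower-cased and resolved via getCompressionAlgorithmByName
in.bloomFilterType = "ROW";   // must match a BloomType enum constant
in.inMemory = false;
in.blockCacheEnabled = true;
in.timeToLive = -1;           // non-positive TTL is mapped to Integer.MAX_VALUE
HColumnDescriptor hcd = colDescFromThrift(in);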
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding,
    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setDurability(durability);
  desc.setRegionReplication(regionReplication);
  HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
  hcd.setDataBlockEncoding(dataBlockEncoding);
  hcd.setCompressionType(compression);
  return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
}
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<>();
  long familyId = 0;
  for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
      for (BloomType bloomType : BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
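A hypothetical caller of generateColumnDescriptors; the Admin instance, the table name, and the prefix are placeholders that are not part of the original snippet.

// Hypothetical usage: build one table containing every generated
// compression/encoding/bloom combination and create it.
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("codec_matrix_test"));
for (HColumnDescriptor hcd : generateColumnDescriptors("combo")) {
  htd.addFamily(hcd);
}
admin.createTable(htd);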
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding,
    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setDurability(durability);
  desc.setRegionReplication(regionReplication);
  HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
  for (int i = 0; i < columnFamilies.length; i++) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    hcds[i] = hcd;
  }
  return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
}
/**
 * Create an HTableDescriptor from provided TestOptions.
 */
protected static HTableDescriptor getTableDescriptor(TestOptions opts) {
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(opts.tableName));
  for (int family = 0; family < opts.families; family++) {
    byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family);
    HColumnDescriptor familyDesc = new HColumnDescriptor(familyName);
    familyDesc.setDataBlockEncoding(opts.blockEncoding);
    familyDesc.setCompressionType(opts.compression);
    familyDesc.setBloomFilterType(opts.bloomType);
    familyDesc.setBlocksize(opts.blockSize);
    if (opts.inMemoryCF) {
      familyDesc.setInMemory(true);
    }
    familyDesc.setInMemoryCompaction(opts.inMemoryCompaction);
    tableDesc.addFamily(familyDesc);
  }
  if (opts.replicas != DEFAULT_OPTS.replicas) {
    tableDesc.setRegionReplication(opts.replicas);
  }
  if (opts.splitPolicy != null && !opts.splitPolicy.equals(DEFAULT_OPTS.splitPolicy)) {
    tableDesc.setRegionSplitPolicyClassName(opts.splitPolicy);
  }
  return tableDesc;
}
protected void prepareForLoadTest() throws IOException {
  LOG.info("Starting load test: dataBlockEncoding=" + dataBlockEncoding
      + ", isMultiPut=" + isMultiPut);
  numKeys = numKeys();
  Admin admin = TEST_UTIL.getAdmin();
  while (admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
      .getLiveServerMetrics().size() < NUM_RS) {
    LOG.info("Sleeping until " + NUM_RS + " RSs are online");
    Threads.sleepWithoutInterrupt(1000);
  }
  admin.close();
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  HColumnDescriptor hcd = new HColumnDescriptor(CF)
      .setCompressionType(compression)
      .setDataBlockEncoding(dataBlockEncoding);
  createPreSplitLoadTestTable(htd, hcd);
  LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(CF);
  writerThreads = prepareWriterThreads(dataGen, conf, TABLE);
  readerThreads = prepareReaderThreads(dataGen, conf, TABLE, 100);
}
@Test
public void testThreeStoreFiles() throws IOException {
  region = TEST_UTIL.createTestRegion(TABLE_NAME,
      new HColumnDescriptor(FAMILY)
          .setCompressionType(Compression.Algorithm.GZ)
          .setBloomFilterType(bloomType)
          .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS));
  createStoreFile(new int[] {1, 2, 6});
  createStoreFile(new int[] {1, 2, 3, 7});
  createStoreFile(new int[] {1, 9});
  scanColSet(new int[] {1, 4, 6, 7}, new int[] {1, 6, 7});
  HBaseTestingUtility.closeRegionAndWAL(region);
}
.setCompressionType(comprAlgo)
.setBloomFilterType(bloomType)
.setMaxVersions(3)
hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
hcd.setBloomFilterType(BloomType.ROW);
hcd.setCompressionType(Algorithm.SNAPPY);
hcd.setMobEnabled(true);
hcd.setMobThreshold(1000L);