public static Algorithm getCompressionAlgorithmByName(String compressName) {
  Algorithm[] algos = Algorithm.class.getEnumConstants();

  for (Algorithm a : algos) {
    if (a.getName().equals(compressName)) {
      return a;
    }
  }

  throw new IllegalArgumentException(
      "Unsupported compression algorithm name: " + compressName);
}
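// Hypothetical usage sketch: resolve a user-supplied codec name, falling
// back to NONE when the name is unrecognized. The "hfile.compression"
// configuration key below is illustrative only, not a real HBase property.
Algorithm algo;
try {
  algo = getCompressionAlgorithmByName(conf.get("hfile.compression", "none"));
} catch (IllegalArgumentException e) {
  algo = Algorithm.NONE;
}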
@Override
public String toString() {
  return "writer=" + (path != null ? path.toString() : null) +
      ", name=" + name +
      ", compression=" + compressAlgo.getName();
}
static String[] getSupportedAlgorithms() {
  Algorithm[] algos = Algorithm.class.getEnumConstants();

  String[] ret = new String[algos.length];
  int i = 0;
  for (Algorithm a : algos) {
    ret[i++] = a.getName();
  }

  return ret;
}
}
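// Hypothetical usage sketch: surface the supported codec names, e.g. in a
// CLI usage message. Order follows the Algorithm enum declaration order.
System.out.println("Supported codecs: " +
    java.util.Arrays.toString(getSupportedAlgorithms()));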
Compression.Algorithm.NONE.getName());
final boolean compactionExclude = conf.getBoolean(
    "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
/**
 * Serialize column family to compression algorithm map to configuration.
 * Invoked while configuring the MR job for incremental load.
 *
 * Package-private for unit tests only.
 *
 * @throws IOException on failure to read column family descriptors
 */
static void configureCompression(HTable table, Configuration conf)
    throws IOException {
  StringBuilder compressionConfigValue = new StringBuilder();
  HTableDescriptor tableDescriptor = table.getTableDescriptor();
  if (tableDescriptor == null) {
    // could happen with mock table instance
    return;
  }
  Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
  int i = 0;
  for (HColumnDescriptor familyDescriptor : families) {
    if (i++ > 0) {
      compressionConfigValue.append('&');
    }
    compressionConfigValue.append(
        URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
    compressionConfigValue.append('=');
    compressionConfigValue.append(
        URLEncoder.encode(familyDescriptor.getCompression().getName(), "UTF-8"));
  }
  // Separators are prepended before each entry after the first, so there is
  // no trailing ampersand to trim.
  conf.set(COMPRESSION_CONF_KEY, compressionConfigValue.toString());
}
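/*
 * A minimal sketch of the inverse parse, assuming the exact
 * "family=codec&family=codec" URL-encoded format written above.
 * parseCompressionMap is a hypothetical helper, not HBase API; it assumes
 * java.net.URLDecoder and java.util.{Map, HashMap} are imported.
 */
static Map<String, Compression.Algorithm> parseCompressionMap(Configuration conf)
    throws IOException {
  Map<String, Compression.Algorithm> map =
      new HashMap<String, Compression.Algorithm>();
  for (String pair : conf.get(COMPRESSION_CONF_KEY, "").split("&")) {
    String[] kv = pair.split("=", 2);
    if (kv.length < 2) {
      continue; // skip the empty token an empty config value produces
    }
    map.put(URLDecoder.decode(kv[0], "UTF-8"),
        Compression.getCompressionAlgorithmByName(
            URLDecoder.decode(kv[1], "UTF-8")));
  }
  return map;
}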
public static AFamilyDescriptor hcdToAFD(HColumnDescriptor hcd)
    throws IOException {
  AFamilyDescriptor afamily = new AFamilyDescriptor();
  afamily.name = ByteBuffer.wrap(hcd.getName());
  String compressionAlgorithm = hcd.getCompressionType().getName();
  // Compare with equals, not ==: == tests reference identity and only
  // happens to work for interned strings. Case-insensitive comparison also
  // tolerates lowercase names as reported by Compression.Algorithm.
  if ("LZO".equalsIgnoreCase(compressionAlgorithm)) {
    afamily.compression = ACompressionAlgorithm.LZO;
  } else if ("GZ".equalsIgnoreCase(compressionAlgorithm)) {
    afamily.compression = ACompressionAlgorithm.GZ;
  } else {
    afamily.compression = ACompressionAlgorithm.NONE;
  }
  afamily.maxVersions = hcd.getMaxVersions();
  afamily.blocksize = hcd.getBlocksize();
  afamily.inMemory = hcd.isInMemory();
  afamily.timeToLive = hcd.getTimeToLive();
  afamily.blockCacheEnabled = hcd.isBlockCacheEnabled();
  return afamily;
}
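// Hypothetical usage sketch: convert every family of a table descriptor to
// its Avro form. "tableDescriptor" is an assumed in-scope HTableDescriptor.
List<AFamilyDescriptor> afamilies = new ArrayList<AFamilyDescriptor>();
for (HColumnDescriptor hcd : tableDescriptor.getFamilies()) {
  afamilies.add(hcdToAFD(hcd));
}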
public static void testCompression(Compression.Algorithm algo)
    throws IOException {
  if (compressionTestResults[algo.ordinal()] != null) {
    if (compressionTestResults[algo.ordinal()]) {
      return; // already passed test, don't do it again.
    } else {
      // failed.
      throw new IOException("Compression algorithm '" + algo.getName() + "'" +
          " previously failed test.");
    }
  }

  Configuration conf = HBaseConfiguration.create();
  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // passes
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // failure
    throw new IOException(t);
  }
}
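// Hypothetical usage sketch: probe a codec up front so a misconfigured node
// fails fast instead of at first flush. "LOG" is an assumed in-scope
// commons-logging Log instance, not defined in this snippet.
try {
  testCompression(Compression.Algorithm.LZO);
} catch (IOException e) {
  LOG.warn("LZO failed the compression test; check the native libraries", e);
}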
/**
 * Compression types supported in hbase.
 * LZO is not bundled as part of the hbase distribution.
 * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
 * for how to enable it.
 * @param type Compression type setting.
 * @return this (for chained invocation)
 */
public HColumnDescriptor setCompactionCompressionType(
    Compression.Algorithm type) {
  return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase());
}
@Override
public String toString() {
  return "reader=" + path.toString() +
      (!isFileInfoLoaded() ? "" :
        ", compression=" + compressAlgo.getName() +
        ", cacheConf=" + cacheConf +
        ", firstKey=" + toStringFirstKey() +
        ", lastKey=" + toStringLastKey()) +
      ", avgKeyLen=" + avgKeyLen +
      ", avgValueLen=" + avgValueLen +
      ", entries=" + trailer.getEntryCount() +
      ", length=" + fileSize;
}
/**
 * Compression types supported in hbase.
 * LZO is not bundled as part of the hbase distribution.
 * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
 * for how to enable it.
 * @param type Compression type setting.
 * @return this (for chained invocation)
 */
public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
  return setValue(COMPRESSION, type.getName().toUpperCase());
}
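// Hypothetical usage sketch: because both setters return `this`, flush-time
// and compaction-time compression can be configured in one chained call.
HColumnDescriptor hcd = new HColumnDescriptor("colfam")
    .setCompressionType(Compression.Algorithm.GZ)
    .setCompactionCompressionType(Compression.Algorithm.GZ);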