CompressionParameters cp;
if (compressionEnabled)
{
    try
    {
        cp = new CompressionParameters(compressionClass,
                                       compressionChunkSizeKB * 1024,
                                       Collections.<String, String>emptyMap());
    }
    catch (ConfigurationException ce)
    {
        // assumed rethrow; the original catch body is not shown in this snippet
        throw new RuntimeException(ce);
    }
}
else
{
    cp = new CompressionParameters(null);
    log.debug("Creating CF {}: setting {} to null to disable compression",
              columnfamilyName, CompressionParameters.SSTABLE_COMPRESSION);
}
@Override
public Map<String, String> getCompressionOptions(String cf) throws BackendException
{
    CFMetaData cfm = Schema.instance.getCFMetaData(keySpaceName, cf);
    if (cfm == null)
        return null;
    return ImmutableMap.copyOf(cfm.compressionParameters().asThriftOptions());
}
public static CompressionParameters create(Map<? extends CharSequence, ? extends CharSequence> opts) throws ConfigurationException
{
    Map<String, String> options = copyOptions(opts);
    String sstableCompressionClass = options.get(SSTABLE_COMPRESSION);
    String chunkLength = options.get(CHUNK_LENGTH_KB);
    // Strip the two well-known keys; whatever remains is handed to the compressor as its own options.
    options.remove(SSTABLE_COMPRESSION);
    options.remove(CHUNK_LENGTH_KB);
    CompressionParameters cp = new CompressionParameters(sstableCompressionClass, parseChunkLength(chunkLength), options);
    cp.validate();
    return cp;
}
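As a minimal sketch of how a caller might use this factory (the option keys are the SSTABLE_COMPRESSION and CHUNK_LENGTH_KB constants shown above; the compressor class and chunk size chosen here are just illustrative):

// Hypothetical caller: build an options map and let create() validate it.
Map<String, String> opts = new HashMap<String, String>();
opts.put(CompressionParameters.SSTABLE_COMPRESSION, "org.apache.cassandra.io.compress.LZ4Compressor");
opts.put(CompressionParameters.CHUNK_LENGTH_KB, "64");
try
{
    CompressionParameters params = CompressionParameters.create(opts);
    // params now carries the compressor instance plus a 64 KB chunk length
}
catch (ConfigurationException e)
{
    // thrown if the compressor class cannot be loaded or an option is invalid
    throw new RuntimeException(e);
}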
public static CompressionParameters getOutputCompressionParamaters(Configuration conf)
{
    if (getOutputCompressionClass(conf) == null)
        return new CompressionParameters(null);

    Map<String, String> options = new HashMap<String, String>();
    options.put(CompressionParameters.SSTABLE_COMPRESSION, getOutputCompressionClass(conf));
    options.put(CompressionParameters.CHUNK_LENGTH_KB, getOutputCompressionChunkLength(conf));

    try
    {
        return CompressionParameters.create(options);
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
}
chunkLength = CompressionParameters.parseChunkLength(compressionOptions.get(CompressionParameters.CHUNK_LENGTH_KB));
CompressionParameters cp = new CompressionParameters(sstableCompressionClass, chunkLength, remainingOptions);
cp.validate();
public void setCompressionParameters(Map<String, String> opts)
{
    try
    {
        metadata.compressionParameters = CompressionParameters.create(opts);
    }
    catch (ConfigurationException e)
    {
        throw new IllegalArgumentException(e.getMessage());
    }
}
public int chunkLength() { return parameters.chunkLength(); }
public CompressionParameters(String sstableCompressorClass, Integer chunkLength, Map<String, String> otherOptions) throws ConfigurationException
{
    this(createCompressor(parseCompressorClass(sstableCompressorClass), otherOptions), chunkLength, otherOptions);
}
public Map<String, String> asThriftOptions()
{
    Map<String, String> options = new HashMap<String, String>(otherOptions);
    if (sstableCompressor == null)
        return options;

    options.put(SSTABLE_COMPRESSION, sstableCompressor.getClass().getName());
    if (chunkLength != null)
        options.put(CHUNK_LENGTH_KB, chunkLengthInKB());
    return options;
}
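Callers such as getCompressionOptions() and getCompressionParameters() above rely on this map being the inverse of what create() consumes. A small sketch under that assumption (compressor class and chunk size are made-up values; create() can throw ConfigurationException, handled as in the earlier example):

// Sketch: options -> CompressionParameters -> options should round-trip.
Map<String, String> in = new HashMap<String, String>();
in.put(CompressionParameters.SSTABLE_COMPRESSION, "org.apache.cassandra.io.compress.SnappyCompressor");
in.put(CompressionParameters.CHUNK_LENGTH_KB, "128");

CompressionParameters params = CompressionParameters.create(in);
Map<String, String> out = params.asThriftOptions();
// out again contains the compressor class name and "128" under CHUNK_LENGTH_KB,
// plus any compressor-specific options that were passed through untouched.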
cfm.compressionParameters(CompressionParameters.create(cfProps.compressionParameters));
private String chunkLengthInKB() { return String.valueOf(chunkLength() / 1024); }
public SSTableSimpleUnsortedWriter(File directory,
                                   IPartitioner partitioner,
                                   String keyspace,
                                   String columnFamily,
                                   AbstractType<?> comparator,
                                   AbstractType<?> subComparator,
                                   int bufferSizeInMB)
{
    this(directory, partitioner, keyspace, columnFamily, comparator, subComparator, bufferSizeInMB, new CompressionParameters(null));
}
public Map<String,String> getCompressionParameters() { return metadata.compressionParameters().asThriftOptions(); }
public void applyToCFMetadata(CFMetaData cfm) throws ConfigurationException, SyntaxException
{
    if (hasProperty(KW_COMMENT))
        cfm.comment(getString(KW_COMMENT, ""));

    cfm.readRepairChance(getDouble(KW_READREPAIRCHANCE, cfm.getReadRepairChance()));
    cfm.dcLocalReadRepairChance(getDouble(KW_DCLOCALREADREPAIRCHANCE, cfm.getDcLocalReadRepair()));
    cfm.gcGraceSeconds(getInt(KW_GCGRACESECONDS, cfm.getGcGraceSeconds()));

    int minCompactionThreshold = toInt(KW_MINCOMPACTIONTHRESHOLD, getCompactionOptions().get(KW_MINCOMPACTIONTHRESHOLD), cfm.getMinCompactionThreshold());
    int maxCompactionThreshold = toInt(KW_MAXCOMPACTIONTHRESHOLD, getCompactionOptions().get(KW_MAXCOMPACTIONTHRESHOLD), cfm.getMaxCompactionThreshold());
    if (minCompactionThreshold <= 0 || maxCompactionThreshold <= 0)
        throw new ConfigurationException("Disabling compaction by setting compaction thresholds to 0 has been deprecated, set the compaction option 'enabled' to false instead.");
    cfm.minCompactionThreshold(minCompactionThreshold);
    cfm.maxCompactionThreshold(maxCompactionThreshold);

    cfm.defaultTimeToLive(getInt(KW_DEFAULT_TIME_TO_LIVE, cfm.getDefaultTimeToLive()));
    cfm.speculativeRetry(CFMetaData.SpeculativeRetry.fromString(getString(KW_SPECULATIVE_RETRY, cfm.getSpeculativeRetry().toString())));
    cfm.memtableFlushPeriod(getInt(KW_MEMTABLE_FLUSH_PERIOD, cfm.getMemtableFlushPeriod()));
    cfm.minIndexInterval(getInt(KW_MIN_INDEX_INTERVAL, cfm.getMinIndexInterval()));
    cfm.maxIndexInterval(getInt(KW_MAX_INDEX_INTERVAL, cfm.getMaxIndexInterval()));

    if (compactionStrategyClass != null)
    {
        cfm.compactionStrategyClass(compactionStrategyClass);
        cfm.compactionStrategyOptions(new HashMap<>(getCompactionOptions()));
    }

    cfm.bloomFilterFpChance(getDouble(KW_BF_FP_CHANCE, cfm.getBloomFilterFpChance()));

    if (!getCompressionOptions().isEmpty())
        cfm.compressionParameters(CompressionParameters.create(getCompressionOptions()));

    CachingOptions cachingOptions = getCachingOptions();
    if (cachingOptions != null)
        cfm.caching(cachingOptions);
}
/**
 * @param source Input source to read compressed data from
 * @param info Compression info
 */
public CompressedInputStream(InputStream source, CompressionInfo info, boolean hasPostCompressionAdlerChecksums)
{
    this.info = info;
    this.checksum = hasPostCompressionAdlerChecksums ? new Adler32() : new CRC32();
    this.hasPostCompressionAdlerChecksums = hasPostCompressionAdlerChecksums;
    this.buffer = new byte[info.parameters.chunkLength()];
    // buffer is limited to store up to 1024 chunks
    this.dataBuffer = new ArrayBlockingQueue<byte[]>(Math.min(info.chunks.length, 1024));
    new Thread(new Reader(source, info, dataBuffer)).start();
}
public CompressionParameters copy()
{
    try
    {
        return new CompressionParameters(sstableCompressor, chunkLength, new HashMap<>(otherOptions));
    }
    catch (ConfigurationException e)
    {
        throw new AssertionError(e); // can't happen at this point.
    }
}
.compactionStrategyClass(cfProps.compactionStrategyClass)
.compactionStrategyOptions(cfProps.compactionStrategyOptions)
.compressionParameters(CompressionParameters.create(cfProps.compressionParameters))
.caching(CachingOptions.fromString(getPropertyString(CFPropDefs.KW_CACHING, CFMetaData.DEFAULT_CACHING_STRATEGY.toString())))
.speculativeRetry(CFMetaData.SpeculativeRetry.fromString(getPropertyString(CFPropDefs.KW_SPECULATIVE_RETRY, CFMetaData.DEFAULT_SPECULATIVE_RETRY.toString())))
/**
 * @param sections Collection of sections in the uncompressed file. Should not contain sections that overlap each other.
 * @return Total chunk size in bytes for the given sections, including checksums.
 */
public long getTotalSizeForSections(Collection<Pair<Long, Long>> sections)
{
    long size = 0;
    long lastOffset = -1;
    for (Pair<Long, Long> section : sections)
    {
        int startIndex = (int) (section.left / parameters.chunkLength());
        int endIndex = (int) (section.right / parameters.chunkLength());
        endIndex = section.right % parameters.chunkLength() == 0 ? endIndex - 1 : endIndex;
        for (int i = startIndex; i <= endIndex; i++)
        {
            long offset = i * 8L;
            long chunkOffset = chunkOffsets.getLong(offset);
            if (chunkOffset > lastOffset)
            {
                lastOffset = chunkOffset;
                long nextChunkOffset = offset + 8 == chunkOffsetsSize
                                     ? compressedFileLength
                                     : chunkOffsets.getLong(offset + 8);
                size += (nextChunkOffset - chunkOffset);
            }
        }
    }
    return size;
}
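To make the start/end chunk arithmetic concrete, a hypothetical standalone sketch of the same computation (the 64 KB chunk length and the section bounds are made-up values, not taken from the snippets above):

// Hypothetical illustration of the chunk-index computation used in getTotalSizeForSections.
long chunkLength = 65536;                              // 64 KB chunks
long sectionLeft = 100000, sectionRight = 200000;      // uncompressed byte range

int startIndex = (int) (sectionLeft / chunkLength);    // 1: byte 100000 falls in chunk 1
int endIndex = (int) (sectionRight / chunkLength);     // 3: byte 200000 falls in chunk 3
if (sectionRight % chunkLength == 0)
    endIndex--;                                        // a right bound on a chunk boundary belongs to the previous chunk

// Chunks 1..3 must be read; each chunk's compressed size is the distance between
// consecutive entries in chunkOffsets (or up to compressedFileLength for the last chunk).
System.out.println("chunks " + startIndex + ".." + endIndex);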