/**
 * Maximum number of bytes to be encrypted at a time when SASL encryption is used.
 */
public int maxSaslEncryptedBlockSize() {
  return Ints.checkedCast(JavaUtils.byteStringAsBytes(
    conf.get("spark.network.sasl.maxEncryptedBlockSize", "64k")));
}
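/*
 * A minimal sketch of how a block-size cap like the one above is typically applied:
 * the payload is split into chunks of at most maxBlockSize bytes, and each chunk is
 * then encrypted independently. Illustrative only; the BlockChunkingSketch class and
 * its chunkPayload helper are hypothetical, not part of Spark's SaslEncryption code.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class BlockChunkingSketch {
  /** Splits payload into slices of at most maxBlockSize bytes each. */
  static List<byte[]> chunkPayload(byte[] payload, int maxBlockSize) {
    List<byte[]> chunks = new ArrayList<>();
    for (int offset = 0; offset < payload.length; offset += maxBlockSize) {
      int end = Math.min(payload.length, offset + maxBlockSize);
      chunks.add(Arrays.copyOfRange(payload, offset, end));
    }
    return chunks;
  }

  public static void main(String[] args) {
    byte[] payload = new byte[200_000];            // stand-in for a shuffle block
    int maxBlockSize = 64 * 1024;                  // the 64k default parsed above
    List<byte[]> chunks = chunkPayload(payload, maxBlockSize);
    System.out.println(chunks.size() + " chunks"); // 4 chunks: 3 x 64k + remainder
  }
}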
/**
 * Minimum size of a block that we should start using memory map rather than reading in through
 * normal IO operations. This prevents Spark from memory mapping very small blocks. In general,
 * memory mapping has high overhead for blocks close to or below the page size of the OS.
 */
public int memoryMapBytes() {
  return Ints.checkedCast(JavaUtils.byteStringAsBytes(
    conf.get("spark.storage.memoryMapThreshold", "2m")));
}
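/*
 * A minimal sketch of how such a threshold is typically consulted: blocks at or above
 * the threshold are memory-mapped, smaller ones are read through a normal channel read,
 * matching the rationale in the javadoc above. The MemoryMapThresholdSketch class and
 * its readBlock helper are hypothetical, for illustration only.
 */
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class MemoryMapThresholdSketch {
  static ByteBuffer readBlock(Path file, long offset, int length, int memoryMapThreshold)
      throws IOException {
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      if (length >= memoryMapThreshold) {
        // Large block: let the OS page it in on demand instead of copying it up front.
        return channel.map(FileChannel.MapMode.READ_ONLY, offset, length);
      }
      // Small block: a plain read avoids the per-mapping overhead near the OS page size.
      ByteBuffer buf = ByteBuffer.allocate(length);
      channel.position(offset);
      while (buf.hasRemaining() && channel.read(buf) >= 0) { /* keep reading */ }
      buf.flip();
      return buf;
    }
  }
}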
@VisibleForTesting
ExternalShuffleBlockResolver(
    TransportConf conf,
    File registeredExecutorFile,
    Executor directoryCleaner) throws IOException {
  this.conf = conf;
  this.registeredExecutorFile = registeredExecutorFile;
  String indexCacheSize = conf.get("spark.shuffle.service.index.cache.size", "100m");
  CacheLoader<File, ShuffleIndexInformation> indexCacheLoader =
      new CacheLoader<File, ShuffleIndexInformation>() {
        public ShuffleIndexInformation load(File file) throws IOException {
          return new ShuffleIndexInformation(file);
        }
      };
  // Cache shuffle index files up to a total weight of indexCacheSize bytes,
  // where each entry's weight is the in-memory size of its index data.
  shuffleIndexCache = CacheBuilder.newBuilder()
    .maximumWeight(JavaUtils.byteStringAsBytes(indexCacheSize))
    .weigher(new Weigher<File, ShuffleIndexInformation>() {
      public int weigh(File file, ShuffleIndexInformation indexInfo) {
        return indexInfo.getSize();
      }
    })
    .build(indexCacheLoader);
  // Restore executor registrations from LevelDB if a backing file is available;
  // otherwise start with an empty in-memory map.
  db = LevelDBProvider.initLevelDB(this.registeredExecutorFile, CURRENT_VERSION, mapper);
  if (db != null) {
    executors = reloadRegisteredExecutors(db);
  } else {
    executors = Maps.newConcurrentMap();
  }
  this.directoryCleaner = directoryCleaner;
}
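/*
 * A minimal, self-contained sketch of the weight-bounded Guava cache pattern used
 * above: maximumWeight caps the cache by total weight (here, bytes of index data)
 * rather than by entry count, and the Weigher reports each entry's contribution.
 * The FakeIndex type is hypothetical, standing in for ShuffleIndexInformation.
 */
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.Weigher;
import java.util.concurrent.ExecutionException;

class WeightedCacheSketch {
  static class FakeIndex {
    final int sizeInBytes;
    FakeIndex(int sizeInBytes) { this.sizeInBytes = sizeInBytes; }
  }

  public static void main(String[] args) throws ExecutionException {
    LoadingCache<String, FakeIndex> cache = CacheBuilder.newBuilder()
      .maximumWeight(100 * 1024 * 1024)  // ~100m, analogous to the default above
      .weigher(new Weigher<String, FakeIndex>() {
        public int weigh(String key, FakeIndex index) {
          return index.sizeInBytes;      // evict by total bytes, not entry count
        }
      })
      .build(new CacheLoader<String, FakeIndex>() {
        public FakeIndex load(String key) {
          return new FakeIndex(16 * 1024); // loaded on first access, then cached
        }
      });

    FakeIndex idx = cache.get("shuffle_0_0_0.index"); // loads, caches, returns
    System.out.println(idx.sizeInBytes);
  }
}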