// Interface default method: seed the shared pools when Hadoop applies its Configuration.
@Override
public default void setConf(final Configuration configuration) {
    HadoopPools.initialize(configuration);
}
// Expose the process-wide GryoPool held by the static HadoopPools holder.
public GryoPool getGryoPool() {
    return HadoopPools.getGryoPool();
}
@Override
public void close() {
    HadoopPools.close();
}
// Re-initialize the shared pools whenever a new configuration is applied.
@Override
public void applyConfiguration(final Configuration configuration) {
    HadoopPools.initialize(configuration);
}
// Deserialize a class/object pair with a Kryo instance borrowed from the shared pool.
@Override
public Object readClassAndObject(final InputStream inputStream) {
    return HadoopPools.getGryoPool().readWithKryo(kryo -> kryo.readClassAndObject(new Input(inputStream)));
}
// Bridge a Hadoop Configuration into the Apache Commons Configuration the pools expect.
public synchronized static void initialize(final org.apache.hadoop.conf.Configuration configuration) {
    HadoopPools.initialize(ConfUtil.makeApacheConfiguration(configuration));
}
// Serialize a class/object pair; the Output is flushed so all buffered bytes reach
// the underlying stream before the pooled Kryo instance is handed back.
@Override
public void writeClassAndObject(final Object object, final OutputStream outputStream) {
    HadoopPools.getGryoPool().writeWithKryo(kryo -> {
        final Output output = new Output(outputStream);
        kryo.writeClassAndObject(output, object);
        output.flush();
    });
}
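The two shim methods above are thin adapters over GryoPool's read/write helpers. A minimal, self-contained round-trip sketch against that same pool API follows; the standalone pool, the byte-array streams, and the sample String payload are illustrative, not from the source:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoPool;
import org.apache.tinkerpop.shaded.kryo.io.Input;
import org.apache.tinkerpop.shaded.kryo.io.Output;

public class GryoRoundTripSketch {
    public static void main(final String[] args) {
        // a small standalone pool for illustration; production code goes through HadoopPools
        final GryoPool pool = GryoPool.build().poolSize(1).create();

        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        pool.writeWithKryo(kryo -> {
            final Output output = new Output(bytes);
            kryo.writeClassAndObject(output, "hello gryo");
            output.flush(); // same flush discipline as writeClassAndObject above
        });

        final Object back = pool.readWithKryo(kryo ->
                kryo.readClassAndObject(new Input(new ByteArrayInputStream(bytes.toByteArray()))));
        System.out.println(back); // prints: hello gryo
    }
}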
// Lazy fallback: if nothing initialized the pools explicitly, build a configuration
// from "tinkerpop."-prefixed JVM system properties and warn about it.
public static GryoPool getGryoPool() {
    if (!INITIALIZED) {
        final Configuration configuration = SystemUtil.getSystemPropertiesConfiguration("tinkerpop", true);
        HadoopGraph.LOGGER.warn("The " + HadoopPools.class.getSimpleName() +
                " has not been initialized, using system properties configuration: " +
                ConfigurationUtils.toString(configuration));
        initialize(configuration);
    }
    return GRYO_POOL;
}
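A sketch of that fallback path: setting a "tinkerpop."-prefixed system property before first use feeds the lazily built pool. This assumes the true flag to getSystemPropertiesConfiguration strips the prefix, so the property below arrives under the key IoRegistry.IO_REGISTRY; the registry class name is hypothetical:

// hypothetical IoRegistry implementation, for illustration only
System.setProperty("tinkerpop." + IoRegistry.IO_REGISTRY, "com.example.MyIoRegistry");

// first access logs the warning and initializes from system properties
final GryoPool pool = HadoopPools.getGryoPool();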
public GryoSerializer(final SparkConf sparkConfiguration) {
    final long bufferSizeKb = sparkConfiguration.getSizeAsKb("spark.kryoserializer.buffer", "64k");
    final long maxBufferSizeMb = sparkConfiguration.getSizeAsMb("spark.kryoserializer.buffer.max", "64m");
    this.referenceTracking = sparkConfiguration.getBoolean("spark.kryo.referenceTracking", true);
    this.registrationRequired = sparkConfiguration.getBoolean(Constants.SPARK_KRYO_REGISTRATION_REQUIRED, false);
    if (bufferSizeKb >= ByteUnit.GiB.toKiB(2L)) {
        throw new IllegalArgumentException("spark.kryoserializer.buffer must be less than 2048 mb, got: " + bufferSizeKb + " kb.");
    }
    this.bufferSize = (int) ByteUnit.KiB.toBytes(bufferSizeKb);
    if (maxBufferSizeMb >= ByteUnit.GiB.toMiB(2L)) {
        throw new IllegalArgumentException("spark.kryoserializer.buffer.max must be less than 2048 mb, got: " + maxBufferSizeMb + " mb.");
    }
    this.maxBufferSize = (int) ByteUnit.MiB.toBytes(maxBufferSizeMb);
    //this.userRegistrator = sparkConfiguration.getOption("spark.kryo.registrator");

    // create a GryoPool and store it in the static HadoopPools
    final List<Object> ioRegistries = new ArrayList<>();
    ioRegistries.addAll(makeApacheConfiguration(sparkConfiguration).getList(IoRegistry.IO_REGISTRY, Collections.emptyList()));
    // reference the nested SparkIoRegistry class by its binary ($-separated) name
    ioRegistries.add(SparkIoRegistry.class.getCanonicalName().replace(
            "." + SparkIoRegistry.class.getSimpleName(),
            "$" + SparkIoRegistry.class.getSimpleName()));
    HadoopPools.initialize(GryoPool.build().
            version(GryoVersion.valueOf(sparkConfiguration.get(GryoPool.CONFIG_IO_GRYO_VERSION, GryoPool.CONFIG_IO_GRYO_POOL_VERSION_DEFAULT.name()))).
            poolSize(sparkConfiguration.getInt(GryoPool.CONFIG_IO_GRYO_POOL_SIZE, GryoPool.CONFIG_IO_GRYO_POOL_SIZE_DEFAULT)).
            ioRegistries(ioRegistries).
            initializeMapper(builder -> builder.referenceTracking(this.referenceTracking).
                    registrationRequired(this.registrationRequired)).
            create());
}
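Because the constructor reads everything from SparkConf, wiring the serializer into a Spark job is purely a configuration matter. A sketch of the usual setup; the app name, master URL, and buffer sizes are placeholders:

import org.apache.spark.SparkConf;
import org.apache.tinkerpop.gremlin.spark.structure.io.gryo.GryoSerializer;

final SparkConf conf = new SparkConf()
        .setAppName("gryo-example")          // placeholder
        .setMaster("local[4]")               // placeholder
        // Spark instantiates the serializer with this SparkConf, which runs the
        // constructor above and seeds the static HadoopPools with a GryoPool
        .set("spark.serializer", GryoSerializer.class.getCanonicalName())
        .set("spark.kryoserializer.buffer", "64k")
        .set("spark.kryoserializer.buffer.max", "64m");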
// Register the JanusGraph IoRegistry with the shared pools so pooled Kryo
// instances can (de)serialize JanusGraph types.
public JanusGraphKryoShimService() {
    final BaseConfiguration c = new BaseConfiguration();
    c.setProperty(IoRegistry.IO_REGISTRY, ImmutableList.of(JanusGraphIoRegistry.class.getCanonicalName()));
    HadoopPools.initialize(c);
}
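The constructor only seeds HadoopPools with the JanusGraph registry; the service itself is discovered by TinkerPop's KryoShimServiceLoader via the standard META-INF/services mechanism (assuming the usual provider file is on the classpath). Callers then go through the loader's static helpers rather than the service directly; the payload below is illustrative:

import java.io.ByteArrayInputStream;
import org.apache.tinkerpop.gremlin.structure.io.gryo.kryoshim.KryoShimServiceLoader;

// round-trip through whichever shim service the loader selected
final byte[] bytes = KryoShimServiceLoader.writeClassAndObjectToBytes("hello janusgraph");
final Object back = KryoShimServiceLoader.readClassAndObject(new ByteArrayInputStream(bytes));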