private void lazyInitWrite() {
    if (!initialized) {
        this.initialized = true;
        // create the REST writer only on first use; the -1/0 split arguments indicate
        // that no task/partition information is available for this writer
        this.writeClient = RestService.createWriter(clientSettings, -1, 0, LOG).repository;
    }
}
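// A minimal caller sketch (not from the source): the fields and write() method below are
// assumptions, shown only to illustrate the lazy-init contract — the writer is created on
// the first write rather than at construction time.
private boolean initialized = false;
private RestRepository writeClient;

public void write(Object doc) {
    lazyInitWrite();                  // no-op after the first call
    writeClient.writeToIndex(doc);    // RestRepository buffers entries and bulk-flushes internally
}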
protected void init() throws IOException {
    //int instances = detectNumberOfInstances(cfg);
    int currentInstance = detectCurrentInstance(cfg);

    if (log.isTraceEnabled()) {
        log.trace(String.format("EsRecordWriter instance [%s] initiating discovery of target shard...", currentInstance));
    }

    Settings settings = HadoopSettingsManager.loadFrom(cfg).copy();

    if (log.isTraceEnabled()) {
        log.trace(String.format("Init shard writer from cfg %s", HadoopCfgUtils.asProperties(cfg)));
    }

    // plug in the Writable-specific serialization strategies unless the user configured their own
    InitializationUtils.setValueWriterIfNotSet(settings, WritableValueWriter.class, log);
    InitializationUtils.setBytesConverterIfNeeded(settings, WritableBytesConverter.class, log);
    InitializationUtils.setFieldExtractorIfNotSet(settings, MapWritableFieldExtractor.class, log);

    // create the writer for this task instance; -1 means the total number of splits is unknown
    PartitionWriter pw = RestService.createWriter(settings, currentInstance, -1, log);
    this.repository = pw.repository;

    // keep the task alive (report progress) during long bulk flushes
    if (progressable != null) {
        this.beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log);
        this.beat.start();
    }
}
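// A counterpart sketch (assumed, not part of this excerpt): releasing what init() acquired.
// The method name doClose() is an assumption; the point is that the heartbeat stops before
// the repository performs its final flush on close.
protected void doClose() {
    if (beat != null) {
        beat.stop();         // stop reporting progress before the final flush
    }
    if (repository != null) {
        repository.close();  // flushes any buffered bulk entries, then releases the connection
    }
}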
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;

    // merge the topology-wide config with the bolt-specific one (bolt settings win)
    LinkedHashMap copy = new LinkedHashMap(conf);
    copy.putAll(boltConfig);

    StormSettings settings = new StormSettings(copy);
    flushOnTickTuple = settings.getStormTickTupleFlush();
    ackWrites = settings.getStormBoltAck();

    // when acking writes, trigger the flush manually so tuples are acked only after a successful write
    if (ackWrites) {
        settings.setProperty(ES_BATCH_FLUSH_MANUAL, Boolean.TRUE.toString());

        // align Bolt / es-hadoop batch settings
        numberOfEntries = settings.getStormBulkSize();
        settings.setProperty(ES_BATCH_SIZE_ENTRIES, String.valueOf(numberOfEntries));

        inflightTuples = new ArrayList<Tuple>(numberOfEntries + 1);
    }

    int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();

    // plug in the Storm tuple serialization strategies unless the user configured their own
    InitializationUtils.setValueWriterIfNotSet(settings, StormValueWriter.class, log);
    InitializationUtils.setBytesConverterIfNeeded(settings, StormTupleBytesConverter.class, log);
    InitializationUtils.setFieldExtractorIfNotSet(settings, StormTupleFieldExtractor.class, log);

    // one writer per bolt task, partitioned by the task index
    writer = RestService.createWriter(settings, context.getThisTaskIndex(), totalTasks, log);
}
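// A hypothetical wiring sketch (spout name, implementation, and parallelism are illustrative):
// Storm invokes prepare() above once per task at topology startup, passing the merged conf and
// the task's context; "storm/docs" is the target Elasticsearch resource (index/type).
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("event-spout", new RandomEventSpout());    // assumed spout implementation
builder.setBolt("es-bolt", new EsBolt("storm/docs"), 4)     // 4 parallel tasks -> totalTasks of 4 in prepare()
       .shuffleGrouping("event-spout");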