public void init(Map stormConf, TopologyContext topologyContext, OutputCollector collector, ParserConfigurations configurations) {
  // Bulk-capable writers take the strategy's configuration as-is; single-message
  // writers get it wrapped in a facade that forces a batch size of one.
  writerTransformer = isBulk
      ? config -> configStrategy.createWriterConfig(messageWriter, config)
      : config -> new SingleBatchConfigurationFacade(configStrategy.createWriterConfig(messageWriter, config));
  try {
    messageWriter.init(stormConf, topologyContext, writerTransformer.apply(configurations));
  } catch (Exception e) {
    // A writer that cannot initialize leaves the bolt unusable; fail fast with the cause attached.
    throw new IllegalStateException("Unable to initialize message writer", e);
  }
  // Track in-flight tuples in a HashSet (no duplicates) rather than the component's default collection.
  this.writerComponent = new BulkWriterComponent<JSONObject>(collector, isBulk, isBulk) {
    @Override
    protected Collection<Tuple> createTupleCollection() {
      return new HashSet<>();
    }
  };
}
/**
 * Builds an indexing-specific writer configuration for the given writer.
 *
 * @param writer  the bulk writer whose name scopes the configuration lookup
 * @param configs must be an {@code IndexingConfigurations} instance
 * @return a writer configuration backed by the supplied indexing configurations
 * @throws IllegalArgumentException if {@code configs} is not an {@code IndexingConfigurations}
 */
@Override
public WriterConfiguration createWriterConfig(BulkMessageWriter writer, Configurations configs) {
  // Guard clause: reject any Configurations subtype we cannot handle.
  if (!(configs instanceof IndexingConfigurations)) {
    throw new IllegalArgumentException(
        "Expected config of type IndexingConfigurations but found " + configs.getClass());
  }
  return new IndexingWriterConfiguration(writer.getName(), (IndexingConfigurations) configs);
}
// Perform the bulk write for this sensor's accumulated tuples/messages.
// NOTE(review): handling of the returned response (ack/fail bookkeeping) is outside this view.
BulkWriterResponse response = bulkMessageWriter.write(sensorType, configurations, tupleList, messageList);
// NOTE(review): Mockito test fragment — the enclosing test method and earlier setup are not visible here.
new FieldsMatcher("message")));
Map stormConf = new HashMap();
// First prepare(): writer init throws, so prepare is expected to blow up.
doThrow(new Exception()).when(bulkMessageWriter).init(eq(stormConf),any(TopologyContext.class), any(WriterConfiguration.class));
try {
  bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
} catch(RuntimeException e) {} // expected: init failure surfaces as a RuntimeException
// Second prepare() against a healthy mock must call init exactly once.
reset(bulkMessageWriter);
when(bulkMessageWriter.getName()).thenReturn("hdfs");
bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
verify(bulkMessageWriter, times(1)).init(eq(stormConf),any(TopologyContext.class), any(WriterConfiguration.class));
tupleList = new ArrayList<>();
messageList = new ArrayList<>();
messageList.add(fullMessageList.get(i)); // 'i' comes from an enclosing loop not shown in this view
// While under the batch threshold, execute() must not trigger a write.
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(0)).write(eq(sensorType)
    , any(WriterConfiguration.class), eq(tupleList), eq(messageList));
// Stub a fully-successful bulk response; the flushing execute() must write exactly once.
BulkWriterResponse response = new BulkWriterResponse();
response.addAllSuccesses(tupleList);
when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList)
    , argThat(new MessageListMatcher(messageList)))).thenReturn(response);
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(1)).write(eq(sensorType)
    , any(WriterConfiguration.class), eq(tupleList)
    , argThat(new MessageListMatcher(messageList)));
// After the flush, ack() is expected 5 times for this tuple.
verify(outputCollector, times(5)).ack(tuple);
reset(outputCollector);
// NOTE(review): test fragment — the for-loop below never closes in this view,
// so lines (including the flush-triggering execute) appear to be elided.
verify(batchWriter, times(1)).init(any(), any(), any());
for(int i = 0;i < 4;++i) {
  Tuple t = tuples.get(i);
  bolt.execute(t);
  // While under the batch size, nothing is acked and nothing is written.
  verify(outputCollector, times(0)).ack(t);
  verify(batchWriter, times(0)).write(eq(sensorType), any(), any(), any());
  when(batchWriter.write(any(), any(), any(), any())).thenReturn(writerResponse);
  // Post-flush expectations: each tuple acked once, one bulk write issued.
  verify(outputCollector, times(1)).ack(t);
  verify(batchWriter, times(1)).write(eq(sensorType), any(), any(), any());
  // A clean flush must not report errors or fail tuples.
  verify(outputCollector, times(0)).reportError(any());
  verify(outputCollector, times(0)).fail(any());
// NOTE(review): test fragment — starts mid-argument-list; enclosing stubbing and method signature not visible.
, argThat(new FieldsMatcher("message")));
Map stormConf = new HashMap();
when(bulkMessageWriter.getName()).thenReturn("elasticsearch");
// prepare() variant that injects a test clock for batch-timeout behavior.
bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector, clock);
verify(bulkMessageWriter, times(1)).init(eq(stormConf),any(TopologyContext.class), any(WriterConfiguration.class));
// Default batch timeout is expected to resolve to 4 (units per bolt implementation — not visible here).
int batchTimeout = bulkMessageWriterBolt.getDefaultBatchTimeout();
assertEquals(4, batchTimeout);
messageList.add(fullMessageList.get(i)); // 'i' comes from an enclosing loop not shown in this view
// Under the batch threshold: no write yet.
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(0)).write(eq(sensorType)
    , any(WriterConfiguration.class), eq(tupleList), eq(messageList));
// Stub a fully-successful response; the flushing execute() must write exactly once.
BulkWriterResponse response = new BulkWriterResponse();
response.addAllSuccesses(tupleList);
when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList)
    , argThat(new MessageListMatcher(messageList)))).thenReturn(response);
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(1)).write(eq(sensorType)
    , any(WriterConfiguration.class)
    , eq(tupleList), argThat(new MessageListMatcher(messageList)));
// NOTE(review): error-path test fragment — every bulk write is stubbed to throw.
doThrow(new Exception()).write(any(), any(), any(), any()); // (placeholder removed — see next line)
// NOTE(review): clock-driven timeout-flush test fragment — starts mid-argument-list.
, argThat(new FieldsMatcher("message")));
Map stormConf = new HashMap();
when(bulkMessageWriter.getName()).thenReturn("elasticsearch");
// prepare() variant that injects a controllable clock.
bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector, clock);
verify(bulkMessageWriter, times(1)).init(eq(stormConf),any(TopologyContext.class)
    , any(WriterConfiguration.class));
int batchTimeout = bulkMessageWriterBolt.getDefaultBatchTimeout();
messageList.add(fullMessageList.get(i)); // 'i' comes from an enclosing loop not shown in this view
// Under the batch threshold: no write yet.
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(0)).write(eq(sensorType)
    , any(WriterConfiguration.class), eq(tupleList), eq(messageList));
BulkWriterResponse response = new BulkWriterResponse();
response.addAllSuccesses(tupleList);
when(bulkMessageWriter.write(eq(sensorType), any(WriterConfiguration.class), eq(tupleList)
    , argThat(new MessageListMatcher(messageList)))).thenReturn(response);
// At t=2s the timeout has not elapsed, so execute() must still not flush.
clock.advanceToSeconds(2);
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(0)).write(eq(sensorType)
    , any(WriterConfiguration.class)
    , eq(tupleList), argThat(new MessageListMatcher(messageList)));
// At t=9s the timeout has elapsed; execute() must flush exactly once.
clock.advanceToSeconds(9);
bulkMessageWriterBolt.execute(tuple);
verify(bulkMessageWriter, times(1)).write(eq(sensorType)
    , any(WriterConfiguration.class)
    , eq(tupleList), argThat(new MessageListMatcher(messageList)));
// NOTE(review): test fragment — 't', 'tuples', 'goodTuple', and 'writerResponse' are defined outside this view.
verify(batchWriter, times(1)).init(any(), any(), any());
bolt.execute(t);
// Under the batch threshold: nothing acked, nothing written.
verify(outputCollector, times(0)).ack(t);
verify(batchWriter, times(0)).write(eq(sensorType), any(), any(), any());
// Mark the whole batch plus goodTuple as successful in the stubbed response.
writerResponse.addAllSuccesses(tuples);
writerResponse.addSuccess(goodTuple);
when(batchWriter.write(any(), any(), any(), any())).thenReturn(writerResponse);
// Post-flush: one bulk write, an error reported, but no tuple failed.
// NOTE(review): the execute() that triggers the flush appears elided from this view.
verify(batchWriter, times(1)).write(eq(sensorType), any(), any(), any());
verify(outputCollector, times(1)).reportError(any());
verify(outputCollector, times(0)).fail(any());
// NOTE(review): fragment — the opening 'try {' is outside this view.
bulkMessageWriter.init(stormConf, context, writerconf);
} catch (Exception e) {
  // Wrap checked init failures as unchecked; the original cause is preserved.
  throw new RuntimeException(e);
// Surface, via Storm's error-reporting channel, that no sensor-specific writer config
// was found and defaults are in use. Non-fatal: nothing is thrown, processing continues.
collector.reportError(new Exception("WARNING: Default and (likely) unoptimized writer config used for " + bulkMessageWriter.getName() + " writer and sensor " + sensorType));
// NOTE(review): test fragment — 't' is used before the for-loop redeclares it, and the
// for-loop never closes in this view, so intervening lines appear elided.
verify(batchWriter, times(1)).init(any(), any(), any());
bolt.execute(t);
// Under the batch threshold: nothing acked, nothing written.
verify(outputCollector, times(0)).ack(t);
verify(batchWriter, times(0)).write(eq(sensorType), any(), any(), any());
// Stub a mixed response: goodTuple succeeds, errorTuple fails with an IllegalStateException.
writerResponse.addSuccess(goodTuple);
writerResponse.addError(new IllegalStateException(), errorTuple);
when(batchWriter.write(any(), any(), any(), any())).thenReturn(writerResponse);
bolt.execute(errorTuple);
for(Tuple t : tuples) {
  // Post-flush: one bulk write, the error reported, but fail() never called on these tuples.
  verify(batchWriter, times(1)).write(eq(sensorType), any(), any(), any());
  verify(outputCollector, times(1)).reportError(any());
  verify(outputCollector, times(0)).fail(any());