/** Exposes the cached {@code topN} entries as a stream; does not copy or reorder them. */
Stream<Pair<String, Double>> getTopN() {
    return this.topN.stream();
}
@Override
public Stream<? extends Scannable> inners() {
    // Each cache entry wraps the Scannable it holds in its `cached` field; unwrap it.
    return cache.stream().map(entry -> entry.cached);
}
/** For testing only - should always be the same as size() */
public int queueSize() {
    // mapToInt + sum avoids boxing every element's size, unlike the former
    // map(el -> el.size).reduce(0, (l, r) -> l + r) pipeline; the result is identical.
    return queue.stream().mapToInt(el -> el.size).sum();
}
/** Renders all captured output lines as a single string, separated by the platform line separator. */
public String getOutputText() {
    Stream<String> lines = outputLines.stream().map(OutputLine::getLine);
    return lines.collect(joining(SystemUtils.LINE_SEPARATOR));
}
/** Returns the text of every captured output line, in capture order. */
public List<String> getOutputLines() {
    return outputLines.stream()
            .map(line -> line.getLine())
            .collect(toList());
}
/** Returns the text of captured lines that originated from the given source only. */
private List<String> getOutputLines(OutputLine.OutputSource source) {
    return outputLines.stream()
            .filter(outputLine -> outputLine.getSource().equals(source))
            .map(OutputLine::getLine)
            .collect(toList());
}
}
@Inject
public NetflowV9CodecAggregator() {
    // TODO customize
    // Template cache: bounded by entry count; evictions are only logged.
    this.templateCache = CacheBuilder.newBuilder()
            .maximumSize(5000)
            .removalListener(notification -> LOG.debug("Removed {} from template cache for reason {}", notification.getKey(), notification.getCause()))
            .recordStats()
            .build();
    // Packet cache: bounded by total buffered bytes and by age, so partial
    // packets waiting for their template cannot accumulate without limit.
    this.packetCache = CacheBuilder.newBuilder()
            .expireAfterWrite(1, TimeUnit.MINUTES)
            .maximumWeight(Size.megabytes(1).toBytes())
            .removalListener((RemovalListener<TemplateKey, Queue<PacketBytes>>) notification -> LOG.debug("Removed {} from packet cache for reason {}", notification.getKey(), notification.getCause()))
            // Weigh each entry by the total readable bytes of its queued packets.
            // mapToInt(...).sum() replaces map(...).reduce(0, Integer::sum) to avoid
            // boxing an Integer per packet on every weigh; the value is identical.
            .weigher((key, value) -> value.stream().mapToInt(PacketBytes::readableBytes).sum())
            .recordStats()
            .build();
}
/** Snapshots the channel's outbound messages as ByteBufs without consuming them. */
private List<ByteBuf> peekAllOutboundMessages() {
    return channel.outboundMessages().stream()
            .map(ByteBuf.class::cast)
            .collect(toList());
}
/** Runs the reader once and returns a mid-queue index value, stripped of "0x..." formatting noise. */
private String findAnExistingIndex() {
    basicReader().execute();
    // Index lines are the ones the reader prints starting with "0x".
    final List<String> indices = capturedOutput.stream()
            .filter(line -> line.startsWith("0x"))
            .collect(Collectors.toList());
    capturedOutput.clear();
    // Pick one from the middle of the range and strip whitespace and colons.
    final String middle = indices.get(indices.size() / 2);
    return middle.trim().replace(":", "");
}
/**
 * Asserts that {@code actualOutput} contains exactly the stream records in
 * {@code expectedOutput}: same count of StreamRecord elements, and every
 * expected record present (other element kinds, e.g. watermarks, are ignored
 * for the size check).
 */
private void assertOutput(
        Iterable<StreamRecord<Tuple2<TestElem, TestElem>>> expectedOutput,
        Queue<Object> actualOutput) {
    // count() replaces collect(toList()).size(): no throwaway list is materialized.
    int actualSize = (int) actualOutput.stream()
            .filter(elem -> elem instanceof StreamRecord)
            .count();
    int expectedSize = Iterables.size(expectedOutput);

    Assert.assertEquals(
            "Expected and actual size of stream records different",
            expectedSize,
            actualSize);

    for (StreamRecord<Tuple2<TestElem, TestElem>> record : expectedOutput) {
        Assert.assertTrue(actualOutput.contains(record));
    }
}
@Test
public void shouldNotFailWhenNoMetadata() throws IOException {
    // Delete one store file so the reader must cope with missing metadata.
    // Files.list returns an open directory stream; wrap it in try-with-resources
    // so the directory handle is released (the original leaked it).
    try (java.util.stream.Stream<java.nio.file.Path> files = Files.list(dataDir)) {
        files.filter(f -> f.getFileName().toString().endsWith(SingleTableStore.SUFFIX))
                .findFirst()
                .ifPresent(path -> path.toFile().delete());
    }
    basicReader().execute();
    assertThat(capturedOutput.stream().anyMatch(msg -> msg.contains("history:")), is(true));
}
/**
 * Completes the unspilling of a previously spilled lookup source: feeds the
 * unspilled pages into the index, rebuilds the lookup source, verifies its
 * checksum (when one was recorded at spill time), and publishes it via the
 * spilled-lookup-source handle. Returns early (no-op) while the unspill
 * future is still running; must only be called in INPUT_UNSPILLING state.
 */
private void finishLookupSourceUnspilling() {
    checkState(state == State.INPUT_UNSPILLING);
    if (!unspillInProgress.get().isDone()) {
        // Pages have not be unspilled yet.
        return;
    }

    // Use Queue so that Pages already consumed by Index are not retained by us.
    Queue<Page> pages = new ArrayDeque<>(getDone(unspillInProgress.get()));
    long memoryRetainedByRemainingPages = pages.stream()
            .mapToLong(Page::getRetainedSizeInBytes)
            .sum();
    // Account for both the not-yet-consumed pages and the growing index.
    localUserMemoryContext.setBytes(memoryRetainedByRemainingPages + index.getEstimatedSize().toBytes());

    while (!pages.isEmpty()) {
        Page next = pages.remove();
        index.addPage(next);
        // There is no attempt to compact index, since unspilled pages are unlikely to have blocks with retained size > logical size.
        // Re-account after each page: the page moves from our queue into the index.
        memoryRetainedByRemainingPages -= next.getRetainedSizeInBytes();
        localUserMemoryContext.setBytes(memoryRetainedByRemainingPages + index.getEstimatedSize().toBytes());
    }

    LookupSourceSupplier partition = buildLookupSource();
    // Guard against corruption: the rebuilt source must match the checksum taken before spilling.
    lookupSourceChecksum.ifPresent(checksum ->
            checkState(partition.checksum() == checksum, "Unspilled lookupSource checksum does not match original one"));
    localUserMemoryContext.setBytes(partition.get().getInMemorySizeInBytes());

    spilledLookupSourceHandle.setLookupSource(partition);
    state = State.INPUT_UNSPILLED_AND_BUILT;
}
@Test
public void shouldFilterByMultipleInclusionRegex() {
    basicReader().withInclusionRegex(".*bye$").withInclusionRegex(".*o.*").execute();
    assertThat(capturedOutput.size(), is(24));
    // Non-index lines (those not starting with "0x") must all contain "goodbye"...
    capturedOutput.stream()
            .filter(line -> !line.startsWith("0x"))
            .forEach(line -> assertThat(line, containsString("goodbye")));
    // ...and none of them may contain "hello".
    capturedOutput.stream()
            .filter(line -> !line.startsWith("0x"))
            .forEach(line -> assertThat(line, not(containsString("hello"))));
}
@Test
public void shouldIncludeMessageHistoryByDefault() {
    basicReader().execute();
    // Without any history option set, the output should still carry "history:" lines.
    final boolean historyPresent = capturedOutput.stream().anyMatch(line -> line.contains("history:"));
    assertThat(historyPresent, is(true));
}
@Test
public void shouldFilterByInclusionRegex() {
    basicReader().withInclusionRegex(".*good.*").execute();
    assertThat(capturedOutput.size(), is(24));
    // Every non-index line ("0x..." lines are index output) must match the inclusion regex.
    capturedOutput.stream()
            .filter(line -> !line.startsWith("0x"))
            .forEach(line -> assertThat(line, containsString("goodbye")));
}
@Test
public void shouldConvertEntriesToText() {
    basicReader().execute();
    assertThat(capturedOutput.size(), is(48));
    // At least one rendered entry should carry the expected payload text.
    final boolean helloSeen = capturedOutput.stream().anyMatch(line -> line.contains("hello"));
    assertThat(helloSeen, is(true));
}
@Test
public void shouldNotRewindPastStartOfQueueWhenDisplayingHistory() {
    // Asking for more history than exists must clamp at the start of the queue.
    basicReader().historyRecords(Long.MAX_VALUE).execute();
    final long nonIndexLines = capturedOutput.stream()
            .filter(line -> !line.startsWith("0x"))
            .count();
    assertThat(nonIndexLines, is(24L));
}
@Test
public void shouldReturnNoMoreThanTheSpecifiedNumberOfMaxRecords() {
    basicReader().historyRecords(5).execute();
    // Only the requested number of (non-index) records may be emitted.
    final long nonIndexLines = capturedOutput.stream()
            .filter(line -> !line.startsWith("0x"))
            .count();
    assertThat(nonIndexLines, is(5L));
}
@Test
public void shouldFilterByMultipleExclusionRegex() {
    // The two exclusion patterns together should suppress every record.
    basicReader().withExclusionRegex(".*bye$").withExclusionRegex(".*ell.*").execute();
    final long nonIndexLines = capturedOutput.stream()
            .filter(line -> !line.startsWith("0x"))
            .count();
    assertThat(nonIndexLines, is(0L));
}
@Test public void shouldApplyIncludeRegexToHistoryMessagesAndBusinessMessages() { basicReader(). // matches goodbye, but not hello or history withInclusionRegex("goodbye"). asMethodReader(). execute(); assertThat(capturedOutput.stream().anyMatch(msg -> msg.contains("history:")), is(false)); }