@Override
public Type createType(TypeManager typeManager, List<TypeParameter> parameters)
{
    // A function type is parameterized as (argumentTypes..., returnType), so the
    // return type is always present: there must be at least one parameter.
    checkArgument(parameters.size() >= 1, "Function type must have at least one parameter, got %s", parameters);
    // Every parameter must be a plain type (no literals / named fields allowed here).
    for (TypeParameter parameter : parameters) {
        checkArgument(
                parameter.getKind() == ParameterKind.TYPE,
                "Expected only types as a parameters, got %s",
                parameters);
    }
    List<Type> types = parameters.stream()
            .map(TypeParameter::getType)
            .collect(toList());
    // The last type parameter is the return type; everything before it is an argument type.
    int returnTypeIndex = types.size() - 1;
    return new FunctionType(types.subList(0, returnTypeIndex), types.get(returnTypeIndex));
}
}
private static void verifyFileHasColumnNames(List<String> physicalColumnNames, Path path)
{
    // Writers that lack real column names emit synthetic placeholders that match
    // DEFAULT_HIVE_COLUMN_NAME_PATTERN (presumably names like "_col0" — confirm
    // against the pattern's definition). If every column name is synthetic, the
    // footer carries no usable names and the file must be rejected.
    if (physicalColumnNames.isEmpty()) {
        return;
    }
    for (String physicalColumnName : physicalColumnNames) {
        if (!DEFAULT_HIVE_COLUMN_NAME_PATTERN.matcher(physicalColumnName).matches()) {
            // At least one genuine column name exists — the footer is usable.
            return;
        }
    }
    throw new PrestoException(
            HIVE_FILE_MISSING_COLUMN_NAMES,
            "ORC file does not contain column names in the footer: " + path);
}
@Test public void shouldBeThreadSafe() { // When: final List<CompletableFuture<Void>> futures = IntStream.range(1, 11).parallel() .mapToObj(idx -> { final CompletableFuture<Void> f = futureStore.getFutureForSequenceNumber(idx); if (idx % 10 == 0) { futureStore.completeFuturesUpToAndIncludingSequenceNumber(idx); } return f; }) .collect(Collectors.toList()); // Then: assertThat(futures.stream().allMatch(CompletableFuture::isDone), is(true)); }
@Override
public boolean isReady(Set<String> nodeIds)
{
    // Ready once every node in nodeIds has reported a heartbeat, or once the
    // recovery window has elapsed (in which case we give up and proceed anyway).
    if (exceedsMaxTimeOut()) {
        // Log which nodes never recovered before declaring readiness by timeout.
        Set<String> missing = nodeIds.stream()
                .filter(id -> !this.reportedIds.contains(id))
                .collect(toSet());
        LOG.warn("Failed to recover heartbeats for nodes: {} with timeout {}s", missing, NODE_MAX_TIMEOUT_SECS);
        return true;
    }
    // Set.containsAll expresses the "all reported" check directly, replacing the
    // hand-rolled stream().allMatch(...) over per-element contains() calls.
    return this.reportedIds.containsAll(nodeIds);
}
@SafeVarargs @Override public final Iterable<EntityRef> getEntitiesWith(Class<? extends Component>... componentClasses) { return () -> entityStore.keySet().stream() //Keep entities which have all of the required components .filter(id -> Arrays.stream(componentClasses) .allMatch(component -> componentStore.get(id, component) != null)) .map(id -> getEntity(id)) .iterator(); }
private ComputedStatistics(
        List<String> groupingColumns,
        List<Block> groupingValues,
        Map<TableStatisticType, Block> tableStatistics,
        Map<ColumnStatisticMetadata, Block> columnStatistics)
{
    // Defensive copies wrapped unmodifiable: instances are immutable after construction.
    this.groupingColumns = unmodifiableList(new ArrayList<>(requireNonNull(groupingColumns, "groupingColumns is null")));
    this.groupingValues = unmodifiableList(new ArrayList<>(requireNonNull(groupingValues, "groupingValues is null")));
    checkSingleValueBlocks(groupingValues, "grouping value blocks are expected to be single value blocks");
    this.tableStatistics = unmodifiableMap(new HashMap<>(requireNonNull(tableStatistics, "tableStatistics is null")));
    checkSingleValueBlocks(tableStatistics.values(), "computed table statistics blocks are expected to be single value blocks");
    this.columnStatistics = unmodifiableMap(new HashMap<>(requireNonNull(columnStatistics, "columnStatistics is null")));
    checkSingleValueBlocks(columnStatistics.values(), "computed column statistics blocks are expected to be single value blocks");
}

/**
 * Verifies that every block in the collection is a single-value block; throws
 * {@link IllegalArgumentException} with the given message otherwise. Extracted
 * to remove the triplicated validation in the constructor.
 */
// Fully-qualified Collection type to avoid depending on an explicit java.util.Collection import.
private static void checkSingleValueBlocks(java.util.Collection<Block> blocks, String message)
{
    if (!blocks.stream().allMatch(ComputedStatistics::isSingleValueBlock)) {
        throw new IllegalArgumentException(message);
    }
}
private static List<ColumnStatistics> toFileStats(List<List<ColumnStatistics>> stripes)
{
    // Merge per-stripe column statistics into a single file-level statistic per column.
    if (stripes.isEmpty()) {
        return ImmutableList.of();
    }
    // Every stripe must report statistics for the same number of columns.
    int columnCount = stripes.get(0).size();
    for (List<ColumnStatistics> stripe : stripes) {
        checkArgument(stripe.size() == columnCount);
    }
    ImmutableList.Builder<ColumnStatistics> merged = ImmutableList.builder();
    for (int column = 0; column < columnCount; column++) {
        int columnIndex = column; // effectively-final copy for use in the lambda
        merged.add(ColumnStatistics.mergeColumnStatistics(stripes.stream()
                .map(stripe -> stripe.get(columnIndex))
                .collect(toList())));
    }
    return merged.build();
}
@SafeVarargs private final SELF satisfiesAnyOfAssertionsGroups(Consumer<ACTUAL>... assertionsGroups) throws AssertionError { checkArgument(stream(assertionsGroups).allMatch(assertions -> assertions != null), "No assertions group should be null"); if (stream(assertionsGroups).anyMatch(this::satisfiesAssertions)) return myself; // none of the assertions group was met! let's report all the errors List<AssertionError> assertionErrors = stream(assertionsGroups).map(this::catchAssertionError).collect(toList()); throw multipleAssertionsError(assertionErrors); }
public boolean isDecomposable(FunctionRegistry functionRegistry)
{
    // An aggregation node is decomposable (splittable into partial + final steps)
    // only when no aggregate uses ORDER BY or DISTINCT and every aggregate
    // function implementation supports decomposition.
    boolean orderByPresent = getAggregations().values().stream()
            .anyMatch(aggregation -> aggregation.getCall().getOrderBy().isPresent());
    boolean distinctPresent = getAggregations().values().stream()
            .anyMatch(aggregation -> aggregation.getCall().isDistinct());
    boolean allDecomposable = getAggregations().values().stream()
            .allMatch(aggregation -> functionRegistry
                    .getAggregateFunctionImplementation(aggregation.getSignature())
                    .isDecomposable());
    return allDecomposable && !orderByPresent && !distinctPresent;
}
public static boolean hasCompatibleCapabilities( RecordFormats one, RecordFormats other, CapabilityType type ) { Set<Capability> myFormatCapabilities = Stream.of( one.capabilities() ) .filter( capability -> capability.isType( type ) ).collect( toSet() ); Set<Capability> otherFormatCapabilities = Stream.of( other.capabilities() ) .filter( capability -> capability.isType( type ) ).collect( toSet() ); if ( myFormatCapabilities.equals( otherFormatCapabilities ) ) { // If they have the same capabilities then of course they are compatible return true; } boolean capabilitiesNotRemoved = otherFormatCapabilities.containsAll( myFormatCapabilities ); otherFormatCapabilities.removeAll( myFormatCapabilities ); boolean allAddedAreAdditive = otherFormatCapabilities.stream().allMatch( Capability::isAdditive ); // Even if capabilities of the two aren't the same then there's a special case where if the additional // capabilities of the other format are all additive then they are also compatible because no data // in the existing store needs to be migrated. return capabilitiesNotRemoved && allAddedAreAdditive; }
private boolean checkForCustomFieldMapping(DescriptorProperties descriptorProperties, TableSchema schema) { final Map<String, String> fieldMapping = SchemaValidator.deriveFieldMapping( descriptorProperties, Optional.of(schema.toRowType())); // until FLINK-9870 is fixed we assume that the table schema is the output type return fieldMapping.size() != schema.getFieldNames().length || !fieldMapping.entrySet().stream().allMatch(mapping -> mapping.getKey().equals(mapping.getValue())); }