/**
 * Asserts that two named dependency sets are disjoint.
 *
 * @param intersect the intersection of the two sets (empty when the sets are disjoint)
 * @param setA display name of the first set, used only in the error message
 * @param setB display name of the second set, used only in the error message
 * @throws IllegalArgumentException if {@code intersect} is non-empty; the message lists the
 *     maven coordinates present in both sets
 */
private static void checkIntersection(
    Sets.SetView<VersionlessDependency> intersect, String setA, String setB) {
  // Build the (potentially large) coordinate listing and format the message only on failure;
  // the original computed both eagerly on every call, even when the check passed.
  if (!intersect.isEmpty()) {
    String intersectErrorString =
        intersect
            .stream()
            .map(VersionlessDependency::mavenCoords)
            .collect(Collectors.joining("\n"));
    // Same exception type Preconditions.checkArgument would have thrown.
    throw new IllegalArgumentException(
        String.format(
            "'%s' found in both '%s' & '%s', please remove from one of them.",
            intersectErrorString, setA, setB));
  }
}
/**
 * Recomputes the active-node count from the latest {@code AllNodes} snapshot and, once the
 * count reaches {@code executionMinCount}, completes all currently registered listener
 * futures exactly once.
 *
 * <p>Synchronized so the count update and the snapshot-and-clear of {@code futures} are
 * atomic with respect to concurrent registrations.
 */
private synchronized void updateAllNodes(AllNodes allNodes) {
  if (includeCoordinator) {
    currentCount = allNodes.getActiveNodes().size();
  } else {
    // Coordinators are excluded from the worker count in this configuration.
    currentCount = Sets.difference(allNodes.getActiveNodes(), allNodes.getActiveCoordinators()).size();
  }
  if (currentCount >= executionMinCount) {
    // Snapshot and clear under the lock, then complete the futures on the executor so
    // listener callbacks never run while this monitor is held.
    ImmutableList<SettableFuture<?>> listeners = ImmutableList.copyOf(futures);
    futures.clear();
    executor.submit(() -> listeners.forEach(listener -> listener.set(null)));
  }
}
// Closing brace of the enclosing class (declaration not visible in this view).
}
/**
 * Checks whether user is authorized to access file. Checks regardless of UI filter.
 *
 * <p>A user is authorized when their local name appears in the configured log users (or
 * nimbus admins, or the file's whitelist), or when any of their groups appears in the
 * configured log groups (or nimbus admin groups, or the file's whitelist).
 *
 * @param user username
 * @param fileName file name to access
 * @return true if the user may access the file, false otherwise
 */
public boolean isAuthorizedLogUser(String user, String fileName) {
  if (StringUtils.isEmpty(user) || StringUtils.isEmpty(fileName)) {
    return false;
  }
  // Resolve the whitelist once; the original looked it up twice per call.
  LogUserGroupWhitelist whitelist = getLogUserGroupWhitelist(fileName);
  if (whitelist == null) {
    return false;
  }

  List<String> logsUsers = new ArrayList<>();
  logsUsers.addAll(ObjectReader.getStrings(stormConf.get(DaemonConfig.LOGS_USERS)));
  logsUsers.addAll(ObjectReader.getStrings(stormConf.get(Config.NIMBUS_ADMINS)));
  logsUsers.addAll(whitelist.getUserWhitelist());

  List<String> logsGroups = new ArrayList<>();
  logsGroups.addAll(ObjectReader.getStrings(stormConf.get(DaemonConfig.LOGS_GROUPS)));
  logsGroups.addAll(ObjectReader.getStrings(stormConf.get(Config.NIMBUS_ADMINS_GROUPS)));
  logsGroups.addAll(whitelist.getGroupWhitelist());

  String userName = principalToLocal.toLocal(user);
  Set<String> groups = getUserGroups(userName);
  // contains() replaces stream().anyMatch(equals); isEmpty() short-circuits where size()
  // would walk the whole lazy intersection view.
  return logsUsers.contains(userName)
      || !Sets.intersection(groups, new HashSet<>(logsGroups)).isEmpty();
}
/**
 * Pushes the filter into whichever argument of the {@code Difference} shares variables with
 * the filter condition: the right argument takes the original filter; otherwise the left
 * argument takes a clone so the condition tree is never shared between nodes.
 */
@Override
public void meet(final Difference node) {
  // isEmpty() short-circuits on the first common element; size() counts the whole lazy view.
  if (!Sets.intersection(node.getRightArg().getBindingNames(), filterVars).isEmpty()) {
    relocate(filter, node.getRightArg());
  } else if (!Sets.intersection(node.getLeftArg().getBindingNames(), filterVars).isEmpty()) {
    // Clone the condition so the relocated filter does not alias the original's tree.
    final Filter clone = new Filter(filter.getArg(), filter.getCondition().clone());
    relocate(clone, node.getLeftArg());
  }
}
// Size of the combined key space: distinct keys across the system tags and the per-event
// MDC. Sets.union de-duplicates, so a key present in both maps is counted once.
@Override
public int size() {
  return Sets.union(systemTags.keySet(), eventMDC.keySet()).size();
}
// Closes the enclosing anonymous class (declaration not visible in this view).
};
/**
 * Computes recall = |correct ∩ result| / |correct|.
 *
 * @param correct the gold-standard set; may be null
 * @param result the predicted set; may be null
 * @return recall in [0, 1]; 0.0 when {@code correct} is empty; null when either input is null
 */
private Double getRecall(Set<String> correct, Set<String> result) {
  if (correct == null || result == null) {
    return null;
  }
  if (correct.isEmpty()) {
    // Avoid 0/0; an empty gold set yields zero recall by the original's convention.
    return 0.0;
  }
  // Count the overlap directly rather than materializing a Guava intersection view.
  long intersection = correct.stream().filter(result::contains).count();
  return (double) intersection / (double) correct.size();
}
/**
 * Computes the cosine similarity between the ancestor-closure term sets of the query and
 * target: |A ∩ B| / (sqrt(|A|) * sqrt(|B|)).
 *
 * @param query query term ids
 * @param target target term ids
 * @return similarity in [0, 1]; 0.0 when either ancestor set is empty (the original
 *     returned NaN from a zero denominator in that case)
 */
@Override
public double computeScore(Collection<TermId> query, Collection<TermId> target) {
  final Set<TermId> termIdsQuery = ontology.getAllAncestorTermIds(query, false);
  final Set<TermId> termIdsTarget = ontology.getAllAncestorTermIds(target, false);
  // Guard the denominator: either side empty would produce 0/0 == NaN.
  if (termIdsQuery.isEmpty() || termIdsTarget.isEmpty()) {
    return 0.0;
  }
  return Sets.intersection(termIdsQuery, termIdsTarget).size()
      / (Math.sqrt(termIdsQuery.size()) * Math.sqrt(termIdsTarget.size()));
}
// Size of the combined key space: distinct keys across the system tags and the per-event
// MDC. Sets.union de-duplicates, so a key present in both maps is counted once.
@Override
public int size() {
  return Sets.union(systemTags.keySet(), eventMDC.keySet()).size();
}
// Closes the enclosing anonymous class (declaration not visible in this view).
};
/**
 * Pushes the filter into whichever argument of the {@code Intersection} shares variables
 * with the filter condition: the right argument takes the original filter; otherwise the
 * left argument takes a clone so the condition tree is never shared between nodes.
 */
@Override
public void meet(final Intersection node) {
  // isEmpty() short-circuits on the first common element; size() counts the whole lazy view.
  if (!Sets.intersection(node.getRightArg().getBindingNames(), filterVars).isEmpty()) {
    relocate(filter, node.getRightArg());
  } else if (!Sets.intersection(node.getLeftArg().getBindingNames(), filterVars).isEmpty()) {
    // Clone the condition so the relocated filter does not alias the original's tree.
    final Filter clone = new Filter(filter.getArg(), filter.getCondition().clone());
    relocate(clone, node.getLeftArg());
  }
}
/**
 * Computes precision = |correct ∩ result| / |result|.
 *
 * @param correct the gold-standard set; may be null
 * @param result the predicted set; may be null
 * @return precision in [0, 1]; 0.0 when {@code result} is empty; null when either input is null
 */
private Double getPrecision(Set<String> correct, Set<String> result) {
  if (correct == null || result == null) {
    return null;
  }
  if (result.isEmpty()) {
    // Avoid 0/0; an empty prediction set yields zero precision by the original's convention.
    return 0.0;
  }
  // Count the overlap directly rather than materializing a Guava intersection view.
  long intersection = result.stream().filter(correct::contains).count();
  return (double) intersection / (double) result.size();
}
// NOTE(review): fragment — the enclosing loop and `intersection` declaration are not visible.
// Drops the current entry when its intersection is empty, presumably pruning candidates with
// no overlap (confirm against the full file). Iterator.remove() is the only safe way to
// delete while iterating.
if (intersection.size() == 0) {
  iterator.remove();
// NOTE(review): fragment — the declaration `setDifference` should receive the result of the
// call below, but as shown the bare Sets.difference(...) discards its value; the missing
// `... setDifference =` prefix is presumably cut off by this view — confirm in the full file.
Sets.difference(ASTHelpers.enumValues(switchType), handledCases);
if (!setDifference.isEmpty()) {
  // Special case: a protobuf-style UNRECOGNIZED value being the only unhandled constant.
  if (setDifference.contains("UNRECOGNIZED") && setDifference.size() == 1) {
// NOTE(review): fragment — surrounding method not visible. True when the collection of
// other-version "muddy" database headers is non-empty.
boolean hasMuddyDatabaseVersionHeaders = otherMuddyDatabaseVersionHeaders.size() > 0;
// NOTE(review): test fragment — `actualSet` and `expectedPartition1Set` are declared outside
// this view. Asserts each expected partition's rows overlap the actual result by exactly
// 3 / 3 / 2 rows, using Guava's lazy intersection view for the overlap count.
ImmutableSet<?> expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows());
ImmutableSet<?> expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows());
assertEquals(Sets.intersection(expectedPartition1Set, actualSet).size(), 3);
assertEquals(Sets.intersection(expectedPartition2Set, actualSet).size(), 3);
assertEquals(Sets.intersection(expectedPartition3Set, actualSet).size(), 2);
// NOTE(review): test fragment — `actualSet` and `expectedPartition1Set` are declared outside
// this view. Asserts each expected partition's rows overlap the actual result by exactly
// 4 / 4 / 2 rows, using Guava's lazy intersection view for the overlap count.
ImmutableSet<?> expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows());
ImmutableSet<?> expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows());
assertEquals(Sets.intersection(expectedPartition1Set, actualSet).size(), 4);
assertEquals(Sets.intersection(expectedPartition2Set, actualSet).size(), 4);
assertEquals(Sets.intersection(expectedPartition3Set, actualSet).size(), 2);
// NOTE(review): test fragment — `actual` and `expectedRows` are declared outside this view.
// Snapshots both row sets and asserts they share exactly 3 rows.
ImmutableSet<?> actualSet = ImmutableSet.copyOf(actual.getMaterializedRows());
ImmutableSet<?> expectedRowsSet = ImmutableSet.copyOf(expectedRows.getMaterializedRows());
assertEquals(Sets.intersection(expectedRowsSet, actualSet).size(), 3);
private int update_nimbus_detail() throws Exception { //update count = count of zk's binary files - count of nimbus's binary files StormClusterState zkClusterState = data.getStormClusterState(); // if we use other blobstore, such as HDFS, all nimbus slave can be leader // but if we use local blobstore, we should count topologies files int diffCount = 0; if (data.getBlobStore() instanceof LocalFsBlobStore) { Set<String> keysOnZk = Sets.newHashSet(zkClusterState.active_keys()); Set<String> keysOnLocal = Sets.newHashSet(data.getBlobStore().listKeys()); // we count number of keys which is on zk but not on local diffCount = Sets.difference(keysOnZk, keysOnLocal).size(); } Map mtmp = zkClusterState.get_nimbus_detail(hostPort, false); if (mtmp == null) { mtmp = new HashMap(); } mtmp.put(NIMBUS_DIFFER_COUNT_ZK, diffCount); zkClusterState.update_nimbus_detail(hostPort, mtmp); LOG.debug("update nimbus details " + mtmp); return diffCount; }
/**
 * Validates that every user and role slated for removal actually exists among the current
 * admins (case-insensitively).
 *
 * @param usersToRemove user names requested for removal
 * @param rolesToRemove role names requested for removal
 * @param existingAdmins the current super-admin entries
 * @return a result that is unprocessable (with the non-existent names recorded) when any
 *     requested user or role is unknown; an empty successful result otherwise
 */
private BulkUpdateAdminsResult validateUsersAndRolesForBulkUpdate(
    List<String> usersToRemove, List<String> rolesToRemove, Set<Admin> existingAdmins) {
  Set<CaseInsensitiveString> existingAdminNames =
      existingAdmins.stream().map(Admin::getName).collect(Collectors.toSet());
  Sets.SetView<CaseInsensitiveString> invalidUsersToRemove =
      Sets.difference(caseInsensitive(usersToRemove), existingAdminNames);
  Sets.SetView<CaseInsensitiveString> invalidRolesToRemove =
      Sets.difference(caseInsensitive(rolesToRemove), existingAdminNames);

  BulkUpdateAdminsResult result = new BulkUpdateAdminsResult();
  // isEmpty() short-circuits; size() > 0 walks the entire lazy difference view.
  if (!invalidUsersToRemove.isEmpty()) {
    result.setNonExistentUsers(invalidUsersToRemove);
    result.unprocessableEntity("Update failed because some users or roles do not exist under super admins.");
  }
  if (!invalidRolesToRemove.isEmpty()) {
    result.setNonExistentRoles(invalidRolesToRemove);
    result.unprocessableEntity("Update failed because some users or roles do not exist under super admins.");
  }
  return result;
}
/**
 * Returns the exact cardinality of the intersection of two exact SetDigests.
 *
 * @param a first digest; must be exact
 * @param b second digest; must be exact
 * @throws IllegalArgumentException if either digest is approximate
 */
public static long exactIntersectionCardinality(SetDigest a, SetDigest b) {
  // Both digests are caller-supplied arguments, so both violations are argument errors.
  // The original used checkState for 'a', which wrongly signalled IllegalStateException
  // while the identical violation on 'b' threw IllegalArgumentException.
  checkArgument(a.isExact(), "exact intersection cannot operate on approximate sets");
  checkArgument(b.isExact(), "exact intersection cannot operate on approximate sets");
  return Sets.intersection(a.minhash.keySet(), b.minhash.keySet()).size();
}