Refine search
/**
 * Indexes the given processors by every CE task type they declare to handle.
 * Several processors may register for the same task type, hence the {@link Multimap}.
 *
 * @param taskProcessors the processors to index; each contributes one entry per handled type
 * @return a multimap from CE task type to the processors handling that type
 */
private static Multimap<String, CeTaskProcessor> buildPermissiveCeTaskProcessorIndex(CeTaskProcessor[] taskProcessors) {
  // Pre-size assuming roughly one handled type per processor.
  Multimap<String, CeTaskProcessor> index = ArrayListMultimap.create(taskProcessors.length, 1);
  for (CeTaskProcessor processor : taskProcessors) {
    for (String handledType : processor.getHandledCeTaskTypes()) {
      index.put(handledType, processor);
    }
  }
  return index;
}
/**
 * Creates a {@link Multimap} that maps each {@link Extract} to its corresponding
 * {@link WorkUnitState}s.
 *
 * @param workUnitStates the {@link WorkUnitState}s to group, each keyed by its {@link Extract}
 * @return a {@link Multimap} from {@link Extract} to the {@link WorkUnitState}s sharing it
 * @see Multimap
 */
public static Multimap<Extract, WorkUnitState> createExtractToWorkUnitStateMap(
    Collection<? extends WorkUnitState> workUnitStates) {
  Multimap<Extract, WorkUnitState> extractToWorkUnitStateMap = ArrayListMultimap.create();
  for (WorkUnitState workUnitState : workUnitStates) {
    extractToWorkUnitStateMap.put(workUnitState.getExtract(), workUnitState);
  }
  return extractToWorkUnitStateMap;
}
map.put("bar", 2); Multimap<String, Integer> multimap = HashMultimap.create(); multimap.put("foo", 1); multimap.put("bar", 2); Multimap<String, Integer> multimapView = Multimaps.forMap(map); new EqualsTester().addEqualityGroup(multimap, multimapView).addEqualityGroup(map).testEquals(); Multimap<String, Integer> multimap2 = HashMultimap.create(); multimap2.put("foo", 1); assertFalse(multimapView.equals(multimap2)); multimap2.put("bar", 1); assertEquals(multimap.hashCode(), multimapView.hashCode()); assertEquals(multimap.size(), multimapView.size()); assertEquals(multimapView, ArrayListMultimap.create());
/**
 * Performs an on-street search around a point with each of several modes to find nearby stops.
 *
 * @param dest true to use the request's egress modes, false to use its access modes
 * @return one or more paths to each reachable stop cluster using the various modes
 */
private Multimap<StopCluster, StopAtDistance> findClosestStops(boolean dest) {
  Multimap<StopCluster, StopAtDistance> result = ArrayListMultimap.create();
  QualifiedModeSet modeSet = dest ? request.egressModes : request.accessModes;
  for (QualifiedMode mode : modeSet.qModes) {
    LOG.info("{} mode {}", dest ? "egress" : "access", mode);
    for (StopAtDistance stopAtDistance : findClosestStops(mode, dest)) {
      result.put(stopAtDistance.stopCluster, stopAtDistance);
    }
  }
  return result;
}
/** merge similar states (states that have come from the same place on different patterns) */ public void mergeStates() { Set<TransitStop> touchedStopVertices = new HashSet<TransitStop>(states.keySet()); for (TransitStop tstop : touchedStopVertices) { Collection<ProfileState> pss = states.get(tstop); // find states that have come from the same place Multimap<ProfileState, ProfileState> foundStates = ArrayListMultimap.create(); for (Iterator<ProfileState> it = pss.iterator(); it.hasNext();) { ProfileState ps = it.next(); foundStates.put(ps.previous, ps); } pss.clear(); // merge them now for (Collection<ProfileState> states : foundStates.asMap().values()) { if (states.size() == 1) pss.addAll(states); else pss.add(ProfileState.merge(states, true)); } } }
/**
 * Pairs each unmatched raw trackable with an unmatched base trackable sharing the same
 * search key, preferring the base with the highest status rank and, on ties, the earliest
 * creation date. A matched base is removed from the index so it is used at most once.
 *
 * @param tracking the tracking state holding unmatched raws and bases; updated in place
 * @param searchKeyFactory derives the key used to group candidate bases per raw
 */
protected void match(Tracking<RAW, BASE> tracking, Function<Trackable, SearchKey> searchKeyFactory) {
  if (tracking.isComplete()) {
    return;
  }
  // Index unmatched bases by search key for direct candidate lookup per raw.
  Multimap<SearchKey, BASE> baseSearch = ArrayListMultimap.create();
  tracking.getUnmatchedBases()
    .forEach(base -> baseSearch.put(searchKeyFactory.apply(base), base));
  tracking.getUnmatchedRaws().forEach(raw -> {
    SearchKey rawKey = searchKeyFactory.apply(raw);
    Collection<BASE> bases = baseSearch.get(rawKey);
    bases.stream()
      // thenComparing accepts a key extractor directly — no need to wrap it in comparing(...)
      .sorted(comparing(this::statusRank).reversed()
        .thenComparing(Trackable::getCreationDate))
      .findFirst()
      .ifPresent(match -> {
        tracking.match(raw, match);
        // Consume this base so it cannot match a second raw.
        baseSearch.remove(rawKey, match);
      });
  });
}
/**
 * Builds the multimap under test from the given entries, upper-casing each value.
 * Each element is expected to be an {@code Entry<String, String>}.
 */
@Override
public M create(Object... elements) {
  Multimap<String, String> multimap = ArrayListMultimap.create();
  for (Object element : elements) {
    @SuppressWarnings("unchecked")
    Entry<String, String> entry = (Entry<String, String>) element;
    String upperCasedValue = Ascii.toUpperCase(entry.getValue());
    multimap.put(entry.getKey(), upperCasedValue);
  }
  return transform(multimap);
}
Multimap<String, Long> orderKeyByStatus = ArrayListMultimap.create(); Multimap<String, Double> totalPriceByStatus = ArrayListMultimap.create(); for (MaterializedRow row : raw.getMaterializedRows()) { orderKeyByStatus.put((String) row.getField(0), ((Number) row.getField(1)).longValue()); totalPriceByStatus.put((String) row.getField(0), (Double) row.getField(2));
/**
 * Groups splits for each bucket separately, while evenly filling all the
 * available slots with tasks.
 *
 * @param conf configuration passed through to the underlying Tez grouper
 * @param bucketSplitMultimap raw input splits keyed by bucket id
 * @param availableSlots total task slots available for sizing the groups
 * @param waves multiplier controlling how many task "waves" to aim for
 * @param splitLocationProvider supplies locality info for grouped splits
 * @return grouped splits keyed by the same bucket ids as the input
 * @throws IOException if the underlying split grouping fails
 */
public Multimap<Integer, InputSplit> group(Configuration conf,
    Multimap<Integer, InputSplit> bucketSplitMultimap, int availableSlots, float waves,
    SplitLocationProvider splitLocationProvider) throws IOException {

  // figure out how many tasks we want for each bucket
  Map<Integer, Integer> bucketTaskMap =
      estimateBucketSizes(availableSlots, waves, bucketSplitMultimap.asMap());

  // allocate map bucket id to grouped splits
  Multimap<Integer, InputSplit> bucketGroupedSplitMultimap =
      ArrayListMultimap.<Integer, InputSplit> create();

  // use the tez grouper to combine splits once per bucket
  for (int bucketId : bucketSplitMultimap.keySet()) {
    Collection<InputSplit> inputSplitCollection = bucketSplitMultimap.get(bucketId);

    InputSplit[] rawSplits = inputSplitCollection.toArray(new InputSplit[0]);
    // Target task count for this bucket comes from the estimate above.
    // NOTE(review): assumes estimateBucketSizes returns an entry for every key in
    // bucketSplitMultimap — a missing key would NPE on unboxing; confirm upstream contract.
    InputSplit[] groupedSplits =
        tezGrouper.getGroupedSplits(conf, rawSplits, bucketTaskMap.get(bucketId),
            HiveInputFormat.class.getName(), new ColumnarSplitSizeEstimator(),
            splitLocationProvider);

    LOG.info("Original split count is " + rawSplits.length + " grouped split count is "
        + groupedSplits.length + ", for bucket: " + bucketId);

    // Re-key every grouped split under its originating bucket id.
    for (InputSplit inSplit : groupedSplits) {
      bucketGroupedSplitMultimap.put(bucketId, inSplit);
    }
  }

  return bucketGroupedSplitMultimap;
}
Multimap<ConfigStoreAccessor, ConfigKeyPath> partitionedAccessor = ArrayListMultimap.create(); ConfigStoreAccessor accessor = this.getConfigStoreAccessor(u); ConfigKeyPath configKeypath = ConfigClientUtils.buildConfigKeyPath(u, accessor.configStore); partitionedAccessor.put(accessor, configKeypath);
/**
 * Buffers incoming data points per (component, metric, task). Complex data points are
 * expanded into individual metric entries before buffering.
 *
 * <p>Thread-safe via coarse synchronization on BUFFER.
 *
 * @param taskInfo identifies the source component and task of the data points
 * @param dataPoints the raw data points to expand and buffer
 */
@Override
public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
  synchronized (BUFFER) {
    for (DataPoint dp : dataPoints) {
      for (Map.Entry<String, Object> entry : expandComplexDataPoint(dp).entrySet()) {
        String metricName = entry.getKey();
        Multimap<Integer, Object> taskIdToBucket = BUFFER.get(taskInfo.srcComponentId, metricName);
        if (taskIdToBucket == null) {
          // First value for this (component, metric): create and register the bucket map.
          // Registering once is enough — later values mutate the same multimap in place,
          // so the per-iteration re-put of the identical reference was redundant.
          taskIdToBucket = ArrayListMultimap.create();
          BUFFER.put(taskInfo.srcComponentId, metricName, taskIdToBucket);
        }
        // Multimap.put(k, v) is equivalent to get(k).add(v) and clearer.
        taskIdToBucket.put(taskInfo.srcTaskId, entry.getValue());
      }
    }
  }
}
/**
 * Builds the named Elasticsearch filters for the given project-measures query:
 * authorization, per-metric criteria, quality gate, ids, languages, organization,
 * tags and free-text search.
 */
private Map<String, QueryBuilder> createFilters(ProjectMeasuresQuery query) {
  Map<String, QueryBuilder> filters = new HashMap<>();
  filters.put("__authorization", authorizationTypeSupport.createQueryFilter());

  // Group the metric criteria by metric key so that all criteria on one metric
  // end up combined in a single bool query under that key.
  Multimap<String, MetricCriterion> criteriaByMetricKey = ArrayListMultimap.create();
  query.getMetricCriteria()
    .forEach(criterion -> criteriaByMetricKey.put(criterion.getMetricKey(), criterion));
  criteriaByMetricKey.asMap().forEach((metricKey, criteria) -> {
    BoolQueryBuilder metricFilters = boolQuery();
    criteria.stream().map(ProjectMeasuresIndex::toQuery).forEach(metricFilters::must);
    filters.put(metricKey, metricFilters);
  });

  query.getQualityGateStatus()
    .ifPresent(status -> filters.put(ALERT_STATUS_KEY,
      termQuery(FIELD_QUALITY_GATE_STATUS, QUALITY_GATE_STATUS.get(status.name()))));
  query.getProjectUuids()
    .ifPresent(uuids -> filters.put("ids", termsQuery("_id", uuids)));
  query.getLanguages()
    .ifPresent(langs -> filters.put(FILTER_LANGUAGES, termsQuery(FIELD_LANGUAGES, langs)));
  query.getOrganizationUuid()
    .ifPresent(orgUuid -> filters.put(FIELD_ORGANIZATION_UUID, termQuery(FIELD_ORGANIZATION_UUID, orgUuid)));
  query.getTags()
    .ifPresent(tagList -> filters.put(FIELD_TAGS, termsQuery(FIELD_TAGS, tagList)));
  query.getQueryText()
    .map(ProjectsTextSearchQueryFactory::createQuery)
    .ifPresent(textQuery -> filters.put("textQuery", textQuery));
  return filters;
}
/**
 * Looks up group memberships for the given logins, chunking the input to respect
 * database IN-clause limits.
 *
 * @param session the open database session
 * @param logins the user logins to resolve
 * @return a multimap from login to the names of the groups that user belongs to
 */
public Multimap<String, String> selectGroupsByLogins(DbSession session, Collection<String> logins) {
  Multimap<String, String> groupsByLogin = ArrayListMultimap.create();
  executeLargeInputs(
    logins,
    chunk -> {
      List<LoginGroup> memberships = mapper(session).selectGroupsByLogins(chunk);
      memberships.forEach(membership -> groupsByLogin.put(membership.login(), membership.groupName()));
      return memberships;
    });
  return groupsByLogin;
}
private void connectTerminalOps(ParseContext pCtx) { Multimap<TerminalOperator<?>, ReduceSinkOperator> terminalOpToRSMap = ArrayListMultimap.create(); rsToTerminalOpsInfo.put(rsFound, candidate); for (TerminalOperator<?> terminalOp : candidate.terminalOps) { terminalOpToRSMap.put(terminalOp, rsFound);
/**
 * Creates a {@link Multimap} mapping each {@link CopyableDataset} (via its dataset-and-partition
 * key) to all {@link WorkUnitState}s belonging to it. The mapping is used to flip every
 * {@link WorkUnitState} to {@link WorkUnitState.WorkingState#COMMITTED} once the
 * {@link CopyableDataset} has been published successfully.
 */
private static Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> groupByFileSet(
    Collection<? extends WorkUnitState> states) {
  Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> datasetRoots = ArrayListMultimap.create();
  states.forEach(state -> {
    CopyEntity copyEntity = CopySource.deserializeCopyEntity(state);
    CopyableDatasetMetadata metadata =
        CopyableDatasetMetadata.deserialize(state.getProp(CopySource.SERIALIZED_COPYABLE_DATASET));
    datasetRoots.put(copyEntity.getDatasetAndPartition(metadata), state);
  });
  return datasetRoots;
}
/**
 * Generates groups of splits, separated by schema evolution boundaries.
 *
 * @param jobConf job configuration used to resolve the map work and to group splits
 * @param conf general configuration (currently unused in this method body)
 * @param splits the raw input splits, in order
 * @param waves wave multiplier forwarded to {@link #group}
 * @param availableSlots task slot budget forwarded to {@link #group}
 * @param inputName name of the input whose {@code MapWork} drives schema checks
 * @param groupAcrossFiles whether grouping may cross file boundaries
 * @param locationProvider supplies locality info for grouped splits
 * @return grouped splits keyed by schema-evolution group index
 * @throws Exception if map work resolution or grouping fails
 */
public Multimap<Integer, InputSplit> generateGroupedSplits(JobConf jobConf, Configuration conf,
    InputSplit[] splits, float waves, int availableSlots, String inputName,
    boolean groupAcrossFiles, SplitLocationProvider locationProvider) throws Exception {

  MapWork work = populateMapWork(jobConf, inputName);
  // ArrayListMultimap is important here to retain the ordering for the splits.
  Multimap<Integer, InputSplit> bucketSplitMultiMap =
      ArrayListMultimap.<Integer, InputSplit> create();

  // i is the current schema-evolution group index; it advances whenever a split's
  // schema differs from the previous boundary split's.
  int i = 0;
  InputSplit prevSplit = null;
  for (InputSplit s : splits) {
    // this is the bit where we make sure we don't group across partition
    // schema boundaries
    if (schemaEvolved(s, prevSplit, groupAcrossFiles, work)) {
      ++i;
      // prevSplit only updates at boundaries: later splits are compared against the
      // split that started the current group.
      prevSplit = s;
    }
    bucketSplitMultiMap.put(i, s);
  }
  LOG.info("# Src groups for split generation: " + (i + 1));

  // group them into the chunks we want
  Multimap<Integer, InputSplit> groupedSplits =
      this.group(jobConf, bucketSplitMultiMap, availableSlots, waves, locationProvider);

  return groupedSplits;
}