@Generates
private static <E> HashMultiset<E> generateHashMultiset(E freshElement) {
  HashMultiset<E> multiset = HashMultiset.create();
  multiset.add(freshElement);
  return multiset;
}
try {
  occurrencesByHost.add(cluster.manager.controlConnection.connectedHost().getAddress());
} finally {
  cluster.close();
}
@Generates
private static <E> HashMultiset<E> freshHashMultiset(E freshElement) {
  HashMultiset<E> multiset = HashMultiset.create();
  multiset.add(freshElement);
  return multiset;
}
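A minimal standalone sketch (not from the original sources; class and method names are illustrative) of what these @Generates factories produce: a HashMultiset seeded with exactly one occurrence of the supplied element.

import com.google.common.collect.HashMultiset;

public class FreshMultisetDemo {
  private static <E> HashMultiset<E> freshHashMultiset(E freshElement) {
    HashMultiset<E> multiset = HashMultiset.create();
    multiset.add(freshElement); // record one occurrence of the fresh element
    return multiset;
  }

  public static void main(String[] args) {
    HashMultiset<String> m = freshHashMultiset("a");
    System.out.println(m.count("a")); // 1
    System.out.println(m.size());     // 1 (total occurrences across all elements)
  }
}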
private void appendException(ImportException e) {
  importExceptions.add(e);
  String category = e.getCategory().toString();
  importExceptionCodes.add(category);
}
/**
 * {@inheritDoc}
 *
 * <p><strong>NOTE:</strong> This method deviates from the original {@link Multimap}
 * interface in that changes to the returned collection WILL NOT update the
 * underlying multimap and vice versa.</p>
 */
@Override
public Multiset<K> keys() {
  Set<K> keys = keySet();
  HashMultiset<K> multiset = HashMultiset.create(keys.size());
  for (K key : keys) {
    multiset.add(key, get(key).size());
  }
  return multiset;
}
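A self-contained sketch of the snapshot behavior documented above, assuming a plain Map stands in for the multimap's backing storage (the names here are illustrative, not from the source project). It also shows the bulk add(E, int) overload the override relies on.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.util.List;
import java.util.Map;

public class KeysSnapshotDemo {
  public static void main(String[] args) {
    Map<String, List<Integer>> backing =
        Map.of("a", List.of(1, 2, 3), "b", List.of(4));

    Multiset<String> keys = HashMultiset.create(backing.size());
    for (Map.Entry<String, List<Integer>> e : backing.entrySet()) {
      keys.add(e.getKey(), e.getValue().size()); // add(E, int): bulk-add occurrences
    }

    System.out.println(keys.count("a"));  // 3
    keys.remove("a");                     // mutates only the snapshot multiset;
    System.out.println(backing.get("a")); // the backing data is untouched: [1, 2, 3]
  }
}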
public static ScanResult scanVerticesOfComponent(List<LocalId> curVertices) {
  HashMultiset<Language> langs = HashMultiset.create();
  for (LocalId curVertex : curVertices) {
    langs.add(curVertex.getLanguage());
  }
  Integer langCount = langs.entrySet().size();
  Integer articleCount = curVertices.size();
  Double clarity = ((double) langCount / (double) articleCount);
  ScanResult scanResult = new ScanResult(clarity, langCount, articleCount);
  return scanResult;
}
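A brief sketch of the counting idiom above: entrySet().size() gives the number of distinct elements while size() gives total occurrences, which is what the clarity ratio divides. A stand-in enum replaces the project's Language/LocalId types, so this is illustrative only.

import com.google.common.collect.HashMultiset;

public class ClarityDemo {
  enum Language { EN, DE, FR }

  public static void main(String[] args) {
    HashMultiset<Language> langs = HashMultiset.create();
    langs.add(Language.EN);
    langs.add(Language.EN);
    langs.add(Language.DE);

    int langCount = langs.entrySet().size(); // distinct elements: 2
    int articleCount = langs.size();         // total occurrences: 3
    double clarity = (double) langCount / articleCount;
    System.out.println(clarity); // 0.666...
  }
}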
public String getMostUsedArticleCasing() {
  HashMultiset<String> articleNames = HashMultiset.create();
  String result;
  for (Writable writable : super.get()) {
    LinkWritable link = (LinkWritable) writable;
    articleNames.add(link.getArticle().toString());
  }
  ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(articleNames);
  result = (String) sorted.elementSet().toArray()[0];
  return result;
}
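A self-contained sketch of the "most frequent element" idiom used above: Multisets.copyHighestCountFirst returns an ImmutableMultiset whose elementSet() iterates in descending-count order, so its first element is the most common value. The data here is made up for illustration.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.collect.Multisets;

public class MostFrequentDemo {
  public static void main(String[] args) {
    HashMultiset<String> names = HashMultiset.create();
    names.add("Foo");
    names.add("foo");
    names.add("foo");

    ImmutableMultiset<String> byCount = Multisets.copyHighestCountFirst(names);
    // elementSet() preserves the highest-count-first order, so the first
    // element is the most common casing.
    String mostCommon = byCount.elementSet().iterator().next();
    System.out.println(mostCommon); // "foo"
  }
}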
@BeforeExperiment
void setUp() {
  Random random = new Random();
  multisets.clear();
  for (int i = 0; i < ARRAY_SIZE; i++) {
    HashMultiset<Integer> multiset = HashMultiset.<Integer>create();
    multisets.add(multiset);
    queries[i] = random.nextInt();
    multiset.add(queries[i]);
  }
}
classLoaderMultiset.add(classLoaderName);
neighbors.forEach(
    neighbor ->
        rows.add(
            Row.builder()
                .put(COL_NODE, new Node(node))
@BeforeExperiment
void setUp() {
  hashMultiset = HashMultiset.create(size);
  linkedHashMultiset = LinkedHashMultiset.create(size);
  treeMultiset = TreeMultiset.create();
  Random random = new Random();
  int sizeRemaining = size;
  // TODO(kevinb): generate better test contents for multisets
  for (int i = 0; sizeRemaining > 0; i++) {
    // The JVM will return interned values for small ints.
    Integer value = random.nextInt(1000) + 128;
    int count = Math.min(random.nextInt(10) + 1, sizeRemaining);
    sizeRemaining -= count;
    hashMultiset.add(value, count);
    linkedHashMultiset.add(value, count);
    treeMultiset.add(value, count);
  }
  // TODO(kevinb): convert to assert once benchmark tests enable asserts by default
  Preconditions.checkState(hashMultiset.size() == size);
}
public void testHashMultiset() {
  assertFreshInstance(new TypeToken<HashMultiset<String>>() {});
  HashMultiset<String> expected = HashMultiset.create();
  expected.add(new FreshValueGenerator().generate(String.class));
  assertValueAndTypeEquals(
      expected, new FreshValueGenerator().generate(new TypeToken<HashMultiset<String>>() {}));
}
assertEquals(maxSize, inSplit.getLength());
assertEquals(1, inSplit.getLocations().length);
nodeSplits.add(inSplit.getLocations()[0]);
@Test
public void multiNodeClusterNonNormalizedAffinities() throws Exception {
  final Wrapper wrapper = newWrapper(2000, 1, 250,
      ImmutableList.of(
          new EndpointAffinity(N1_EP2, 15, true, 50),
          new EndpointAffinity(N2_EP2, 15, true, 50),
          new EndpointAffinity(N3_EP1, 10, true, 50),
          new EndpointAffinity(N4_EP2, 20, true, 50),
          new EndpointAffinity(N1_EP1, 20, true, 50)));
  INSTANCE.parallelizeFragment(wrapper, newParameters(100, 20, 80), null);

  // Expect the fragment parallelization to be 20 because:
  // 1. the cost (2000) is above the threshold (SLICE_TARGET_DEFAULT), which gives 2000/100 = 20 width, and
  // 2. the number of mandatory node assignments is 5 (the current width of 20 satisfies the requirement), and
  // 3. the max width per node is 20, which limits the width to 100, but the existing width (20) is already less.
  assertEquals(20, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(20, assignedEps.size());
  final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
  for (final NodeEndpoint ep : assignedEps) {
    counts.add(ep);
  }
  // Each node gets at most 5.
  assertThat(counts.count(N1_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N2_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N3_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N4_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N1_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
}
@Test
public void multiNodeCluster2() throws Exception {
  final Wrapper wrapper = newWrapper(200, 1, 20,
      ImmutableList.of(
          new EndpointAffinity(N1_EP2, 0.15, true, 50),
          new EndpointAffinity(N2_EP2, 0.15, true, 50),
          new EndpointAffinity(N3_EP1, 0.10, true, 50),
          new EndpointAffinity(N4_EP2, 0.20, true, 50),
          new EndpointAffinity(N1_EP1, 0.20, true, 50)));
  INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null);

  // Expect the fragment parallelization to be 20 because:
  // 1. the cost (200) is above the threshold (SLICE_TARGET_DEFAULT), which gives 200/1 = 200 width, and
  // 2. the number of mandatory node assignments is 5 (the current width of 200 satisfies the requirement), and
  // 3. the max fragment width is 20, which limits the width.
  assertEquals(20, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(20, assignedEps.size());
  final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
  for (final NodeEndpoint ep : assignedEps) {
    counts.add(ep);
  }
  // Each node gets at most 5.
  assertTrue(counts.count(N1_EP2) <= 5);
  assertTrue(counts.count(N2_EP2) <= 5);
  assertTrue(counts.count(N3_EP1) <= 5);
  assertTrue(counts.count(N4_EP2) <= 5);
  assertTrue(counts.count(N1_EP1) <= 5);
}
@Test
public void testMultiplePeersWithSameKey() throws Exception {
  final int CAPACITY = 3;
  PeerCache cache = new PeerCache(CAPACITY, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
  for (int i = 0; i < CAPACITY; ++i) {
    FakePeer peer = new FakePeer(dnId, false);
    peers.add(peer);
    cache.put(dnId, peer);
  }
  // Check that all of the peers ended up in the cache
  assertEquals(CAPACITY, cache.size());
  while (!peers.isEmpty()) {
    Peer peer = cache.get(dnId, false);
    assertTrue(peer != null);
    assertTrue(!peer.isClosed());
    peers.remove(peer);
  }
  assertEquals(0, cache.size());
  cache.close();
}
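A small sketch of the bag behavior this test depends on: equal elements can be added several times, and remove() drops one occurrence per call, so the multiset only empties once every cached peer has been retrieved. Strings stand in for the test's FakePeer objects.

import com.google.common.collect.HashMultiset;

public class RemoveOneOccurrenceDemo {
  public static void main(String[] args) {
    HashMultiset<String> peers = HashMultiset.create(3); // expected-size hint
    peers.add("peer");
    peers.add("peer");
    peers.add("peer");

    peers.remove("peer");                    // removes a single occurrence
    System.out.println(peers.count("peer")); // 2
    System.out.println(peers.isEmpty());     // false until all occurrences are removed
  }
}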
for (int i = 0; i < CAPACITY; ++i) {
  FakePeer peer = new FakePeer(dnId, i == CAPACITY - 1);
  peers.add(peer);
  cache.put(dnId, peer);
}