@Override public Object call() throws Exception { return affinity.mapPartitionToNode(0); } }, IgniteException.class, EXPECTED_MSG);
/** {@inheritDoc} */
@Override public ClusterNode mapPartitionToNode(int part) {
    // Enter the cache gateway, remembering the operation context that was active before.
    CacheOperationContext prevCtx = gate.enter(null);

    try {
        // Delegate the actual partition-to-node resolution.
        return delegate.mapPartitionToNode(part);
    }
    finally {
        // Always restore the previous operation context on exit.
        gate.leave(prevCtx);
    }
}
/** {@inheritDoc} */ @Override public ClusterNode call() throws Exception { return affinity().mapPartitionToNode(part); } }
/**
 * Resolves TensorFlow cluster worker jobs and acquires ports.
 *
 * Collects the distinct primary nodes of all upstream cache partitions, then — in
 * sorted node id order — acquires a port on each node and registers a worker task.
 *
 * @param spec TensorFlow cluster specification.
 * @param upstreamCacheName Upstream cache name.
 */
private void resolveAndAcquirePortsForWorkers(TensorFlowClusterSpec spec, String upstreamCacheName) {
    Affinity<?> affinity = ignite.affinity(upstreamCacheName);

    // Deduplicate primary node ids across all partitions.
    Set<UUID> uniqueNodeIds = new HashSet<>();

    for (int part = 0, parts = affinity.partitions(); part < parts; part++)
        uniqueNodeIds.add(affinity.mapPartitionToNode(part).id());

    // Sort so that worker task registration order is deterministic.
    List<UUID> sortedNodeIds = new ArrayList<>(uniqueNodeIds);

    Collections.sort(sortedNodeIds);

    for (UUID nodeId : sortedNodeIds)
        spec.addTask(WORKER_JOB_NAME, nodeId, portMgr.acquirePort(nodeId));
}
/**
 * Checks if affinity mapping has been changed.
 *
 * Snapshots the current partition-to-primary-node mapping of the upstream cache and
 * compares it with the previously saved snapshot; the saved snapshot is replaced
 * whenever a change (or first call) is detected.
 *
 * @return True if mapping has been changed, otherwise false.
 */
private boolean hasAffinityChanged() {
    Affinity<?> affinity = ignite.affinity(jobArchive.getUpstreamCacheName());

    int parts = affinity.partitions();

    // Current mapping: primary node id indexed by partition number.
    UUID[] cur = new UUID[parts];

    for (int part = 0; part < parts; part++)
        cur[part] = affinity.mapPartitionToNode(part).id();

    // First invocation (no snapshot yet) also counts as a change.
    boolean changed = prev == null || !Arrays.equals(cur, prev);

    if (changed)
        prev = cur;

    return changed;
}
/** {@inheritDoc} */
@Override public void test() throws AssertionError {
    super.test();

    Affinity<?> affinity = ignite().affinity(CACHE_NAME);

    int partCnt = affinity.partitions();

    // Number of partitions owned (as primary) by each node.
    Map<ClusterNode, Integer> partMap = new HashMap<>(serverCount());

    for (int i = 0; i < partCnt; i++)
        // Idiomatic counting: replaces the containsKey/get/put sequence.
        partMap.merge(affinity.mapPartitionToNode(i), 1, Integer::sum);

    // Ideal per-node partition count for a perfectly even distribution.
    int fairCnt = partCnt / serverCount();

    for (int count : partMap.values()) {
        double deviation = Math.abs(fairCnt - count) / (double)fairCnt;

        if (deviation > MAX_DEVIATION) {
            throw new AssertionError("partition distribution deviation exceeded max: fair count=" + fairCnt
                + ", actual count=" + count + ", deviation=" + deviation);
        }
    }
}
}
/**
 * Check mapPartitionToPrimaryAndBackups and mapPartitionToNode methods.
 *
 * Asserts, for every partition, that both affinities resolve the same primary node
 * and the same primary-and-backups collection.
 *
 * @param testAff Affinity under test.
 * @param aff Reference affinity.
 */
private void checkMapPartitionToNode(Affinity<?> testAff, Affinity<?> aff) {
    // Wildcard generics instead of raw Affinity: key type is irrelevant here.
    assertEquals(aff.partitions(), testAff.partitions());

    for (int part = 0; part < aff.partitions(); ++part) {
        assertEquals(testAff.mapPartitionToNode(part).id(), aff.mapPartitionToNode(part).id());

        checkEqualCollection(testAff.mapPartitionToPrimaryAndBackups(part), aff.mapPartitionToPrimaryAndBackups(part));
    }
}
/**
 * @param key Key.
 * @return For the given key pair {primary node, some other node}.
 */
private IgniteBiTuple<ClusterNode, ClusterNode> getNodes(Integer key) {
    Affinity<Integer> aff = grid(0).affinity(DEFAULT_CACHE_NAME);

    // Primary node for the partition that owns the key.
    ClusterNode primary = aff.mapPartitionToNode(aff.partition(key));

    assert primary != null;

    // Any cluster node other than the primary will do.
    Collection<ClusterNode> candidates = new ArrayList<>(grid(0).cluster().nodes());

    candidates.remove(primary);

    ClusterNode other = F.first(candidates);

    assert other != null;
    assert !F.eqNodes(primary, other);

    return F.t(primary, other);
}
}
/**
 * Verifies that affinity mappings are the same on clients and servers.
 *
 * For every cache and every partition, compares the primary node seen by the
 * reference server against the primary node seen by each client grid.
 */
private void verifyPartitionToNodeMappings() {
    IgniteEx refSrv = grid(0);

    for (int i = 0; i < CACHES; i++) {
        // Scope the cache name to the iteration instead of reusing one variable.
        String cacheName = "cache-" + i;

        Affinity<Object> refAffinity = refSrv.affinity(cacheName);

        for (int j = 0; j < PARTITIONS_CNT; j++) {
            ClusterNode refAffNode = refAffinity.mapPartitionToNode(j);

            assertNotNull("Affinity node for " + j + " partition is null", refAffNode);

            // Client grids follow the servers in the grid index range.
            for (int k = SRV_CNT; k < SRV_CNT + CLIENTS_CNT; k++) {
                ClusterNode clAffNode = grid(k).affinity(cacheName).mapPartitionToNode(j);

                assertNotNull("Affinity node for " + k + " client and " + j + " partition is null", clAffNode);

                assertEquals("Affinity node for " + k + " client and " + j + " partition is different on client",
                    refAffNode.id(), clAffNode.id());
            }
        }
    }
}
int part = reader.readObject(); ClusterNode node = aff.mapPartitionToNode(part);
/**
 * Runs a per-partition scan query over every partition of the cache and checks that
 * each partition contains exactly the expected number of distinct keys.
 *
 * @param ignite Node to run the check from.
 * @param cache Cache under test.
 * @param partCntrs Expected per-partition entries count; partitions absent from the
 *      map are expected to be empty.
 * @param loc Whether to run local scan queries (skipping non-primary partitions).
 */
private void checkScanPartition(Ignite ignite, IgniteCache<DbKey, DbValue> cache,
    Map<Integer, Integer> partCntrs, boolean loc) {
    Affinity<Object> aff = ignite.affinity(cache.getName());

    int parts = aff.partitions();

    for (int p = 0; p < parts; p++) {
        ScanQuery<DbKey, DbValue> qry = new ScanQuery<>();

        qry.setPartition(p);
        qry.setLocal(loc);

        // A local scan is only meaningful on the partition's primary node.
        if (loc && !ignite.cluster().localNode().equals(aff.mapPartitionToNode(p)))
            continue;

        QueryCursor<Cache.Entry<DbKey, DbValue>> cur = cache.query(qry);

        Set<DbKey> allKeys = new HashSet<>();

        for (Cache.Entry<DbKey, DbValue> e : cur) {
            allKeys.add(e.getKey());

            assertEquals(e.getKey().val, e.getValue().iVal);
        }

        // getOrDefault replaces the explicit null-check default of the original.
        Integer exp = partCntrs.getOrDefault(p, 0);

        assertEquals(exp, (Integer)allKeys.size());
    }
}
/**
 * JUnit.
 *
 * Picks a random partition, computes the assignment directly via the affinity
 * function, and checks that the cache's {@code mapPartitionToNode} agrees with
 * the first node of that assignment.
 *
 * @throws Exception If failed.
 */
@Test
public void testMapPartitionToNode() throws Exception {
    int randPart = RND.nextInt(affinity().partitions());

    // Build an affinity context over all current cluster nodes at topology version 1.
    AffinityFunctionContext funcCtx = new GridAffinityFunctionContextImpl(
        new ArrayList<>(grid(0).cluster().nodes()), null, null, new AffinityTopologyVersion(1), 1);

    AffinityFunction affFunc = affinity();

    List<List<ClusterNode>> assignments = affFunc.assignPartitions(funcCtx);

    assertEquals(F.first(nodes(assignments, affFunc, randPart)),
        grid(0).affinity(DEFAULT_CACHE_NAME).mapPartitionToNode(randPart));
}
/** {@inheritDoc} */
@Override public ClusterNode mapPartitionToNode(int part) {
    // Enter the cache gateway with no explicit operation context, saving the one
    // that was previously active so it can be restored afterwards.
    CacheOperationContext old = gate.enter(null);
    try {
        // Forward the partition resolution to the wrapped affinity implementation.
        return delegate.mapPartitionToNode(part);
    } finally {
        // Restore the saved operation context even if the delegate call throws.
        gate.leave(old);
    }
}
int part = reader.readObject(); ClusterNode node = aff.mapPartitionToNode(part);