/**
 * Converts an IGFS affinity block location into a Hadoop affinity block location.
 *
 * @param block IGFS affinity block location.
 * @return Hadoop affinity block location covering the same offset/length range.
 */
private BlockLocation convert(IgfsBlockLocation block) {
    Collection<String> names = block.names();
    Collection<String> hosts = block.hosts();

    return new BlockLocation(
        // toArray(new String[0]) is the preferred modern idiom (JIT-friendly, avoids a sized pre-allocation).
        names.toArray(new String[0]) /* hostname:portNumber of data nodes */,
        hosts.toArray(new String[0]) /* hostnames of data nodes */,
        block.start(), block.length()
    ) {
        /** {@inheritDoc} */
        @Override public String toString() {
            try {
                return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
                    ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
            }
            catch (IOException e) {
                // getHosts()/getNames() declare IOException; toString() cannot, so rethrow unchecked
                // preserving the cause.
                throw new RuntimeException(e);
            }
        }
    };
}
/**
 * Converts an IGFS affinity block location into a Hadoop affinity block location.
 *
 * @param block IGFS affinity block location.
 * @return Hadoop affinity block location covering the same offset/length range.
 */
private BlockLocation convert(IgfsBlockLocation block) {
    Collection<String> names = block.names();
    Collection<String> hosts = block.hosts();

    return new BlockLocation(
        // toArray(new String[0]) is the preferred modern idiom (JIT-friendly, avoids a sized pre-allocation).
        names.toArray(new String[0]) /* hostname:portNumber of data nodes */,
        hosts.toArray(new String[0]) /* hostnames of data nodes */,
        block.start(), block.length()
    ) {
        /** {@inheritDoc} */
        @Override public String toString() {
            try {
                return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
                    ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
            }
            catch (IOException e) {
                // getHosts()/getNames() declare IOException; toString() cannot, so rethrow unchecked
                // preserving the cause.
                throw new RuntimeException(e);
            }
        }
    };
}
/**
 * Copying constructor that preserves the original start offset, node IDs, names and hosts
 * of an existing block location while substituting a new length.
 *
 * @param location HDFS block location to copy from.
 * @param len New length.
 */
public IgfsBlockLocationImpl(IgfsBlockLocation location, long len) {
    assert location != null;

    // Same start offset, new length.
    start = location.start();
    this.len = len;

    // Node placement information is carried over unchanged.
    nodeIds = location.nodeIds();
    names = location.names();
    hosts = location.hosts();
}
// NOTE(review): fragment of a larger test method — blocks0, maxLen, block, block0, the
// enclosing loop and part of an assertion message lie outside this view; confirm against
// the full file before editing.

// Blocks returned for [start, start + len) must begin and end exactly at the range bounds.
assertEquals(F.first(blocks).start(), start);
assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length());

// Affinity with an explicit maxLen must match the previously obtained blocks.
assertEquals(blocks0, blocks);

blocks = igfs.affinity(filePath, start, len, maxLen);

assertEquals(F.first(blocks).start(), start);
assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length());

// NOTE(review): dangling string-concatenation tail — presumably the end of an assertion
// message whose head is outside this fragment.
+ ", maxLen=" + maxLen + ']';

// No block may extend past the end of the requested range.
assert block.length() + block.start() <= start + len :
    "block.length() + block.start() < start + len. [block.length=" + block.length() +
    ", block.start()=" + block.start() + ", start=" + start + ", len=" + len + ']';

// Any two distinct blocks must be strictly non-overlapping (one entirely before the other).
assert block.start() < block0.start() && block.start() + block.length() <= block0.start() ||
    block.start() > block0.start() && block0.start() + block0.length() <= block.start() :
    "Blocks cross each other: block0=" + block + ", block1= " + block0;
// Extend the last accumulated block by one more full block length and re-split the result
// into chunks no longer than maxLen.
// NOTE(review): fragment — last, blockLen, maxLen, affNodes and res are declared outside this view.
splitBlocks(last.start(), last.length() + blockLen, maxLen, affNodes, res);
/** * @throws Exception If failed. */ @Test public void testAffinity() throws Exception { long fileSize = 32L * 1024 * 1024; IgfsPath filePath = new IgfsPath("/file"); try (OutputStream os = igfs.create(filePath, true)) { for(int i = 0; i < fileSize / chunk.length; ++i) os.write(chunk); } long len = igfs.info(filePath).length(); int start = 0; // Check default maxLen (maxLen = 0) for (int i = 0; i < igfs.context().data().groupBlockSize() / 1024; i++) { Collection<IgfsBlockLocation> blocks = igfs.affinity(filePath, start, len); assertEquals(F.first(blocks).start(), start); assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length()); len -= 1024 * 2; start += 1024; } }
// Grow the last accumulated block up to the end of the current partition and re-split the
// result into chunks no longer than maxLen.
// NOTE(review): fragment — last, partEnd, pos, maxLen, affNodes and res are declared outside this view.
splitBlocks(last.start(), last.length() + partEnd - pos, maxLen, affNodes, res);
/**
 * Checks that for every affinity block location reported for a whole file, each file block
 * covered by that location maps (via cache affinity of its {@link IgfsBlockKey} with no
 * affinity key) to one of the nodes listed in the location.
 *
 * @throws Exception If failed.
 */
@Test
public void testAffinity2() throws Exception {
    int blockSize = BLOCK_SIZE;

    long t = System.currentTimeMillis();

    IgfsEntryInfo info = IgfsUtils.createFile(IgniteUuid.randomUuid(), blockSize, 1024 * 1024, null, null,
        false, null, t, t);

    // Affinity for the entire file.
    Collection<IgfsBlockLocation> affinity = mgr.affinity(info, 0, info.length());

    for (IgfsBlockLocation loc : affinity) {
        info("Going to check IGFS block location: " + loc);

        // Index of the first file block covered by this location.
        int block = (int)(loc.start() / blockSize);

        int endPos;

        // Walk over every file block covered by the location; do-while so that even a
        // location shorter than one block is checked at least once.
        do {
            // No affinity key for this file (created without a file map entry).
            IgfsBlockKey key = new IgfsBlockKey(info.id(), null, false, block);

            // Node that owns this block according to data cache affinity.
            ClusterNode affNode = grid(0).affinity(grid(0).igfsx("igfs").configuration()
                .getDataCacheConfiguration().getName()).mapKeyToNode(key);

            assertTrue("Failed to find node in affinity [dataMgr=" + loc.nodeIds() +
                ", nodeId=" + affNode.id() + ", block=" + block + ']',
                loc.nodeIds().contains(affNode.id()));

            endPos = (block + 1) * blockSize;

            block++;
        }
        while (endPos < loc.start() + loc.length());
    }
}
/**
 * Checks affinity validity: every file block covered by each given block location must map
 * (via cache affinity of its {@link IgfsBlockKey}, using the affinity key recorded in the
 * file map for that block) to one of the nodes listed in the location.
 *
 * @param blockSize Block size.
 * @param info File info.
 * @param affinity Affinity block locations to check.
 */
private void checkAffinity(int blockSize, IgfsEntryInfo info, Iterable<IgfsBlockLocation> affinity) {
    for (IgfsBlockLocation loc : affinity) {
        info("Going to check IGFS block location: " + loc);

        // Index of the first file block covered by this location.
        int block = (int)(loc.start() / blockSize);

        int endPos;

        // Walk over every file block covered by the location; do-while so that even a
        // location shorter than one block is checked at least once.
        do {
            // Affinity key is looked up in the file map for the block's start offset.
            IgfsBlockKey key = new IgfsBlockKey(info.id(),
                info.fileMap().affinityKey(block * blockSize, false), false, block);

            // Node that owns this block according to data cache affinity.
            ClusterNode affNode = grid(0).affinity(grid(0).igfsx("igfs").configuration()
                .getDataCacheConfiguration().getName()).mapKeyToNode(key);

            assertTrue("Failed to find node in affinity [dataMgr=" + loc.nodeIds() +
                ", nodeId=" + affNode.id() + ", block=" + block + ']',
                loc.nodeIds().contains(affNode.id()));

            endPos = (block + 1) * blockSize;

            block++;
        }
        while (endPos < loc.start() + loc.length());
    }
}
/**
 * Copying constructor that preserves the original start offset, node IDs, names and hosts
 * of an existing block location while substituting a new length.
 *
 * @param location HDFS block location to copy from.
 * @param len New length.
 */
public IgfsBlockLocationImpl(IgfsBlockLocation location, long len) {
    assert location != null;

    // Same start offset, new length.
    start = location.start();
    this.len = len;

    // Node placement information is carried over unchanged.
    nodeIds = location.nodeIds();
    names = location.names();
    hosts = location.hosts();
}
// Extend the last accumulated block by one more full block length and re-split the result
// into chunks no longer than maxLen.
// NOTE(review): fragment — last, blockLen, maxLen, affNodes and res are declared outside this view.
splitBlocks(last.start(), last.length() + blockLen, maxLen, affNodes, res);
// Grow the last accumulated block up to the end of the current partition and re-split the
// result into chunks no longer than maxLen.
// NOTE(review): fragment — last, partEnd, pos, maxLen, affNodes and res are declared outside this view.
splitBlocks(last.start(), last.length() + partEnd - pos, maxLen, affNodes, res);