/**
 * Convert IGFS affinity block location into Hadoop affinity block location.
 *
 * @param block IGFS affinity block location.
 * @return Hadoop affinity block location.
 */
private BlockLocation convert(IgfsBlockLocation block) {
    Collection<String> names = block.names();
    Collection<String> hosts = block.hosts();

    return new BlockLocation(
        names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
        hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */,
        block.start(), block.length()
    ) {
        @Override public String toString() {
            try {
                return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
                    ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
            }
            catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
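// A minimal usage sketch (not from the original source): map every IGFS affinity range of a file
// to a Hadoop BlockLocation via convert(...) above. The `igfs` parameter and this helper's name
// and placement are assumptions for illustration only.
private BlockLocation[] blockLocations(IgniteFileSystem igfs, IgfsPath path, long start, long len) {
    Collection<IgfsBlockLocation> blocks = igfs.affinity(path, start, len);

    BlockLocation[] res = new BlockLocation[blocks.size()];

    int i = 0;

    for (IgfsBlockLocation block : blocks)
        res[i++] = convert(block);

    return res;
}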
// Fragment of the task-mapping logic: resolve an affinity node for each block location, then
// create an IGFS job for that file range. Elided parts of the original snippet are marked below.
ClusterNode node = null;

for (UUID nodeId : loc.nodeIds()) {
    node = nodes.get(nodeId);

    // Assumed: stop at the first node that is present in the subgrid.
    if (node != null)
        break;
}

// ... (the original snippet retained only the tail of an error message here)
//     ", subgrid=" + subgrid + ']');

IgfsJob job = createJob(path, new IgfsFileRange(file.path(), loc.start(), loc.length()), args);

ComputeJob jobImpl = igfsProc.createJob(job, fs.name(), file.path(), loc.start(), loc.length(),
    args.recordResolver());

totalLen += loc.length();
assertEquals(F.first(blocks).start(), start);
assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length());

assertEquals(blocks0, blocks);

blocks = igfs.affinity(filePath, start, len, maxLen);

assertEquals(F.first(blocks).start(), start);
assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length());

totalLen += block.length();

assert block.length() <= maxLen : "block.length() <= maxLen. [block.length=" + block.length() +
    ", maxLen=" + maxLen + ']';

assert block.length() + block.start() <= start + len : "block.length() + block.start() <= start + len. " +
    "[block.length=" + block.length() + ", block.start()=" + block.start() + ", start=" + start +
    ", len=" + len + ']';

assert block.start() < block0.start() && block.start() + block.length() <= block0.start() ||
    block.start() > block0.start() && block0.start() + block0.length() <= block.start() :
    "Blocks cross each other: block0=" + block0 + ", block1=" + block;
/** @throws Exception If failed. */
@Test
public void testZeroReplicationFactor() throws Exception {
    // This test doesn't make sense for any mode except PRIMARY.
    if (mode == PRIMARY) {
        Path igfsHome = new Path(PRIMARY_URI);
        Path file = new Path(igfsHome, "someFile");

        try (FSDataOutputStream out = fs.create(file, (short)0)) {
            out.write(new byte[1024 * 1024]);
        }

        IgniteFileSystem igfs = grid(0).fileSystem("igfs");

        IgfsPath filePath = new IgfsPath("/someFile");

        IgfsFile fileInfo = igfs.info(filePath);

        awaitPartitionMapExchange();

        Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());

        assertEquals(1, locations.size());

        IgfsBlockLocation location = F.first(locations);

        assertEquals(1, location.nodeIds().size());
    }
}
assertEquals("Unexpected block location: " + loc, pos, loc.start()); assertEquals("Unexpected block location: " + loc, len, loc.length()); pos, first.start()); first.length() >= grpSize - pos % grpSize); last.start() <= (pos / grpSize + 4) * grpSize); last.length() >= (pos + len - 1) % grpSize + 1);
/** @throws Exception If failed. */
@Test
public void testZeroReplicationFactor() throws Exception {
    // This test doesn't make sense for any mode except PRIMARY.
    if (mode == PRIMARY) {
        Path igfsHome = new Path(primaryFsUri);
        Path file = new Path(igfsHome, "someFile");

        try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.repFac((short)1))) {
            out.write(new byte[1024 * 1024]);
        }

        IgniteFileSystem igfs = grid(0).fileSystem("igfs");

        IgfsPath filePath = new IgfsPath("/someFile");

        IgfsFile fileInfo = igfs.info(filePath);

        Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());

        assertEquals(1, locations.size());

        IgfsBlockLocation location = F.first(locations);

        assertEquals(1, location.nodeIds().size());
    }
}
/**
 * Checks affinity validity.
 *
 * @param blockSize Block size.
 * @param info File info.
 * @param affinity Affinity block locations to check.
 */
private void checkAffinity(int blockSize, IgfsEntryInfo info, Iterable<IgfsBlockLocation> affinity) {
    for (IgfsBlockLocation loc : affinity) {
        info("Going to check IGFS block location: " + loc);

        int block = (int)(loc.start() / blockSize);
        int endPos;

        do {
            IgfsBlockKey key = new IgfsBlockKey(info.id(),
                info.fileMap().affinityKey(block * blockSize, false), false, block);

            ClusterNode affNode = grid(0).affinity(grid(0).igfsx("igfs").configuration()
                .getDataCacheConfiguration().getName()).mapKeyToNode(key);

            assertTrue("Failed to find node in affinity [dataMgr=" + loc.nodeIds() + ", nodeId=" +
                affNode.id() + ", block=" + block + ']', loc.nodeIds().contains(affNode.id()));

            endPos = (block + 1) * blockSize;

            block++;
        }
        while (endPos < loc.start() + loc.length());
    }
}
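// A hypothetical invocation of checkAffinity(...) (not from the original source), mirroring the
// testAffinity2 snippet later in this section; the data manager field `mgr`, the BLOCK_SIZE
// constant and the `info` entry are assumed to exist in the enclosing test class.
Collection<IgfsBlockLocation> affinity = mgr.affinity(info, 0, info.length());

checkAffinity(BLOCK_SIZE, info, affinity);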
/**
 * @throws Exception If failed.
 */
@Test
public void testAffinity() throws Exception {
    long fileSize = 32L * 1024 * 1024;

    IgfsPath filePath = new IgfsPath("/file");

    try (OutputStream os = igfs.create(filePath, true)) {
        for (int i = 0; i < fileSize / chunk.length; ++i)
            os.write(chunk);
    }

    long len = igfs.info(filePath).length();
    int start = 0;

    // Check default maxLen (maxLen = 0).
    for (int i = 0; i < igfs.context().data().groupBlockSize() / 1024; i++) {
        Collection<IgfsBlockLocation> blocks = igfs.affinity(filePath, start, len);

        assertEquals(F.first(blocks).start(), start);
        assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length());

        len -= 1024 * 2;
        start += 1024;
    }
}
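// For comparison, a sketch (not from the original source) of the overload with an explicit maxLen,
// which caps the length of each returned range; it assumes the same igfs/filePath/start/len
// variables as the test above and reuses the invariant asserted in the earlier fragment.
long maxLen = igfs.context().data().groupBlockSize();

Collection<IgfsBlockLocation> capped = igfs.affinity(filePath, start, len, maxLen);

for (IgfsBlockLocation block : capped)
    assertTrue(block.length() <= maxLen);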
/** @throws Exception If failed. */
@Test
public void testCreateFileColocated() throws Exception {
    IgfsPath path = new IgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    IgniteUuid affKey;

    long idx = 0;

    while (true) {
        affKey = new IgniteUuid(uuid, idx);

        if (grid(0).affinity(grid(0).igfsx("igfs").configuration().getDataCacheConfiguration()
            .getName()).mapKeyToNode(affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (IgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15M, should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    IgfsFile info = fs.info(path);

    Collection<IgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
/** @throws Exception If failed. */
@Test
public void testAffinity2() throws Exception {
    int blockSize = BLOCK_SIZE;

    long t = System.currentTimeMillis();

    IgfsEntryInfo info = IgfsUtils.createFile(IgniteUuid.randomUuid(), blockSize, 1024 * 1024, null, null,
        false, null, t, t);

    Collection<IgfsBlockLocation> affinity = mgr.affinity(info, 0, info.length());

    for (IgfsBlockLocation loc : affinity) {
        info("Going to check IGFS block location: " + loc);

        int block = (int)(loc.start() / blockSize);
        int endPos;

        do {
            IgfsBlockKey key = new IgfsBlockKey(info.id(), null, false, block);

            ClusterNode affNode = grid(0).affinity(grid(0).igfsx("igfs").configuration()
                .getDataCacheConfiguration().getName()).mapKeyToNode(key);

            assertTrue("Failed to find node in affinity [dataMgr=" + loc.nodeIds() + ", nodeId=" +
                affNode.id() + ", block=" + block + ']', loc.nodeIds().contains(affNode.id()));

            endPos = (block + 1) * blockSize;

            block++;
        }
        while (endPos < loc.start() + loc.length());
    }
}