/** {@inheritDoc} */
@Override public OutputStream openOutputStream(String path, boolean append) throws IOException {
    IgfsPath igfsPath = new IgfsPath(path);

    final IgfsOutputStream igfsOutputStream;

    if (append)
        igfsOutputStream = igfsEx.append(igfsPath, true/*create*/);
    else
        igfsOutputStream = igfsEx.create(igfsPath, true/*overwrite*/);

    return igfsOutputStream;
}
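/*
 * Usage sketch (not from the original sources): exercises the same append-vs-create split as
 * openOutputStream() above, but through the public IgniteFileSystem API. The file system name
 * "igfs", the method name and the sample payload are illustrative assumptions.
 */
private static void writeSketch(Ignite ignite, String path, boolean append) throws IOException {
    IgniteFileSystem fs = ignite.fileSystem("igfs"); // Assumed IGFS instance name.

    IgfsPath igfsPath = new IgfsPath(path);

    // Append to the file (creating it if it does not exist yet) or create it, overwriting any
    // previous content: the same two branches as in openOutputStream().
    try (IgfsOutputStream out = append ? fs.append(igfsPath, true) : fs.create(igfsPath, true)) {
        out.write("sample".getBytes());
    }
}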
    @Override public T call() throws Exception {
        return call0(igfs.context());
    }
});
/** {@inheritDoc} */
@Override public boolean delete(IgfsPath path, boolean recursive) throws IgniteException {
    return igfs.delete(path, recursive);
}
    @Override public IgfsHandshakeResponse apply() {
        igfs.clientLogDirectory(logDir);

        return new IgfsHandshakeResponse(igfs.name(), igfs.groupBlockSize(), igfs.globalSampling());
    }
});
    @Override public HadoopIgfsStreamDelegate apply() {
        IgfsOutputStream stream = igfs.create(path, bufSize, overwrite,
            colocate ? igfs.nextAffinityKey() : null, replication, blockSize, props);

        return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream);
    }
});
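/*
 * Sketch of the long create(...) overload used above, called directly on the public
 * IgniteFileSystem API. Buffer size, replication factor, block size, path and the properties
 * map are illustrative assumptions; passing null for the affinity key disables block colocation.
 */
private static IgfsOutputStream createWithOptionsSketch(IgniteFileSystem fs) {
    Map<String, String> props = Collections.singletonMap("sampleProp", "sampleValue"); // Assumed properties.

    return fs.create(new IgfsPath("/data/sample.bin"), 65536 /*bufSize*/, true /*overwrite*/,
        null /*affKey: null means no explicit colocation*/, 1 /*replication*/, 65536 /*blockSize*/, props);
}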
    @Override public Void apply() {
        // NOTE: only the EXISTS label survived in the original fragment; the remaining command
        // labels and break statements below are inferred from the operations they guard.
        switch (cmd) {
            case EXISTS:
                res.response(igfs.exists(req.path()));

                break;

            case INFO:
                res.response(igfs.info(req.path()));

                break;

            case PATH_SUMMARY:
                res.response(igfs.summary(req.path()));

                break;

            case UPDATE:
                res.response(igfs.update(req.path(), req.properties()));

                break;

            case RENAME:
                igfs.rename(req.path(), req.destinationPath());

                break;

            case DELETE:
                res.response(igfs.delete(req.path(), req.flag()));

                break;

            case MAKE_DIRECTORIES:
                igfs.mkdirs(req.path(), req.properties());

                break;

            case LIST_PATHS:
                res.paths(igfs.listPaths(req.path()));

                break;

            case LIST_FILES:
                res.files(igfs.listFiles(req.path()));

                break;

            case SET_TIMES:
                igfs.setTimes(req.path(), req.modificationTime(), req.accessTime());

                break;

            case AFFINITY:
                res.locations(igfs.affinity(req.path(), req.start(), req.length()));

                break;

            case OPEN_READ: {
                IgfsInputStream igfsIn = !req.flag() ? igfs.open(req.path(), bufSize) :
                    igfs.open(req.path(), bufSize, req.sequentialReadsBeforePrefetch());

                log.debug("Opened IGFS input stream for file read [igfsName=" + igfs.name() +
                    ", path=" + req.path() + ", streamId=" + streamId + ", ses=" + ses + ']');
IgfsMetrics initMetrics = igfs.metrics();

igfs.create(file1, 256, true, null, 1, 256, null).close();

int blockSize = igfs.info(file1).blockSize();

// An empty file produces no block reads or writes.
checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 0, 0, 0);

// Append two blocks to the first file.
IgfsOutputStream os = igfs.append(file1, false);
os.write(new byte[blockSize * 2]);
os.close();

checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 2, 0, blockSize * 2);

// Write one block to the second file.
os = igfs.create(file2, 256, true, null, 1, 256, null);
os.write(new byte[blockSize]);
os.close();

checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 3, 0, blockSize * 3);

// Read the first file back in full.
IgfsInputStream is = igfs.open(file1);
is.readFully(0, new byte[blockSize * 2]);
is.close();

checkBlockMetrics(initMetrics, igfs.metrics(), 2, 0, blockSize * 2, 3, 0, blockSize * 3);

// Read one block of the second file.
is = igfs.open(file2);
is.read(new byte[blockSize]);
is.close();
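/*
 * The checkBlockMetrics(...) helper is not part of this fragment. A plausible implementation,
 * assuming its six numeric arguments are the expected deltas (blocks read, blocks read remotely,
 * bytes read, blocks written, blocks written remotely, bytes written) relative to initMetrics:
 */
private static void checkBlockMetricsSketch(IgfsMetrics initMetrics, IgfsMetrics metrics, long blocksRead,
    long blocksReadRemote, long bytesRead, long blocksWritten, long blocksWrittenRemote, long bytesWritten) {
    assertEquals(blocksRead, metrics.blocksReadTotal() - initMetrics.blocksReadTotal());
    assertEquals(blocksReadRemote, metrics.blocksReadRemote() - initMetrics.blocksReadRemote());
    assertEquals(bytesRead, metrics.bytesRead() - initMetrics.bytesRead());
    assertEquals(blocksWritten, metrics.blocksWrittenTotal() - initMetrics.blocksWrittenTotal());
    assertEquals(blocksWrittenRemote, metrics.blocksWrittenRemote() - initMetrics.blocksWrittenRemote());
    assertEquals(bytesWritten, metrics.bytesWritten() - initMetrics.bytesWritten());
}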
IgfsPath inDir = new IgfsPath(PATH_INPUT);

igfs.mkdirs(inDir);

// Write the first two lines of input.
try (PrintWriter pw = new PrintWriter(igfs.create(inFile, true))) {
    pw.println("hello0 world0");
    pw.println("world1 hello1");
}

// First split covers the file as written so far.
HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1);

// Append two more lines.
try (PrintWriter pw = new PrintWriter(igfs.append(inFile, false))) {
    pw.println("hello2 world2");
    pw.println("world3 hello3");
}

// Second split covers the appended tail. The declaration of fileBlock2 is inferred here; only
// its length argument survived in the original fragment.
HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, fileBlock1.length(),
    igfs.info(inFile).length() - fileBlock1.length());
primary.mkdirs(IGFS_PATH_DUAL);

// A directory created through the primary IGFS must be visible in both primary and secondary.
assert primary.exists(IGFS_PATH_DUAL);
assert secondary.exists(IGFS_PATH_DUAL);

// After the directory is removed (the removal step is not shown in this fragment) it must
// disappear from the primary, the secondary and the Hadoop FileSystem view.
assert !primary.exists(IGFS_PATH_DUAL);
assert !secondary.exists(IGFS_PATH_DUAL);
assert !fs.exists(PATH_DUAL);

// Once re-created, the path is visible everywhere again.
assert primary.exists(IGFS_PATH_DUAL);
assert secondary.exists(IGFS_PATH_DUAL);
assert fs.exists(PATH_DUAL);

// PROXY-mode paths live only in the secondary file system but remain reachable through fs.
assert secondary.exists(IGFS_PATH_PROXY);
assert fs.exists(PATH_PROXY);

G.stop(primary.context().kernalContext().grid().name(), true);
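/*
 * For context, a minimal sketch (illustrative names and values only) of how DUAL and PROXY path
 * modes are configured on FileSystemConfiguration: DUAL paths are kept in sync with the secondary
 * file system, while PROXY paths are served by the secondary file system only.
 */
private static FileSystemConfiguration dualProxyConfigSketch() {
    FileSystemConfiguration cfg = new FileSystemConfiguration();

    cfg.setName("igfs"); // Assumed IGFS name.
    cfg.setDefaultMode(IgfsMode.DUAL_SYNC);

    // Everything under /proxy is delegated to the secondary file system
    // (the secondary itself is set separately via cfg.setSecondaryFileSystem(...)).
    cfg.setPathModes(Collections.singletonMap("/proxy", IgfsMode.PROXY));

    return cfg;
}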
    @Override public Void apply() {
        igfs.mkdirs(path, props);

        return null;
    }
});
/**
 * Tests Hadoop command line integration.
 */
@Test
public void testHadoopCommandLine() throws Exception {
    assertEquals(0, executeHadoopCmd("fs", "-ls", "/"));

    assertEquals(0, executeHadoopCmd("fs", "-mkdir", "/input"));

    assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "test-data").getAbsolutePath(), "/input"));

    assertTrue(igfs.exists(new IgfsPath("/input/test-data")));

    assertEquals(0, executeHadoopCmd("jar", examplesJar.getAbsolutePath(), "wordcount", "/input", "/output"));

    IgfsPath path = new IgfsPath("/user/" + System.getProperty("user.name") + "/");

    assertTrue(igfs.exists(path));

    IgfsPath jobStatPath = null;

    for (IgfsPath jobPath : igfs.listPaths(path)) {
        assertNull(jobStatPath);

        jobStatPath = jobPath;
    }

    File locStatFile = new File(testWorkDir, "performance");

    assertEquals(0, executeHadoopCmd("fs", "-get", jobStatPath.toString() + "/performance", locStatFile.toString()));

    long evtCnt = HadoopTestUtils.simpleCheckJobStatFile(new BufferedReader(new FileReader(locStatFile)));

    assertTrue(evtCnt >= 22); // Minimum number of events for a job with a combiner.

    assertTrue(igfs.exists(new IgfsPath("/output")));

    BufferedReader in = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath("/output/part-r-00000"))));

    List<String> res = new ArrayList<>();

    String line;

    while ((line = in.readLine()) != null)
        res.add(line);

    Collections.sort(res);

    assertEquals("[blue\t150, green\t200, red\t100, yellow\t50]", res.toString());
}
/** {@inheritDoc} */
@Override public IgfsFile info(IgfsPath path) throws IgniteException {
    return igfs.info(path);
}
/** {@inheritDoc} */
@Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) throws IgniteException {
    return (IgfsSecondaryFileSystemPositionedReadable)igfs.open(path, bufSize);
}
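/*
 * Usage sketch for the positioned-readable stream returned above: read a range of bytes at an
 * arbitrary file offset without seeking. The path, offset and buffer sizes are illustrative
 * assumptions.
 */
private void positionedReadSketch() throws IOException {
    IgfsSecondaryFileSystemPositionedReadable in = open(new IgfsPath("/data/sample.bin"), 65536);

    try {
        byte[] buf = new byte[4096];

        // Read up to buf.length bytes starting at byte offset 1024 of the file.
        int read = in.read(1024L, buf, 0, buf.length);
    }
    finally {
        in.close();
    }
}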
/** {@inheritDoc} */
@Override public OutputStream create(IgfsPath path, boolean overwrite) throws IgniteException {
    return igfs.create(path, overwrite);
}
/** {@inheritDoc} */
@Override public boolean exists(IgfsPath path) {
    return igfs.exists(path);
}
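/*
 * Round-trip sketch over the delegated operations above (create, exists, info, delete), written
 * against the public IgniteFileSystem API; the path and payload are illustrative assumptions.
 */
private static void roundTripSketch(IgniteFileSystem fs) throws IOException {
    IgfsPath path = new IgfsPath("/tmp/roundtrip.txt");

    // Create the file and write a small payload.
    try (OutputStream out = fs.create(path, true)) {
        out.write(new byte[] {1, 2, 3});
    }

    assert fs.exists(path);
    assert fs.info(path).isFile();

    fs.delete(path, false);

    assert !fs.exists(path);
}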
/**
 * Ensure that the given paths don't exist in the given IGFS.
 *
 * @param uni Secondary file system test adapter.
 * @param paths Paths.
 * @throws Exception If failed.
 */
protected void checkNotExist(IgfsSecondaryFileSystemTestAdapter uni, IgfsPath... paths) throws Exception {
    IgfsEx ex = uni.igfs();

    for (IgfsPath path : paths) {
        if (ex != null)
            assert !ex.exists(path) : "Path exists [igfs=" + ex.name() + ", path=" + path + ']';

        assert !uni.exists(path.toString()) : "Path exists [igfs=" + uni.name() + ", path=" + path + ']';
    }
}
/**
 * Ensure that the given paths exist in the given IGFS.
 *
 * @param uni File system test adapter.
 * @param paths Paths.
 * @throws IgniteCheckedException If failed.
 */
protected void checkExist(IgfsSecondaryFileSystemTestAdapter uni, IgfsPath... paths) throws IgniteCheckedException {
    IgfsEx ex = uni.igfs();

    for (IgfsPath path : paths) {
        if (ex != null)
            assert ex.context().meta().fileId(path) != null :
                "Path doesn't exist [igfs=" + ex.name() + ", path=" + path + ']';

        try {
            assert uni.exists(path.toString()) : "Path doesn't exist [igfs=" + uni.name() + ", path=" + path + ']';
        }
        catch (IOException ioe) {
            throw new IgniteCheckedException(ioe);
        }
    }
}
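/*
 * Typical usage of the two helpers above in a test (sketch only; the igfs and uni variables and
 * the paths stand for the primary IGFS instance, its test adapter and sample directories):
 */
private void checkHelpersSketch(IgniteFileSystem igfs, IgfsSecondaryFileSystemTestAdapter uni) throws Exception {
    IgfsPath dir = new IgfsPath("/dir");
    IgfsPath subDir = new IgfsPath(dir, "subdir");

    // Create a small directory tree and verify it is visible through the adapter.
    igfs.mkdirs(subDir);

    checkExist(uni, dir, subDir);

    // Remove the tree recursively and verify it is gone.
    igfs.delete(dir, true);

    checkNotExist(uni, dir, subDir);
}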