/**
 * Convert IGFS path into Hadoop path.
 *
 * @param path IGFS path.
 * @return Hadoop path.
 */
private Path convert(IgfsPath path) {
    return new Path(IGFS_SCHEME, uriAuthority, path.toString());
}
/**
 * Create file for IGFS path.
 *
 * @param path IGFS path.
 * @return File object.
 */
private File fileForPath(IgfsPath path) {
    if (workDir == null)
        return new File(path.toString());
    else if ("/".equals(path.toString()))
        return new File(workDir);
    else
        return new File(workDir, path.toString());
}
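For illustration, the mapping works out as follows; the workDir value is an assumed example, not taken from the original source:

// Hypothetical examples, assuming workDir = "/tmp/igfs":
// fileForPath(new IgfsPath("/"))    -> File("/tmp/igfs")
// fileForPath(new IgfsPath("/a/b")) -> File("/tmp/igfs", "/a/b"), i.e. /tmp/igfs/a/b
// With workDir == null, the IGFS path string itself is used as the local path.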
/**
 * Creates short name of the file in the TRASH directory.
 * The name consists of the whole file path and its unique id.
 * Upon file cleanup this name will be parsed to extract the path.
 * Note that, in contrast to common practice, the composed name contains the '/' character.
 *
 * @param path The full path of the deleted file.
 * @param id The file id.
 * @return The new short name for the trash directory.
 */
static String composeNameForTrash(IgfsPath path, IgniteUuid id) {
    return id.toString() + TRASH_NAME_SEPARATOR + path.toString();
}
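Since everything after the first separator is the original path, the cleanup-side parse is essentially the inverse split. A minimal sketch, assuming TRASH_NAME_SEPARATOR is a String constant that never occurs inside an IgniteUuid's string form; the helper name extractPathFromTrashName is hypothetical, not part of the original API:

/**
 * Hypothetical inverse of composeNameForTrash(): recovers the original path from a trash entry name.
 *
 * @param trashName Composed trash entry name.
 * @return The original IGFS path embedded in the name.
 */
static IgfsPath extractPathFromTrashName(String trashName) {
    int sepIdx = trashName.indexOf(TRASH_NAME_SEPARATOR);

    assert sepIdx >= 0 : "Not a composed trash name: " + trashName;

    return new IgfsPath(trashName.substring(sepIdx + TRASH_NAME_SEPARATOR.length()));
}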
/**
 * Log rename event.
 *
 * @param path Path.
 * @param destPath Destination path.
 */
public void logRename(IgfsPath path, IgfsPath destPath) {
    addEntry(new Entry(TYPE_RENAME, path.toString(), null, null, null, null, null, null, null, null, null, null,
        null, null, null, null, destPath.toString(), null, null));
}
/**
 * Convert IGFS path into Hadoop path.
 *
 * @param path IGFS path.
 * @return Hadoop path.
 */
private Path convert(IgfsPath path) {
    URI uri = fileSystemForUser().getUri();

    return new Path(uri.getScheme(), uri.getAuthority(), path.toString());
}
/**
 * Log file open event.
 *
 * @param streamId Stream ID.
 * @param path Path.
 * @param bufSize Buffer size.
 * @param dataLen Data length.
 */
public void logOpen(long streamId, IgfsPath path, int bufSize, long dataLen) {
    addEntry(new Entry(TYPE_OPEN_IN, path.toString(), streamId, bufSize, dataLen, null, null, null, null, null,
        null, null, null, null, null, null, null, null, null));
}
/**
 * Log directory creation event.
 *
 * @param path Path.
 */
public void logMakeDirectory(IgfsPath path) {
    addEntry(new Entry(TYPE_DIR_MAKE, path.toString(), null, null, null, null, null, null, null, null, null, null,
        null, null, null, null, null, null, null));
}
/**
 * Log file append event.
 *
 * @param streamId Stream ID.
 * @param path Path.
 * @param bufSize Buffer size.
 */
public void logAppend(long streamId, IgfsPath path, int bufSize) {
    addEntry(new Entry(TYPE_OPEN_OUT, path.toString(), streamId, bufSize, null, true, null, null, null, null, null,
        null, null, null, null, null, null, null, null));
}
/**
 * Log directory listing event.
 *
 * @param path Path.
 * @param files Files.
 */
public void logListDirectory(IgfsPath path, String[] files) {
    addEntry(new Entry(TYPE_DIR_LIST, path.toString(), null, null, null, null, null, null, null, null, null, null,
        null, null, null, null, null, null, files));
}
/**
 * Log delete event.
 *
 * @param path Path.
 * @param recursive Recursive flag.
 */
public void logDelete(IgfsPath path, boolean recursive) {
    addEntry(new Entry(TYPE_DELETE, path.toString(), null, null, null, null, null, null, null, null, null, null,
        null, null, null, null, null, recursive, null));
}
/**
 * Log file create event.
 *
 * @param streamId Stream ID.
 * @param path Path.
 * @param overwrite Overwrite flag.
 * @param bufSize Buffer size.
 * @param replication Replication factor.
 * @param blockSize Block size.
 */
public void logCreate(long streamId, IgfsPath path, boolean overwrite, int bufSize, int replication,
    long blockSize) {
    addEntry(new Entry(TYPE_OPEN_OUT, path.toString(), streamId, bufSize, null, false, overwrite, replication,
        blockSize, null, null, null, null, null, null, null, null, null, null));
}
/**
 * Check valid file system endpoint.
 *
 * @param authority Authority.
 * @throws Exception If failed.
 */
private void checkValid(String authority) throws Exception {
    FileSystem fs = fileSystem(authority, tcp);

    assert fs.exists(new Path(PATH.toString()));
}
/** {@inheritDoc} */
@Override public void preHandleDir(String strPath) throws Exception {
    IgfsPath path = new IgfsPath(strPath);

    if (fs.exists(path))
        throw new IgniteException("Path " + path + " already exists.");

    fs.mkdirs(path);
}
/**
 * Ensure that the given paths don't exist in the given IGFS.
 *
 * @param uni Secondary file system test adapter.
 * @param paths Paths.
 * @throws Exception If failed.
 */
protected void checkNotExist(IgfsSecondaryFileSystemTestAdapter uni, IgfsPath... paths) throws Exception {
    IgfsEx ex = uni.igfs();

    for (IgfsPath path : paths) {
        if (ex != null)
            assert !ex.exists(path) : "Path exists [igfs=" + ex.name() + ", path=" + path + ']';

        assert !uni.exists(path.toString()) : "Path exists [igfs=" + uni.name() + ", path=" + path + ']';
    }
}
/**
 * Tests whole job execution with all phases in all combinations of new and old versions of the API.
 *
 * @throws Exception If failed.
 */
@Test
public void testWholeMapReduceExecution() throws Exception {
    IgfsPath inDir = new IgfsPath(PATH_INPUT);

    igfs.mkdirs(inDir);

    IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");

    generateTestFile(inFile.toString(), "red", red, "blue", blue, "green", green, "yellow", yellow);

    for (boolean[] apiMode : getApiModes()) {
        assert apiMode.length == 3;

        boolean useNewMapper = apiMode[0];
        boolean useNewCombiner = apiMode[1];
        boolean useNewReducer = apiMode[2];

        doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
    }
}
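The body of getApiModes() is not shown above; only the boolean[] triple shape is implied by the loop. A minimal sketch that enumerates all eight mapper/combiner/reducer combinations might look like the following (the implementation is an assumption, not the original):

/**
 * Hypothetical enumeration of API modes: each triple is {useNewMapper, useNewCombiner, useNewReducer}.
 *
 * @return All eight combinations of old and new API for the three phases.
 */
protected boolean[][] getApiModes() {
    boolean[][] modes = new boolean[8][3];

    for (int i = 0; i < 8; i++) {
        modes[i][0] = (i & 4) != 0; // Use new-API mapper.
        modes[i][1] = (i & 2) != 0; // Use new-API combiner.
        modes[i][2] = (i & 1) != 0; // Use new-API reducer.
    }

    return modes;
}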
/**
 * Test update when parent is the root.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
@Test
public void testUpdateParentRoot() throws Exception {
    if (!propertiesSupported())
        return;

    Map<String, String> props = properties("owner", "group", "0555");

    create(igfs, paths(DIR), null);

    igfs.update(DIR, props);

    if (dual)
        assertEquals(props, igfsSecondary.properties(DIR.toString()));

    assertEquals(props, igfs.info(DIR).properties());
}
/**
 * Test update when parent is the root and the path being updated is missing locally.
 *
 * @param props Properties.
 * @throws Exception If failed.
 */
protected void doUpdateParentRootPathMissing(Map<String, String> props) throws Exception {
    if (!propertiesSupported())
        return;

    create(igfsSecondary, paths(DIR), null);
    create(igfs, null, null);

    igfs.update(DIR, props);

    checkExist(igfs, DIR);

    assertTrue(propertiesContains(igfsSecondary.properties(DIR.toString()), props));
    assertTrue(propertiesContains(igfs.info(DIR).properties(), props));
}
/**
 * Test update in case both local and remote file systems have the same folder structure.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
@Test
public void testUpdate() throws Exception {
    if (!propertiesSupported())
        return;

    Map<String, String> props = properties("owner", "group", "0555");

    create(igfs, paths(DIR, SUBDIR), paths(FILE));

    igfs.update(FILE, props);

    if (dual)
        assertEquals(props, igfsSecondary.properties(FILE.toString()));

    assertEquals(props, igfs.info(FILE).properties());
}