/** {@inheritDoc} */
@Override public IgfsOutputStream create(IgfsPath path, boolean overwrite) {
    // Plain pass-through to the wrapped file system.
    return igfs.create(path, overwrite);
}
/** {@inheritDoc} */
@Override public IgfsOutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
    long blockSize, @Nullable Map<String, String> props) {
    // Delegate with all tuning parameters forwarded unchanged.
    return igfs.create(path, bufSize, overwrite, replication, blockSize, props);
}
/** {@inheritDoc} */
@Override public IgfsOutputStream create(IgfsPath path, int bufSize, boolean overwrite,
    @Nullable IgniteUuid affKey, int replication, long blockSize, @Nullable Map<String, String> props) {
    // Delegate, including the optional affinity key.
    return igfs.create(path, bufSize, overwrite, affKey, replication, blockSize, props);
}
/** * Create the given directories and files in the given IGFS. * * @param igfs IGFS. * @param dirs Directories. * @param files Files. * @throws Exception If failed. */ @SuppressWarnings("EmptyTryBlock") public static void create(IgfsImpl igfs, @Nullable IgfsPath[] dirs, @Nullable IgfsPath[] files) throws Exception { if (dirs != null) { for (IgfsPath dir : dirs) igfs.mkdirs(dir); } if (files != null) { for (IgfsPath file : files) { try (OutputStream ignored = igfs.create(file, true)) { // No-op. } igfs.await(file); } } }
@Override public void run() {
    try {
        // Pick a random directory level (1-based) and a random existing parent at that level.
        int lvl = rand.nextInt(lvlCnt) + 1;

        IgfsPath parentPath = dirPaths.get(lvl).get(rand.nextInt(dirPaths.get(lvl).size()));

        // Unique file name via shared counter so concurrent workers never collide.
        IgfsPath path = new IgfsPath(parentPath, "newFile-" + fileCtr.incrementAndGet());

        // Line up with the other workers so all creates start simultaneously.
        U.awaitQuiet(barrier);

        IgfsOutputStream os = null;

        try {
            os = igfs.create(path, true);

            os.write(chunk);
        }
        finally {
            // Quiet close: create/write failures are expected under concurrency and ignored below.
            U.closeQuiet(os);
        }
    }
    catch (IOException | IgniteException ignore) {
        // No-op.
    }
} };
/**
 * Create the file in the given IGFS and write provided data chunks to it.
 *
 * @param igfs IGFS.
 * @param file File.
 * @param overwrite Overwrite flag.
 * @param blockSize Block size.
 * @param chunks Data chunks.
 * @throws Exception If failed.
 */
protected static void createFile(IgfsImpl igfs, IgfsPath file, boolean overwrite, long blockSize,
    @Nullable byte[]... chunks) throws Exception {
    IgfsOutputStream out = null;

    try {
        // Buffer size 256, no affinity key, default replication factor.
        out = igfs.create(file, 256, overwrite, null, 0, blockSize, null);

        writeFileChunks(out, chunks);
    }
    finally {
        // Close quietly, then block until IGFS has fully processed the close.
        U.closeQuiet(out);

        awaitFileClose(igfs, file);
    }
}
os = igfs.create(FILE, true);
@Override public void run() {
    // Each worker writes to its own file, so there is no cross-thread contention on a path.
    int idx = ctr.incrementAndGet();

    final IgfsPath path = new IgfsPath("/file" + idx);

    try {
        for (int i = 0; i < REPEAT_CNT; i++) {
            // Overwrite on every iteration: buffer size 128, no affinity key, block size 256.
            // NOTE(review): stream is not closed in a finally block — a write failure leaks it. Acceptable for a test.
            IgfsOutputStream os = igfs.create(path, 128, true/*overwrite*/, null, 0, 256, null);

            os.write(chunk);

            os.close();

            assert igfs.exists(path);
        }

        // Ensure the final close has been fully processed before verifying content.
        awaitFileClose(igfs, path);

        checkFileContent(igfs, path, chunk);
    }
    catch (IOException | IgniteCheckedException e) {
        err.compareAndSet(null, e); // Log the very first error.
    }
} }, threadCnt);
/**
 * Test rename on the file when it was opened for write(create) and is not closed yet.
 *
 * @throws Exception If failed.
 */
@Test
public void testCreateRenameNoClose() throws Exception {
    if (dual)
        return;

    create(igfs, paths(DIR, SUBDIR), null);

    IgfsOutputStream out = null;

    try {
        out = igfs.create(FILE, true);

        // Rename while the stream is still open, then close it.
        igfs.rename(FILE, FILE2);

        out.close();
    }
    finally {
        U.closeQuiet(out);
    }
}
/**
 * Test rename on the file parent when it was opened for write(create) and is not closed yet.
 *
 * @throws Exception If failed.
 */
@Test
public void testCreateRenameParentNoClose() throws Exception {
    if (dual)
        return;

    create(igfs, paths(DIR, SUBDIR), null);

    IgfsOutputStream out = null;

    try {
        out = igfs.create(FILE, true);

        // Rename the parent directory while the stream is still open, then close it.
        igfs.rename(SUBDIR, SUBDIR2);

        out.close();
    }
    finally {
        U.closeQuiet(out);
    }
}
/** * Ensure that IGFS is able to stop in case not closed output stream exist. * * @throws Exception If failed. */ @Test public void testStop() throws Exception { create(igfs, paths(DIR, SUBDIR), null); IgfsOutputStream os = igfs.create(FILE, true); os.write(chunk); igfs.stop(true); // Reset test state. afterTestsStopped(); beforeTestsStarted(); }
/**
 * Checks simple write.
 *
 * @throws Exception On error.
 */
@Test
public void testSimpleWrite() throws Exception {
    IgfsPath path = new IgfsPath("/file1");

    // First pass: single chunk.
    IgfsOutputStream out = igfs.create(path, 128, true/*overwrite*/, null, 0, 256, null);

    out.write(chunk);

    out.close();

    assert igfs.exists(path);
    checkFileContent(igfs, path, chunk);

    // Second pass: overwrite with two chunks, checking existence at every step.
    out = igfs.create(path, 128, true/*overwrite*/, null, 0, 256, null);

    assert igfs.exists(path);

    out.write(chunk);

    assert igfs.exists(path);

    out.write(chunk);

    assert igfs.exists(path);

    out.close();

    assert igfs.exists(path);
    checkFileContent(igfs, path, chunk, chunk);
}
/** * Ensure that a DUAL mode file is not propagated to eviction policy * * @throws Exception If failed. */ @Test public void testFileDualExclusion() throws Exception { start(); evictPlc.setExcludePaths(Collections.singleton(FILE_RMT.toString())); // Create file in primary mode. It must not be propagated to eviction policy. igfsPrimary.create(FILE_RMT, true).close(); checkEvictionPolicy(0, 0); int blockSize = igfsPrimary.info(FILE_RMT).blockSize(); append(FILE_RMT, blockSize); checkEvictionPolicy(0, 0); read(FILE_RMT, 0, blockSize); checkEvictionPolicy(0, 0); }
/**
 * Test update on the file when it was opened for write(create) and is not closed yet.
 *
 * @throws Exception If failed.
 */
@Test
public void testCreateUpdateNoClose() throws Exception {
    if (dual)
        return;

    if (!propertiesSupported())
        return;

    Map<String, String> props = properties("owner", "group", "0555");

    create(igfs, paths(DIR, SUBDIR), null);

    IgfsOutputStream out = null;

    try {
        out = igfs.create(FILE, true);

        // Update properties while the stream is still open, then close it.
        igfs.update(FILE, props);

        out.close();
    }
    finally {
        U.closeQuiet(out);
    }
}
/**
 * @throws Exception If failed.
 */
@Test
public void testDeleteFragmentizing() throws Exception {
    IgfsImpl igfs = (IgfsImpl)grid(0).fileSystem("igfs");

    // Create a batch of files large enough to trigger fragmentizing.
    for (int fileIdx = 0; fileIdx < 30; fileIdx++) {
        IgfsPath path = new IgfsPath("/someFile" + fileIdx);

        try (IgfsOutputStream out = igfs.create(path, true)) {
            for (int blockIdx = 0; blockIdx < 5 * IGFS_GROUP_SIZE; blockIdx++)
                out.write(new byte[IGFS_BLOCK_SIZE]);
        }

        U.sleep(200);
    }

    igfs.clear();

    // Deletion is asynchronous, so retry until every node's data cache is empty.
    GridTestUtils.retryAssert(log, 50, 100, new CA() {
        @Override public void apply() {
            for (int nodeIdx = 0; nodeIdx < NODE_CNT; nodeIdx++) {
                IgniteEx g = grid(nodeIdx);

                GridCacheAdapter<Object, Object> cache = ((IgniteKernal)g).internalCache(
                    g.igfsx("igfs").configuration().getDataCacheConfiguration().getName());

                assertTrue("Data cache is not empty [keys=" + cache.keySet() +
                    ", node=" + g.localNode().id() + ']', cache.isEmpty());
            }
        }
    });
}
/** * Test how evictions are handled for a file working in PRIMARY mode. * * @throws Exception If failed. */ @Test public void testFilePrimary() throws Exception { start(); // Create file in primary mode. It must not be propagated to eviction policy. igfsPrimary.create(FILE, true).close(); checkEvictionPolicy(0, 0); int blockSize = igfsPrimary.info(FILE).blockSize(); append(FILE, blockSize); checkEvictionPolicy(0, 0); read(FILE, 0, blockSize); checkEvictionPolicy(0, 0); }
/** * @throws Exception If failed. */ @Test public void testAffinity() throws Exception { long fileSize = 32L * 1024 * 1024; IgfsPath filePath = new IgfsPath("/file"); try (OutputStream os = igfs.create(filePath, true)) { for(int i = 0; i < fileSize / chunk.length; ++i) os.write(chunk); } long len = igfs.info(filePath).length(); int start = 0; // Check default maxLen (maxLen = 0) for (int i = 0; i < igfs.context().data().groupBlockSize() / 1024; i++) { Collection<IgfsBlockLocation> blocks = igfs.affinity(filePath, start, len); assertEquals(F.first(blocks).start(), start); assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length()); len -= 1024 * 2; start += 1024; } }
/** * Test how evictions are handled for a file working in PRIMARY mode. * * @throws Exception If failed. */ @Test public void testFileDual() throws Exception { start(); igfsPrimary.create(FILE_RMT, true).close(); checkEvictionPolicy(0, 0); int blockSize = igfsPrimary.info(FILE_RMT).blockSize(); // File write. append(FILE_RMT, blockSize); checkEvictionPolicy(1, blockSize); // One more write. append(FILE_RMT, blockSize); checkEvictionPolicy(2, blockSize * 2); // Read. read(FILE_RMT, 0, blockSize); checkEvictionPolicy(2, blockSize * 2); }
/** * Test eviction caused by too much blocks. * * @throws Exception If failed. */ @Test public void testBlockCountEviction() throws Exception { start(); int blockCnt = 3; evictPlc.setMaxBlocks(blockCnt); igfsPrimary.create(FILE_RMT, true).close(); checkEvictionPolicy(0, 0); int blockSize = igfsPrimary.info(FILE_RMT).blockSize(); // Write blocks up to the limit. append(FILE_RMT, blockSize * blockCnt); checkEvictionPolicy(blockCnt, blockCnt * blockSize); // Write one more block what should cause eviction. append(FILE_RMT, blockSize); checkEvictionPolicy(blockCnt, blockCnt * blockSize); // Read the first block. read(FILE_RMT, 0, blockSize); checkEvictionPolicy(blockCnt, blockCnt * blockSize); checkMetrics(1, 1); }