@Override public Object call() throws Exception { igfs.create(path(path), overwrite); return false; } }, IgfsException.class, null);
/**
 * Generates the test file {@code FILE} (overwriting any previous content) with
 * {@code wordCnt} randomly chosen dictionary words, each followed by a single space.
 *
 * @param wordCnt Word count.
 * @throws Exception If failed.
 */
private void generateFile(int wordCnt) throws Exception {
    Random rnd = new Random(System.currentTimeMillis());

    try (OutputStreamWriter writer = new OutputStreamWriter(igfs.create(FILE, true))) {
        for (int written = 0; written < wordCnt; written++)
            writer.write(DICTIONARY[rnd.nextInt(DICTIONARY.length)] + " ");
    }
}
/**
 * Creates (overwriting) the file at {@code filePath} in the test IGFS and fills
 * it with 1000 numbered "Hello, world: N" lines.
 *
 * @param filePath File path to prepare.
 * @throws Exception If failed.
 */
private void prepareTestFile(String filePath) throws Exception {
    IgniteFileSystem fs = grid(0).fileSystem(igfsName);

    try (IgfsOutputStream out = fs.create(new IgfsPath(filePath), true)) {
        PrintWriter writer = new PrintWriter(new OutputStreamWriter(out));

        int line = 0;

        while (line < 1000)
            writer.println("Hello, world: " + line++);

        // Flush the PrintWriter before try-with-resources closes the underlying stream.
        writer.flush();
    }
}
/**
 * Creates (overwriting) the file {@code fileName} in the test IGFS containing
 * {@code lineCnt} copies of a fixed line terminated with {@code '\n'}.
 *
 * @param fileName File name.
 * @param lineCnt Line count.
 * @throws Exception If failed.
 */
private void prepareFile(String fileName, int lineCnt) throws Exception {
    IgniteFileSystem fs = grid(0).fileSystem(igfsName);

    try (OutputStream out = fs.create(new IgfsPath(fileName), true)) {
        PrintWriter writer = new PrintWriter(new OutputStreamWriter(out));

        // print("...\n") is deliberate: a literal '\n', not the platform line separator.
        for (int line = 0; line < lineCnt; line++)
            writer.print("Hello, Hadoop map-reduce!\n");

        // Flush before try-with-resources closes the underlying stream.
        writer.flush();
    }
}
@Override public Object call() throws Exception { int id = cnt.incrementAndGet(); IgfsPath f = new IgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : "")); try (IgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) { assertNotNull(out); cleanUp.add(f); // Add all created into cleanup list. U.copy(new IgfsTestInputStream(size, salt), out); } return null; } }, WRITING_THREADS_CNT, "perform-multi-thread-writing");
/**
 * Writes the given data chunks to {@code FILE}, overwriting any previous content.
 *
 * <p>The output stream is opened via try-with-resources so it is closed even
 * when one of the writes throws (the previous code leaked the stream on
 * exception because {@code close()} was not in a {@code finally} block).
 *
 * @param chunks Data chunks; {@code null} produces an empty file.
 * @throws Exception In case of exception.
 */
protected void write(byte[]... chunks) throws Exception {
    try (IgfsOutputStream os = igfs.create(FILE, true)) {
        if (chunks != null) {
            for (byte[] chunk : chunks)
                os.write(chunk);
        }
    }
}
@Override public Void call() throws Exception { IgniteFileSystem igfs = attacker.fileSystem(IGFS_NAME); try (IgfsOutputStream out = igfs.create(path, true)) { writeStartLatch.await(); out.write(new byte[1024]); out.flush(); } return null; } });
/**
 * Prepares IGFS for a job run: clears the file system, creates the input
 * directory and writes a single-word input file into it.
 *
 * @throws Exception If failed.
 */
private void beforeJob() throws Exception {
    IgniteFileSystem fs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    fs.clear();

    fs.mkdirs(new IgfsPath(PATH_INPUT));

    IgfsPath inputFile = new IgfsPath(PATH_INPUT + "/test.file");

    try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(fs.create(inputFile, true)))) {
        writer.write("word");
    }
}
/** {@inheritDoc} */
@Override public void handleFile(String strPath) throws Exception {
    IgfsPath path = new IgfsPath(strPath);

    IgfsOutputStream out;

    try {
        // Create without overwrite: fails if the file already exists.
        out = fs.create(path, false);
    }
    catch (IgniteException ex) {
        // Report the failing path before rethrowing so the caller's stack trace has context.
        System.out.println("create file " + path.toString() + " failed: " + ex);

        throw ex;
    }

    try {
        // NOTE(review): integer division -- any remainder of size % dataBufer.capacity()
        // is never written; presumably size is a multiple of the buffer capacity. Verify.
        for (int i = 0; i < size / dataBufer.capacity(); i++)
            out.write(dataBufer.array());
    }
    catch (IOException ex) {
        System.out.println("write file " + path.toString() + " failed: " + ex);

        throw ex;
    }
    finally {
        // Close unconditionally so a failed write does not leak the stream.
        out.close();
    }
}
/**
 * Creates a file at the given path, writes the supplied text into it as UTF-8,
 * verifies the file is visible via {@code info()}, and reads its content back.
 *
 * @param path File path to create.
 * @param overwrite Overwrite file if it already exists.
 * @param text Text to write into file.
 * @return Content of this file.
 * @throws Exception In case of error.
 */
private String create(String path, boolean overwrite, String text) throws Exception {
    try (IgfsOutputStream out = igfs.create(path(path), overwrite)) {
        IOUtils.write(text, out, UTF_8);
    }

    // The file must be visible immediately after the stream is closed.
    assertNotNull(igfs.info(path(path)));

    return read(path);
}
/**
 * Repeatedly re-reads a fragmented file for 7 seconds while the fragmentizer
 * may be relocating blocks, verifying every read sees consistent data.
 *
 * @throws Exception If failed.
 */
@Test public void testReadFragmentizing() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem("igfs");

    IgfsPath path = new IgfsPath("/someFile");

    try (IgfsOutputStream out = igfs.create(path, true)) {
        // Write 10 groups.
        for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++) {
            // Block i is filled entirely with the byte value (byte)i.
            byte[] data = new byte[IGFS_BLOCK_SIZE];

            Arrays.fill(data, (byte)i);

            out.write(data);
        }
    }

    long start = System.currentTimeMillis();

    // Keep re-reading the whole file for 7 seconds so reads overlap fragmenting.
    do {
        try (IgfsInputStream in = igfs.open(path)) {
            for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++) {
                // read() returns an unsigned int, so compare against the low 8 bits of i.
                for (int j = 0; j < IGFS_BLOCK_SIZE; j++)
                    assertEquals(i & 0xFF, in.read());
            }

            // End of file must be reached exactly after the written data.
            assertEquals(-1, in.read());
        }
    }
    while (System.currentTimeMillis() - start < 7000);
}
/**
 * Ensure correct size calculation for files and (recursively) for directories.
 *
 * @throws Exception If failed.
 */
@Test public void testSize() throws Exception {
    IgfsPath dir1 = path("/dir1");
    IgfsPath subDir1 = path("/dir1/subdir1");
    IgfsPath dir2 = path("/dir2");

    IgfsPath fileDir1 = path("/dir1/file");
    IgfsPath fileSubdir1 = path("/dir1/subdir1/file");
    IgfsPath fileDir2 = path("/dir2/file");

    writeFileOfSize(fileDir1, 1000);
    writeFileOfSize(fileSubdir1, 2000);
    writeFileOfSize(fileDir2, 4000);

    // File sizes are reported exactly.
    assert igfs.size(fileDir1) == 1000;
    assert igfs.size(fileSubdir1) == 2000;
    assert igfs.size(fileDir2) == 4000;

    // Directory size is the sum of all files beneath it (recursively).
    assert igfs.size(dir1) == 3000;
    assert igfs.size(subDir1) == 2000;
    assert igfs.size(dir2) == 4000;
}

/**
 * Creates a file at the given path containing {@code len} zero bytes.
 *
 * <p>Uses try-with-resources so the stream is closed even if the write throws
 * (the previous inline create/write/close sequences leaked the stream on
 * exception and were copy-pasted three times).
 *
 * @param file File path.
 * @param len File length in bytes.
 * @throws Exception If failed.
 */
private void writeFileOfSize(IgfsPath file, int len) throws Exception {
    try (IgfsOutputStream os = igfs.create(file, false)) {
        os.write(new byte[len]);
    }
}
/**
 * Starts two nodes and verifies the IGFS data/meta caches are started on both,
 * then performs a file write through IGFS on the first node as a smoke check.
 *
 * @throws Exception If failed.
 */
@Test public void testCacheStart() throws Exception {
    Ignite g0 = G.start(config(true, 0));

    // Cache names come from the IGFS configuration of the first (seed) node.
    String dataCacheName = ((IgniteEx)g0).igfsx("igfs").configuration().getDataCacheConfiguration().getName();
    String metaCacheName = ((IgniteEx)g0).igfsx("igfs").configuration().getMetaCacheConfiguration().getName();

    checkIgfsCaches(g0, dataCacheName, metaCacheName);

    Ignite g1 = G.start(config(false, 1));

    // A node joining later must end up with the same IGFS caches started.
    checkIgfsCaches(g1, dataCacheName, metaCacheName);

    IgniteFileSystem igfs = g0.fileSystem("igfs");

    igfs.mkdirs(new IgfsPath("/test"));

    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(
        new IgfsPath("/test/test.file"), true)))) {

        for (int i = 0; i < 1000; i++)
            bw.write("test-" + i);
    }
}
try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create( new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
/**
 * Checks that file fragmenting keeps working after the coordinator node leaves:
 * stops node 0 (node 1 becomes coordinator), writes a large file through node 1
 * and waits for fragmenting to occur there.
 *
 * @throws Exception If failed.
 */
@Test public void testCoordinatorLeave() throws Exception {
    stopGrid(0); // Now node 1 should be coordinator.

    try {
        IgfsPath path = new IgfsPath("/someFile");

        IgniteFileSystem igfs = grid(1).fileSystem("igfs");

        try (IgfsOutputStream out = igfs.create(path, true)) {
            // Write 10 full block groups -- enough data to trigger fragmenting.
            for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++)
                out.write(new byte[IGFS_BLOCK_SIZE]);
        }

        awaitFileFragmenting(1, path);
    }
    finally {
        // Restore the topology for subsequent tests even if this one fails.
        startGrid(0);
    }
}
}
/**
 * Verifies that a file created with an affinity key mapped to the local node
 * ends up fully colocated on that single node.
 *
 * @throws Exception If failed.
 */
@Test public void testCreateFileColocated() throws Exception {
    IgfsPath path = new IgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    IgniteUuid affKey;

    long idx = 0;

    // Probe successive IgniteUuid local IDs until one maps to grid 0's local node.
    while (true) {
        affKey = new IgniteUuid(uuid, idx);

        if (grid(0).affinity(grid(0).igfsx("igfs").configuration().getDataCacheConfiguration()
            .getName()).mapKeyToNode(affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (IgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15MB (15 x 1MB), should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    IgfsFile info = fs.info(path);

    Collection<IgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    // All blocks must reside on exactly one node -- the local one.
    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
/**
 * Closing an IGFS output or input stream more than once must be a no-op and
 * must not distort the open-streams metrics.
 *
 * @throws Exception If failed.
 */
@Test public void testMultipleClose() throws Exception {
    IgniteFileSystem fs = igfsPrimary[0];

    IgfsPath file = new IgfsPath("/primary/file");

    IgfsOutputStream out = fs.create(file, false);

    // The second close must be silently ignored.
    out.close();
    out.close();

    IgfsInputStream in = fs.open(file);

    in.close();
    in.close();

    IgfsMetrics metrics = fs.metrics();

    // Double closes must not drive the open counts negative or leave them positive.
    assertEquals(0, metrics.filesOpenedForWrite());
    assertEquals(0, metrics.filesOpenedForRead());
}
/**
 * Creates a file and verifies that its metadata entry (including the listing)
 * is present in the meta cache of every node while the stream is still open.
 *
 * @throws Exception If failed.
 */
@Test public void testCreate() throws Exception {
    IgfsPath path = path("/file");

    try (IgfsOutputStream os = igfs.create(path, false)) {
        assert os != null;

        IgfsFileImpl info = (IgfsFileImpl)igfs.info(path);

        // Metadata must be replicated: peek locally on every node.
        for (int node = 0; node < nodesCount(); node++) {
            IgfsEntryInfo fileInfo = (IgfsEntryInfo)grid(node).cachex(metaCacheName).localPeek(info.fileId(), null);

            assertNotNull(fileInfo);
            assertNotNull(fileInfo.listing());
        }
    }
    finally {
        // Clean the whole file system so subsequent tests start fresh.
        igfs.delete(path("/"), true);
    }
}
buf[i] = (byte)(i * i); IgfsOutputStream os = igfs.create(path, bufSize, true, null, 0, 1024, null);
String dataCacheName = grid(0).igfsx("igfs").configuration().getDataCacheConfiguration().getName(); try (IgfsOutputStream out = igfs.create(path, true)) { out.write(new byte[10 * 1024 * 1024]);