/**
 * Writes a chunk of data into the output stream wrapped by the given delegate.
 * Any failure is first reported to the registered stream event listener (if any),
 * then rethrown; an {@link IllegalStateException} (grid is stopping) is converted
 * into an {@link IOException} so callers see a single exception type.
 *
 * @param delegate Stream delegate wrapping the target output stream.
 * @param data Data buffer.
 * @param off Offset in the buffer.
 * @param len Number of bytes to write.
 * @throws IOException If the write fails or the grid is stopping.
 */
@Override public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len)
    throws IOException {
    try {
        IgfsOutputStream target = delegate.target();

        target.write(data, off, len);
    }
    catch (IllegalStateException | IOException err) {
        // Notify the listener registered for this stream, if one exists.
        HadoopIgfsStreamEventListener evtLsnr = lsnrs.get(delegate);

        if (evtLsnr != null)
            evtLsnr.onError(err.getMessage());

        // IllegalStateException signals grid shutdown; surface it as IOException.
        if (err instanceof IllegalStateException)
            throw new IOException("Failed to write data to IGFS stream because Grid is stopping.", err);

        throw err;
    }
}
/**
 * Append some data to the given file.
 *
 * @param path File path.
 * @param len Data length.
 * @throws Exception If failed.
 */
private void append(IgfsPath path, int len) throws Exception {
    // try-with-resources ensures the stream is closed even if the write fails
    // (the original leaked the stream on a write error).
    try (IgfsOutputStream os = igfsPrimary.append(path, false)) {
        os.write(new byte[len]);
    }
}
/**
 * Write data to the file, overwriting any previous content.
 *
 * @param chunks Data chunks; {@code null} produces an empty file.
 * @throws Exception In case of exception.
 */
protected void write(byte[]... chunks) throws Exception {
    // try-with-resources ensures the stream is closed even if a write fails
    // (the original leaked the stream on a write error).
    try (IgfsOutputStream os = igfs.create(FILE, true)) {
        if (chunks != null) {
            for (byte[] chunk : chunks)
                os.write(chunk);
        }
    }
}
@Override public void run() { try { int lvl = rand.nextInt(lvlCnt) + 1; IgfsPath parentPath = dirPaths.get(lvl).get(rand.nextInt(dirPaths.get(lvl).size())); IgfsPath path = new IgfsPath(parentPath, "newFile-" + fileCtr.incrementAndGet()); U.awaitQuiet(barrier); IgfsOutputStream os = null; try { os = igfs.create(path, true); os.write(chunk); } finally { U.closeQuiet(os); } } catch (IOException | IgniteException ignore) { // No-op. } } };
/** Creates the file from the attacker node, writing only after the latch is released. */
@Override public Void call() throws Exception {
    IgniteFileSystem fs = attacker.fileSystem(IGFS_NAME);

    try (IgfsOutputStream os = fs.create(path, true)) {
        // Hold off writing until the main thread gives the go-ahead.
        writeStartLatch.await();

        os.write(new byte[1024]);

        os.flush();
    }

    return null;
} });
/** Keeps appending chunks until the data region runs out of memory. */
@Override public Object call() throws Exception {
    IgfsOutputStream os = igfs(0).append(path, false);

    try {
        int iters = maxSize / writeChunkSize * GRID_CNT;

        for (int i = 0; i < iters; ++i)
            os.write(chunk(writeChunkSize));

        os.close();

        return null;
    }
    catch (IOException e) {
        // Unwrap to the root cause so the surrounding assertion sees the real failure.
        Throwable root = e;

        while (root.getCause() != null)
            root = root.getCause();

        throw (Exception)root;
    }
    finally {
        U.closeQuiet(os);
    }
} }, IgniteOutOfMemoryException.class, "Not enough memory allocated");
// Appender worker: keeps appending one chunk at a time to FILE until asked to
// stop or until some worker has already recorded an error.
@SuppressWarnings("ThrowFromFinallyBlock")
@Override public void run() {
    while (!stop.get() && err.get() == null) {
        IgfsOutputStream os = null;

        try {
            os = igfs.append(FILE, false);

            os.write(chunk);

            os.close();

            // Count only fully written-and-closed chunks.
            chunksCtr.incrementAndGet();
        }
        catch (IgniteException ignore) {
            // No-op: IgniteException from append/write is deliberately tolerated here.
        }
        catch (IOException e) {
            // Record only the very first I/O error; later ones are dropped.
            err.compareAndSet(null, e);
        }
        finally {
            // Second close on the success path — presumably idempotent on an already
            // closed stream (TODO confirm); a close failure here is fatal for the worker.
            if (os != null)
                try {
                    os.close();
                }
                catch (IOException ioe) {
                    throw new IgniteException(ioe);
                }
        }
    }
} }, threadCnt);
// NOTE(review): fragment of a larger method — overwrites FILE and writes one chunk;
// presumably the stream is closed elsewhere in the enclosing method (not visible here).
os = igfs.create(FILE, true); os.write(chunk);
/** {@inheritDoc} */
@Override public void handleFile(String strPath) throws Exception {
    IgfsPath path = new IgfsPath(strPath);

    IgfsOutputStream out;

    // Creation failures are reported separately from write failures.
    try {
        out = fs.create(path, false);
    }
    catch (IgniteException e) {
        System.out.println("create file " + path.toString() + " failed: " + e);

        throw e;
    }

    try {
        // Fill the file with whole buffers until the requested size is reached.
        int writes = size / dataBufer.capacity();

        for (int i = 0; i < writes; i++)
            out.write(dataBufer.array());
    }
    catch (IOException e) {
        System.out.println("write file " + path.toString() + " failed: " + e);

        throw e;
    }
    finally {
        out.close();
    }
}
@Override public void run() { int idx = ctr.getAndIncrement(); IgfsPath path = new IgfsPath("/file" + idx); try { byte[][] chunks = new byte[REPEAT_CNT][]; for (int i = 0; i < REPEAT_CNT; i++) { chunks[i] = chunk; IgfsOutputStream os = igfs.append(path, false); os.write(chunk); os.close(); assert igfs.exists(path); } awaitFileClose(igfs, path); checkFileContent(igfs, path, chunks); } catch (IOException | IgniteCheckedException e) { err.compareAndSet(null, e); // Log the very first error. } } }, threadCnt);
@Override public void run() { int idx = ctr.incrementAndGet(); final IgfsPath path = new IgfsPath("/file" + idx); try { for (int i = 0; i < REPEAT_CNT; i++) { IgfsOutputStream os = igfs.create(path, 128, true/*overwrite*/, null, 0, 256, null); os.write(chunk); os.close(); assert igfs.exists(path); } awaitFileClose(igfs, path); checkFileContent(igfs, path, chunk); } catch (IOException | IgniteCheckedException e) { err.compareAndSet(null, e); // Log the very first error. } } }, threadCnt);
/**
 * Checks simple write.
 *
 * @throws Exception On error.
 */
@Test public void testSimpleWrite() throws Exception {
    IgfsPath path = new IgfsPath("/file1");

    // First pass: create the file and write a single chunk.
    IgfsOutputStream out = igfs.create(path, 128, true/*overwrite*/, null, 0, 256, null);

    out.write(chunk);

    out.close();

    assert igfs.exists(path);

    checkFileContent(igfs, path, chunk);

    // Second pass: overwrite the file with two chunks, checking existence after every step.
    out = igfs.create(path, 128, true/*overwrite*/, null, 0, 256, null);

    assert igfs.exists(path);

    out.write(chunk);

    assert igfs.exists(path);

    out.write(chunk);

    assert igfs.exists(path);

    out.close();

    assert igfs.exists(path);

    checkFileContent(igfs, path, chunk, chunk);
}
/** * @throws Exception If failed. */ @Test public void testReadFragmentizing() throws Exception { IgniteFileSystem igfs = grid(0).fileSystem("igfs"); IgfsPath path = new IgfsPath("/someFile"); try (IgfsOutputStream out = igfs.create(path, true)) { // Write 10 groups. for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++) { byte[] data = new byte[IGFS_BLOCK_SIZE]; Arrays.fill(data, (byte)i); out.write(data); } } long start = System.currentTimeMillis(); do { try (IgfsInputStream in = igfs.open(path)) { for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++) { for (int j = 0; j < IGFS_BLOCK_SIZE; j++) assertEquals(i & 0xFF, in.read()); } assertEquals(-1, in.read()); } } while (System.currentTimeMillis() - start < 7000); }
/** * Ensure that IGFS is able to stop in case not closed output stream exist. * * @throws Exception If failed. */ @Test public void testStop() throws Exception { create(igfs, paths(DIR, SUBDIR), null); IgfsOutputStream os = igfs.create(FILE, true); os.write(chunk); igfs.stop(true); // Reset test state. afterTestsStopped(); beforeTestsStarted(); }
// NOTE(review): fragment of a larger method — opens an append stream on the file and
// writes one chunk sized to the block length; presumably closed later in the
// enclosing method (not visible here).
IgfsOutputStream os = igfs(0).append(path, false); os.write(chunk(block.length()));
/**
 * Ensure correct size calculation.
 *
 * @throws Exception If failed.
 */
@Test public void testSize() throws Exception {
    IgfsPath dir1 = path("/dir1");
    IgfsPath subDir1 = path("/dir1/subdir1");
    IgfsPath dir2 = path("/dir2");

    IgfsPath fileDir1 = path("/dir1/file");
    IgfsPath fileSubdir1 = path("/dir1/subdir1/file");
    IgfsPath fileDir2 = path("/dir2/file");

    // Create three files of known sizes.
    IgfsOutputStream out = igfs.create(fileDir1, false);
    out.write(new byte[1000]);
    out.close();

    out = igfs.create(fileSubdir1, false);
    out.write(new byte[2000]);
    out.close();

    out = igfs.create(fileDir2, false);
    out.write(new byte[4000]);
    out.close();

    // File sizes are reported exactly.
    assert igfs.size(fileDir1) == 1000;
    assert igfs.size(fileSubdir1) == 2000;
    assert igfs.size(fileDir2) == 4000;

    // Directory sizes aggregate the files beneath them.
    assert igfs.size(dir1) == 3000;
    assert igfs.size(subDir1) == 2000;
    assert igfs.size(dir2) == 4000;
}
/** * @throws Exception If failed. */ @Test public void testCoordinatorLeave() throws Exception { stopGrid(0); // Now node 1 should be coordinator. try { IgfsPath path = new IgfsPath("/someFile"); IgniteFileSystem igfs = grid(1).fileSystem("igfs"); try (IgfsOutputStream out = igfs.create(path, true)) { for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++) out.write(new byte[IGFS_BLOCK_SIZE]); } awaitFileFragmenting(1, path); } finally { startGrid(0); } } }
/**
 * @throws Exception If failed.
 */
@Test public void testDeleteFragmentizing() throws Exception {
    IgfsImpl igfs = (IgfsImpl)grid(0).fileSystem("igfs");

    // Create 30 files with a short pause between them.
    for (int i = 0; i < 30; i++) {
        IgfsPath path = new IgfsPath("/someFile" + i);

        try (IgfsOutputStream out = igfs.create(path, true)) {
            for (int j = 0; j < 5 * IGFS_GROUP_SIZE; j++)
                out.write(new byte[IGFS_BLOCK_SIZE]);
        }

        U.sleep(200);
    }

    igfs.clear();

    // After clearing, every node's data cache must eventually drain.
    GridTestUtils.retryAssert(log, 50, 100, new CA() {
        @Override public void apply() {
            for (int i = 0; i < NODE_CNT; i++) {
                IgniteEx node = grid(i);

                GridCacheAdapter<Object, Object> dataCache = ((IgniteKernal)node).internalCache(
                    node.igfsx("igfs").configuration().getDataCacheConfiguration().getName());

                assertTrue("Data cache is not empty [keys=" + dataCache.keySet() +
                    ", node=" + node.localNode().id() + ']', dataCache.isEmpty());
            }
        }
    });
}
/** @throws Exception If failed. */ @Test public void testCreateFileColocated() throws Exception { IgfsPath path = new IgfsPath("/colocated"); UUID uuid = UUID.randomUUID(); IgniteUuid affKey; long idx = 0; while (true) { affKey = new IgniteUuid(uuid, idx); if (grid(0).affinity(grid(0).igfsx("igfs").configuration().getDataCacheConfiguration() .getName()).mapKeyToNode(affKey).id().equals(grid(0).localNode().id())) break; idx++; } try (IgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) { // Write 5M, should be enough to test distribution. for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]); } IgfsFile info = fs.info(path); Collection<IgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length()); assertEquals(1, affNodes.size()); Collection<UUID> nodeIds = F.first(affNodes).nodeIds(); assertEquals(1, nodeIds.size()); assertEquals(grid(0).localNode().id(), F.first(nodeIds)); }