/**
 * Await acknowledgments.
 *
 * @throws IOException If failed.
 */
private void awaitAcks() throws IOException {
    try {
        igfsCtx.data().awaitAllAcksReceived(fileInfo.id());
    }
    catch (IgniteCheckedException e) {
        throw new IOException("Failed to wait for flush acknowledge: " + fileInfo.id(), e);
    }
}
/**
 * Flush remainder.
 *
 * @throws IOException If failed.
 */
private void flushRemainder() throws IOException {
    try {
        if (remainder != null) {
            remainder = igfsCtx.data().storeDataBlocks(fileInfo, length() + space, null, 0,
                ByteBuffer.wrap(remainder, 0, remainderDataLen), true, streamRange, batch);

            remainder = null;
            remainderDataLen = 0;
        }
    }
    catch (IgniteCheckedException e) {
        throw new IOException("Failed to flush data (remainder) [path=" + path + ", space=" + space + ']', e);
    }
}
// Store the buffered data through the storeDataBlocks() overload matching the runtime type of 'data'.
if (data instanceof ByteBuffer)
    remainder = igfsCtx.data().storeDataBlocks(fileInfo, length() + space, remainder, remainderDataLen,
        (ByteBuffer)data, false, streamRange, batch);
else
    remainder = igfsCtx.data().storeDataBlocks(fileInfo, length() + space, remainder, remainderDataLen,
        (DataInput)data, writeLen, false, streamRange, batch);
/**
 * Get data block for specified block index.
 *
 * @param blockIdx Block index.
 * @return Requested data block or {@code null} if nothing found.
 * @throws IgniteCheckedException If failed.
 */
@Nullable protected IgniteInternalFuture<byte[]> dataBlock(final long blockIdx) throws IgniteCheckedException {
    if (proxy) {
        assert secReader != null;

        final GridFutureAdapter<byte[]> fut = new GridFutureAdapter<>();

        igfsCtx.runInIgfsThreadPool(new Runnable() {
            @Override public void run() {
                try {
                    fut.onDone(igfsCtx.data().secondaryDataBlock(path, blockIdx, secReader, blockSize));
                }
                catch (Throwable e) {
                    fut.onDone(null, e);
                }
            }
        });

        return fut;
    }
    else {
        assert fileInfo != null;

        return igfsCtx.data().dataBlock(fileInfo, path, blockIdx, secReader);
    }
}
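// Minimal consumption sketch for the future returned above. The call site is
// hypothetical: block index 0 and the processBlock() helper are assumptions,
// not part of the original code.
IgniteInternalFuture<byte[]> blockFut = dataBlock(0);

byte[] block = blockFut.get(); // Blocks until the read completes; may be null if nothing was found.

if (block != null)
    processBlock(block); // Placeholder for caller-side handling of the bytes.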
/**
 * Constructs file output stream.
 *
 * @param igfsCtx IGFS context.
 * @param path Path to stored file.
 * @param fileInfo File info to write binary data to.
 * @param bufSize The size of the buffer to be used.
 * @param mode Grid IGFS mode.
 * @param batch Optional secondary file system batch.
 */
IgfsOutputStreamImpl(IgfsContext igfsCtx, IgfsPath path, IgfsEntryInfo fileInfo, int bufSize, IgfsMode mode,
    @Nullable IgfsFileWorkerBatch batch) {
    super(igfsCtx, path, bufSize, batch);

    assert fileInfo != null && fileInfo.isFile() : "Unexpected file info: " + fileInfo;
    assert mode != null && mode != PROXY && (mode == PRIMARY && batch == null || batch != null);

    // File hasn't been locked.
    if (fileInfo.lockId() == null)
        throw new IgfsException("Failed to acquire file lock (concurrently modified?): " + path);

    synchronized (mux) {
        this.fileInfo = fileInfo;
        this.mode = mode;

        streamRange = initialStreamRange(fileInfo);

        writeFut = igfsCtx.data().writeStart(fileInfo.id());
    }
}
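// Public-API sketch of how such a stream is normally obtained: user code never
// calls the internal constructor above directly. 'ignite' is an assumed running
// Ignite instance, "igfs" an assumed file system name and the path illustrative.
IgniteFileSystem fs = ignite.fileSystem("igfs");

try (OutputStream os = fs.create(new IgfsPath("/dir/file"), true /* overwrite */)) {
    os.write(new byte[] {1, 2, 3});
}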
IgfsDataManager data = igfs.context().data();
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    IgfsEx igfs = (IgfsEx)grid(0).fileSystem("igfs");

    mgr = igfs.context().data();
}
/**
 * Constructor.
 *
 * @param igfsCtx IGFS context.
 */
IgfsDeleteWorker(IgfsContext igfsCtx) {
    super("igfs-delete-worker%" + igfsCtx.igfs().name() + "%" + igfsCtx.kernalContext().localNodeId() + "%");

    this.igfsCtx = igfsCtx;

    meta = igfsCtx.meta();
    data = igfsCtx.data();

    assert meta != null;
    assert data != null;

    log = igfsCtx.kernalContext().log(IgfsDeleteWorker.class);
}
/**
 * Gets initial affinity range. This range will have 0 length and will start from first
 * non-occupied file block.
 *
 * @param fileInfo File info to build initial range for.
 * @return Affinity range.
 */
private IgfsFileAffinityRange initialStreamRange(IgfsEntryInfo fileInfo) {
    if (!igfsCtx.configuration().isFragmentizerEnabled())
        return null;

    if (!Boolean.parseBoolean(fileInfo.properties().get(IgfsUtils.PROP_PREFER_LOCAL_WRITES)))
        return null;

    int blockSize = fileInfo.blockSize();

    // Find first non-occupied block offset.
    long off = ((fileInfo.length() + blockSize - 1) / blockSize) * blockSize;

    // Need to get last affinity key and reuse it if we are on the same node.
    long lastBlockOff = off - fileInfo.blockSize();

    if (lastBlockOff < 0)
        lastBlockOff = 0;

    IgfsFileMap map = fileInfo.fileMap();

    IgniteUuid prevAffKey = map == null ? null : map.affinityKey(lastBlockOff, false);

    IgniteUuid affKey = igfsCtx.data().nextAffinityKey(prevAffKey);

    return affKey == null ? null : new IgfsFileAffinityRange(off, off, affKey);
}
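// Worked example of the ceiling arithmetic above (values are illustrative, not
// from the original code): with a 64 KB block size and a 100000-byte file, the
// first free block starts at offset 131072 and the last occupied one at 65536.
int blockSize = 65536;
long fileLen = 100000;

long off = ((fileLen + blockSize - 1) / blockSize) * blockSize;   // 131072 (start of block 2).
long lastBlockOff = Math.max(0, off - blockSize);                 // 65536 (start of block 1).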
evts = igfsCtx.kernalContext().event();

meta = igfsCtx.meta();
data = igfsCtx.data();

secondaryFs = cfg.getSecondaryFileSystem();
igfsCtx.data().writeClose(fileInfo.id());
igfsCtx.data().dataBlock(info, path, blockIdx, reader);
/**
 * Create data transfer object for given IGFS metrics.
 *
 * @param igfs Source IGFS.
 */
public VisorIgfsMetrics(IgniteFileSystem igfs) {
    assert igfs != null;

    IgfsMetrics m = igfs.metrics();

    totalSpaceSz = ((IgfsEx)igfs).context().data().maxSpaceSize();
    usedSpaceSz = m.localSpaceSize();
    foldersCnt = m.directoriesCount();
    filesCnt = m.filesCount();
    filesOpenedForRd = m.filesOpenedForRead();
    filesOpenedForWrt = m.filesOpenedForWrite();
    blocksRd = m.blocksReadTotal();
    blocksRdRmt = m.blocksReadRemote();
    blocksWrt = m.blocksWrittenTotal();
    blocksWrtRmt = m.blocksWrittenRemote();
    bytesRd = m.bytesRead();
    bytesRdTm = m.bytesReadTime();
    bytesWrt = m.bytesWritten();
    bytesWrtTm = m.bytesWriteTime();
}
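// Usage sketch (assumed running instance 'ignite' with a configured "igfs" file
// system): snapshot the public metrics into the DTO populated above.
IgniteFileSystem fs = ignite.fileSystem("igfs");

VisorIgfsMetrics dto = new VisorIgfsMetrics(fs);

IgfsMetrics m = fs.metrics(); // The same source values, e.g. m.filesCount(), m.bytesRead().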
/**
 * @throws Exception If failed.
 */
@Test
public void testAffinity() throws Exception {
    long fileSize = 32L * 1024 * 1024;

    IgfsPath filePath = new IgfsPath("/file");

    try (OutputStream os = igfs.create(filePath, true)) {
        for (int i = 0; i < fileSize / chunk.length; ++i)
            os.write(chunk);
    }

    long len = igfs.info(filePath).length();
    int start = 0;

    // Check default maxLen (maxLen = 0).
    for (int i = 0; i < igfs.context().data().groupBlockSize() / 1024; i++) {
        Collection<IgfsBlockLocation> blocks = igfs.affinity(filePath, start, len);

        assertEquals(F.first(blocks).start(), start);
        assertEquals(start + len, F.last(blocks).start() + F.last(blocks).length());

        len -= 1024 * 2;
        start += 1024;
    }
}
// Explicit zero maxLen behaves like the default overload.
for (int i = 0; i < igfs.context().data().groupBlockSize() / 1024; i++) {
    Collection<IgfsBlockLocation> blocks0 = igfs.affinity(filePath, start, len, 0);
    // ...
}

// Decrease the maxLen cap on each iteration.
long maxLen = igfs.context().data().groupBlockSize() * 2;

for (int i = 0; i < igfs.context().data().groupBlockSize() / 1024; i++) {
    blocks = igfs.affinity(filePath, start, len, maxLen);

    maxLen -= igfs.context().data().groupBlockSize() * 2 / 1024;
    // ...
}
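// Public-API sketch (range values illustrative): query block locations for the
// first megabyte of the file written in the test above and print where each
// block lives.
Collection<IgfsBlockLocation> locs = igfs.affinity(filePath, 0, 1024 * 1024);

for (IgfsBlockLocation loc : locs)
    System.out.println("start=" + loc.start() + ", len=" + loc.length() + ", nodes=" + loc.nodeIds());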
assertNotNull(metaCache.get(info.id()));

IgfsDataManager dataMgr = ((IgfsEx)igfs).context().data();