/**
 * Resolves the cache key for a single file block.
 *
 * @param blockIdx Block index.
 * @param fileInfo File info.
 * @return Block key.
 */
public IgfsBlockKey blockKey(long blockIdx, IgfsEntryInfo fileInfo) {
    IgniteUuid affKey;

    // Prefer an explicit per-file affinity key; otherwise consult the file map
    // for the key covering this block's offset; fall back to no affinity.
    if (fileInfo.affinityKey() != null)
        affKey = fileInfo.affinityKey();
    else if (fileInfo.fileMap() != null)
        affKey = fileInfo.fileMap().affinityKey(blockIdx * fileInfo.blockSize(), false);
    else
        affKey = null;

    return new IgfsBlockKey(fileInfo.id(), affKey, fileInfo.evictExclude(), blockIdx);
}
/**
 * Constructs directory info.
 *
 * @param path Path.
 * @param info Entry info.
 * @param globalGrpBlockSize Global group block size.
 */
public IgfsFileImpl(IgfsPath path, IgfsEntryInfo info, long globalGrpBlockSize) {
    A.notNull(path, "path");
    A.notNull(info, "info");

    this.path = path;

    fileId = info.id();

    flags = IgfsUtils.flags(info.isDirectory(), info.isFile());

    if (info.isFile()) {
        blockSize = info.blockSize();
        len = info.length();

        // Group block size falls back to the global value when the file has no
        // affinity key or is empty; otherwise the file length itself is used.
        if (info.affinityKey() == null || info.length() == 0)
            grpBlockSize = globalGrpBlockSize;
        else
            grpBlockSize = info.length();
    }

    props = info.properties();

    if (props == null)
        props = Collections.emptyMap();

    accessTime = info.accessTime();
    modificationTime = info.modificationTime();
}
assert info.isDirectory(); final Map<String, IgfsListingEntry> listing = info.listing(); assert fileInfo.isFile(); IgfsEntryInfo lockedInfo = meta.lock(fileInfo.id(), true); assert IgfsUtils.DELETE_LOCK_ID.equals(lockedInfo.lockId());
/**
 * Creates a listing entry mirroring the identity and kind of the given entry info.
 *
 * @param fileInfo File info to construct listing entry from.
 */
public IgfsListingEntry(IgfsEntryInfo fileInfo) {
    dir = fileInfo.isDirectory();
    id = fileInfo.id();
}
/** {@inheritDoc} */ @Override protected int optimizeBufferSize(int bufSize) { assert bufSize > 0; if (fileInfo == null) return bufSize; int blockSize = fileInfo.blockSize(); if (blockSize <= 0) return bufSize; if (bufSize <= blockSize) // Optimize minimum buffer size to be equal file's block size. return blockSize; int maxBufSize = blockSize * MAX_BLOCKS_CNT; if (bufSize > maxBufSize) // There is no profit or optimization from larger buffers. return maxBufSize; if (fileInfo.length() == 0) // Make buffer size multiple of block size (optimized for new files). return bufSize / blockSize * blockSize; return bufSize; }
false/*overwrite*/, bufSize, (short)0, 0, null, infos, pendingEvts, outT1); else { if (info.isDirectory()) throw fsException("Failed to open output stream to the file in the " + "secondary file system because the path points to a directory: " + path); long len = info.length(); int blockSize = info.blockSize(); if (info.lockId() != null) { throw fsException("Failed to open file (file is opened for writing) [path=" + path + ", fileId=" + info.id() + ", lockId=" + info.lockId() + ']'); lockedInfo = invokeLock(info.id(), false);
return null; assert info.isDirectory(); Map<String, IgfsListingEntry> listing = info.listing(); if (!exclude.contains(fileInfo.id()) && fileInfo.fileMap() != null && !fileInfo.fileMap().ranges().isEmpty()) return fileInfo;
info = info.length(info.length() + data.length + remainder.length); IgniteInternalFuture<Boolean> fut = mgr.writeStart(info.id()); byte[] left = mgr.storeDataBlocks(info, info.length(), remainder, remainder.length, ByteBuffer.wrap(data), false, range, null); info = info.length(info.length() + remainder2.length); byte[] left2 = mgr.storeDataBlocks(info, info.length(), left, left.length, ByteBuffer.wrap(remainder2), false, range, null); mgr.writeClose(info.id()); for (int pos = 0, block = 0; pos < info.length(); block++) { byte[] stored = mgr.dataBlock(info, path, block, null).get(); boolean b = true; for (long block = 0; block < info.blocksCount(); block++) b &= mgr.dataBlock(info, path, block, null).get() == null;
IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), grpIdx * grpSize);
/**
 * Tries to remove blocks affected by fragmentizer. If {@code cleanNonColocated} is {@code true}, will remove
 * non-colocated blocks as well.
 *
 * @param fileInfo File info to clean up.
 * @param range Range to clean up.
 * @param cleanNonColocated {@code True} if all blocks should be cleaned.
 */
public void cleanBlocks(IgfsEntryInfo fileInfo, IgfsFileAffinityRange range, boolean cleanNonColocated) {
    long blockSize = fileInfo.blockSize();

    // Block indexes covering the range, inclusive on both ends.
    long startIdx = range.startOffset() / blockSize;
    long endIdx = range.endOffset() / blockSize;

    if (log.isDebugEnabled())
        log.debug("Cleaning blocks [fileInfo=" + fileInfo + ", range=" + range +
            ", cleanNonColocated=" + cleanNonColocated + ", startIdx=" + startIdx + ", endIdx=" + endIdx + ']');

    try {
        try (IgniteDataStreamer<IgfsBlockKey, byte[]> ldr = dataStreamer()) {
            for (long idx = startIdx; idx <= endIdx; idx++) {
                // Remove the colocated block keyed by this range's affinity key.
                ldr.removeData(new IgfsBlockKey(fileInfo.id(), range.affinityKey(), fileInfo.evictExclude(), idx));

                // Optionally remove the plain (non-colocated) block as well.
                if (cleanNonColocated)
                    ldr.removeData(new IgfsBlockKey(fileInfo.id(), null, fileInfo.evictExclude(), idx));
            }
        }
    }
    catch (IgniteException e) {
        log.error("Failed to clean up file range [fileInfo=" + fileInfo + ", range=" + range + ']', e);
    }
}
/** {@inheritDoc} */ @Override public Void process(MutableEntry<IgniteUuid, IgfsEntryInfo> e, Object... args) throws EntryProcessorException { IgfsEntryInfo fileInfo = e.getValue(); assert fileInfo.isDirectory(); Map<String, IgfsListingEntry> listing = new HashMap<>(fileInfo.listing()); // Modify listing in-place. IgfsListingEntry oldEntry = listing.get(name); if (oldEntry == null) throw new IgniteException("Directory listing doesn't contain expected entry: " + name); listing.put(name, new IgfsListingEntry(id, oldEntry.isDirectory())); e.setValue(fileInfo.listing(listing)); return null; }
/** * Gets initial affinity range. This range will have 0 length and will start from first * non-occupied file block. * * @param fileInfo File info to build initial range for. * @return Affinity range. */ private IgfsFileAffinityRange initialStreamRange(IgfsEntryInfo fileInfo) { if (!igfsCtx.configuration().isFragmentizerEnabled()) return null; if (!Boolean.parseBoolean(fileInfo.properties().get(IgfsUtils.PROP_PREFER_LOCAL_WRITES))) return null; int blockSize = fileInfo.blockSize(); // Find first non-occupied block offset. long off = ((fileInfo.length() + blockSize - 1) / blockSize) * blockSize; // Need to get last affinity key and reuse it if we are on the same node. long lastBlockOff = off - fileInfo.blockSize(); if (lastBlockOff < 0) lastBlockOff = 0; IgfsFileMap map = fileInfo.fileMap(); IgniteUuid prevAffKey = map == null ? null : map.affinityKey(lastBlockOff, false); IgniteUuid affKey = igfsCtx.data().nextAffinityKey(prevAffKey); return affKey == null ? null : new IgfsFileAffinityRange(off, off, affKey); }
throw new IgfsPathNotFoundException("File not found: " + path); if (!info.isFile()) throw new IgfsPathIsDirectoryException("Failed to open file (not a file): " + path); info.length(), info.blockSize(), info.blocksCount(), false); info.length(), info.blockSize(), info.blocksCount(), false);
if (dstParentInfo.isFile()) throw new IgfsPathAlreadyExistsException("Failed to perform move because destination points " + "to existing file [src=" + srcPath + ", dst=" + dstPath + ']'); if (dstParentInfo.hasChild(dstName)) throw new IgfsPathAlreadyExistsException("Failed to perform move because destination already " + "contains entry with the same name existing file [src=" + srcPath + IgfsListingEntry srcEntry = srcParentInfo.listing().get(srcName); transferEntry(srcEntry, srcParentInfo.id(), srcName, dstParentInfo.id(), dstName); srcInfo.isFile() ? EVT_IGFS_FILE_RENAMED : EVT_IGFS_DIR_RENAMED);
IgfsFileMap map = fileInfo.fileMap(); Object old = fragmentingFiles.putIfAbsent(fileInfo.id(), nodeIds); UUID nodeId = entry.getKey(); IgfsFragmentizerRequest msg = new IgfsFragmentizerRequest(fileInfo.id(), entry.getValue()); ", fileId=" + fileInfo.id() + ", msg=" + msg + ']'); log.debug("Got empty wait set for fragmentized file: " + fileInfo); fragmentingFiles.remove(fileInfo.id(), nodeIds);
/**
 * Constructs file output stream.
 *
 * @param igfsCtx IGFS context.
 * @param path Path to stored file.
 * @param fileInfo File info to write binary data to.
 * @param bufSize The size of the buffer to be used.
 * @param mode Grid IGFS mode.
 * @param batch Optional secondary file system batch.
 */
IgfsOutputStreamImpl(IgfsContext igfsCtx, IgfsPath path, IgfsEntryInfo fileInfo, int bufSize, IgfsMode mode,
    @Nullable IgfsFileWorkerBatch batch) {
    super(igfsCtx, path, bufSize, batch);

    assert fileInfo != null && fileInfo.isFile() : "Unexpected file info: " + fileInfo;

    // PROXY mode is unsupported; in PRIMARY mode no batch is expected, in any other allowed mode a batch is required.
    assert mode != null && mode != PROXY && (mode == PRIMARY && batch == null || batch != null);

    // File hasn't been locked.
    if (fileInfo.lockId() == null)
        throw new IgfsException("Failed to acquire file lock (concurrently modified?): " + path);

    // Initialize stream state under the mutex so a concurrent close/flush sees it consistently.
    synchronized (mux) {
        this.fileInfo = fileInfo;
        this.mode = mode;

        streamRange = initialStreamRange(fileInfo);

        // NOTE(review): writeStart presumably registers this stream as an active writer
        // for the file id and returns a completion future — confirm against data manager.
        writeFut = igfsCtx.data().writeStart(fileInfo.id());
    }
}
if (info.isDirectory()) { if (!deleteDirectoryContents(trashId, id)) return false; assert info.isFile(); return false; // File is locked, we cannot delete it. assert id.equals(lockedInfo.id());
Map<String, IgfsListingEntry> parentListing = parentInfo.listing(); assert entryInfo.isDirectory() || IgfsUtils.DELETE_LOCK_ID.equals(entryInfo.lockId()); if (!entryInfo.hasChildren()) { id2InfoPrj.remove(childId); id2InfoPrj.put(parentId, parentInfo.listing(newListing));
assertTrue(info.isFile()); assertNotNull(metaCache.get(info.id())); for (int i = 0; i < info.blocksCount(); i++) assertNotNull(dataCache.get(dataMgr.blockKey(i, info))); if (metaCache.get(info.id()) == null) break; assertNull(metaCache.get(info.id())); for (int j = 0; j < info.blocksCount(); j++) { if (dataCache.get(dataMgr.blockKey(i, info)) != null) { doBreak = false; for (int i = 0; i < info.blocksCount(); i++) assertNull(dataCache.get(new IgfsBlockKey(info.id(), null, false, i)));
/** {@inheritDoc} */
@Override public Void process(MutableEntry<IgniteUuid, IgfsEntryInfo> e, Object... args)
    throws EntryProcessorException {
    IgfsEntryInfo info = e.getValue();

    assert info != null;

    // Release the lock, stamping the new modification time.
    IgfsEntryInfo unlocked = info.unlock(modificationTime);

    if (updateSpace) {
        // Extend the file map with the written range and grow the length accordingly.
        IgfsFileMap map = new IgfsFileMap(unlocked.fileMap());

        map.addRange(affRange);

        unlocked = unlocked.length(unlocked.length() + space).fileMap(map);
    }

    e.setValue(unlocked);

    return null;
}