/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    metaCacheStartLatch = new CountDownLatch(1);

    // Cache configuration and derive the meta cache name from it right away.
    cfg = igfsCtx.configuration();
    metaCacheName = cfg.getMetaCacheConfiguration().getName();

    sampling = new IgfsSamplingKey(cfg.getName());

    // Kernal-context-backed facilities: event storage and logging.
    evts = igfsCtx.kernalContext().event();
    log = igfsCtx.kernalContext().log(IgfsMetaManager.class);
}
/**
 * Constructor.
 *
 * @param igfsCtx IGFS context.
 */
IgfsDeleteWorker(IgfsContext igfsCtx) {
    super("igfs-delete-worker%" + igfsCtx.igfs().name() + "%" + igfsCtx.kernalContext().localNodeId() + "%");

    this.igfsCtx = igfsCtx;

    log = igfsCtx.kernalContext().log(IgfsDeleteWorker.class);

    // Meta and data managers must already be available on the context.
    meta = igfsCtx.meta();
    assert meta != null;

    data = igfsCtx.data();
    assert data != null;
}
/** {@inheritDoc} */
@Override @Nullable public Collection<IpcServerEndpoint> endpoints(String name) {
    if (name == null)
        throw new IllegalArgumentException("IGFS name cannot be null");

    IgfsContext igfsCtx = igfsCache.get(name);

    // Unknown IGFS name: report no endpoints rather than failing.
    if (igfsCtx == null)
        return Collections.<IpcServerEndpoint>emptyList();

    return igfsCtx.server().endpoints();
}
/** * Constructs IGFS IPC handler. * * @param igfsCtx Context. * @param endpointCfg Endpoint configuration. * @param mgmt Management flag. */ IgfsIpcHandler(IgfsContext igfsCtx, IgfsIpcEndpointConfiguration endpointCfg, boolean mgmt) { assert igfsCtx != null; ctx = igfsCtx.kernalContext(); igfs = igfsCtx.igfs(); // Keep buffer size multiple of block size so no extra byte array copies is performed. bufSize = igfsCtx.configuration().getBlockSize() * 2; // Create thread pool for request handling. int threadCnt = endpointCfg.getThreadCount(); String prefix = "igfs-" + igfsCtx.igfs().name() + (mgmt ? "mgmt-" : "") + "-ipc"; pool = new IgniteThreadPoolExecutor(prefix, igfsCtx.kernalContext().igniteInstanceName(), threadCnt, threadCnt, Long.MAX_VALUE, new LinkedBlockingQueue<Runnable>()); log = ctx.log(IgfsIpcHandler.class); }
/**
 * Get data block for specified block index.
 *
 * @param blockIdx Block index.
 * @return Requested data block or {@code null} if nothing found.
 * @throws IgniteCheckedException If failed.
 */
@Nullable protected IgniteInternalFuture<byte[]> dataBlock(final long blockIdx) throws IgniteCheckedException {
    // Non-proxy mode: serve the block directly from the data manager.
    if (!proxy) {
        assert fileInfo != null;

        return igfsCtx.data().dataBlock(fileInfo, path, blockIdx, secReader);
    }

    // Proxy mode: read through the secondary file system on the IGFS thread pool.
    assert secReader != null;

    final GridFutureAdapter<byte[]> res = new GridFutureAdapter<>();

    igfsCtx.runInIgfsThreadPool(new Runnable() {
        @Override public void run() {
            try {
                res.onDone(igfsCtx.data().secondaryDataBlock(path, blockIdx, secReader, blockSize));
            }
            catch (Throwable e) {
                // Propagate any failure through the future instead of losing it in the pool.
                res.onDone(null, e);
            }
        }
    });

    return res;
}
/**
 * Constructor.
 *
 * Derives the worker's instance name, thread name and logger from the
 * enclosing manager's IGFS context.
 */
protected FragmentizerWorker() {
    super(igfsCtx.kernalContext().igniteInstanceName(), "fragmentizer-worker",
        igfsCtx.kernalContext().log(IgfsFragmentizerManager.class));
}
cfg = igfsCtx.configuration(); log = igfsCtx.kernalContext().log(IgfsImpl.class); evts = igfsCtx.kernalContext().event(); meta = igfsCtx.meta(); data = igfsCtx.data(); secondaryFs = cfg.getSecondaryFileSystem(); igfsCtx.kernalContext().resource().injectGeneric(secondaryFs); igfsCtx.kernalContext().resource().injectFileSystem(secondaryFs, this); String dataCacheName = igfsCtx.configuration().getDataCacheConfiguration().getName(); for (CacheConfiguration cacheCfg : igfsCtx.kernalContext().config().getCacheConfiguration()) { if (F.eq(dataCacheName, cacheCfg.getName())) { EvictionPolicy evictPlc = cacheCfg.getEvictionPolicyFactory() != null ?
req.finishUnmarshal(igfsCtx.kernalContext().config().getMarshaller(), null); IgfsEntryInfo fileInfo = igfsCtx.meta().info(fileId); case RANGE_STATUS_INITIAL: { updated = igfsCtx.meta().updateInfo( fileId, new IgfsMetaFileRangeUpdateProcessor(range, RANGE_STATUS_MOVING)); igfsCtx.data().cleanBlocks(fileInfo, range, true); igfsCtx.data().spreadBlocks(fileInfo, range); updated = igfsCtx.meta().updateInfo( fileId, new IgfsMetaFileRangeUpdateProcessor(range, RANGE_STATUS_MOVED)); igfsCtx.data().cleanBlocks(fileInfo, range, true); igfsCtx.data().cleanBlocks(fileInfo, range, false); updated = igfsCtx.meta().updateInfo(fileId, new IgfsMetaFileRangeDeleteProcessor(range)); igfsCtx.data().cleanBlocks(fileInfo, range, true);
/** {@inheritDoc} */
@Override protected List<IgniteUuid> call0(IgfsContext ctx) throws Exception {
    // Resolve the chain of entry IDs for the path via the meta manager.
    return ctx.meta().idsForPath(path);
}
/**
 * Await acknowledgments for all blocks written to the stream's file.
 *
 * @throws IOException If failed to receive the acknowledgments.
 */
private void awaitAcks() throws IOException {
    try {
        igfsCtx.data().awaitAllAcksReceived(fileInfo.id());
    }
    catch (IgniteCheckedException e) {
        // Consistency fix: use the id() accessor here as well (the message previously
        // read the raw "id" field while the call above used the accessor).
        throw new IOException("Failed to wait for flush acknowledge: " + fileInfo.id(), e);
    }
}
@Override public IgniteFileSystem apply(IgfsContext igfsCtx) { return igfsCtx.igfs(); } };
/**
 * Updates IGFS metrics when the stream is closed.
 */
protected void updateMetricsOnClose() {
    IgfsLocalMetrics metrics = igfsCtx.metrics();

    // Account for bytes written over the stream's lifetime and release the write slot.
    metrics.addWrittenBytesTime(bytes, time);
    metrics.decrementFilesOpenedForWrite();

    GridEventStorageManager evtMgr = igfsCtx.kernalContext().event();

    if (evtMgr.isRecordable(EVT_IGFS_FILE_CLOSED_WRITE))
        evtMgr.record(new IgfsEvent(path, igfsCtx.localNode(), EVT_IGFS_FILE_CLOSED_WRITE, bytes));
}
igfsCtx.configuration().getBlockSize(), status.length(), affKey, createFileLockId(false), igfsCtx.igfs().evictExclude(path, false), status.properties(), status.accessTime(), igfsCtx.data().delete(oldInfo);
IgfsMetaManager meta = igfs.context().meta(); IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache( igfs.configuration().getDataCacheConfiguration().getName());
/** {@inheritDoc} */ @Override public void stop(boolean cancel) { // Stop IGFS instances. for (IgfsContext igfsCtx : igfsCache.values()) { if (log.isDebugEnabled()) log.debug("Stopping igfs: " + igfsCtx.configuration().getName()); List<IgfsManager> mgrs = igfsCtx.managers(); for (ListIterator<IgfsManager> it = mgrs.listIterator(mgrs.size()); it.hasPrevious();) { IgfsManager mgr = it.previous(); mgr.stop(cancel); } igfsCtx.igfs().stop(cancel); } igfsCache.clear(); if (log.isDebugEnabled()) log.debug("IGFS processor stopped."); }
/**
 * Flushes this output stream and forces any buffered output bytes to be written out.
 *
 * @throws IOException if an I/O error occurs.
 */
@Override public void flush() throws IOException {
    synchronized (mux) {
        checkClosed(null, 0);

        // Order matters: push buffered bytes, then the remainder, then wait for write acks.
        sendBufferIfNotEmpty();

        flushRemainder();

        awaitAcks();

        // Update file length if needed.
        if (igfsCtx.configuration().isUpdateFileLengthOnFlush() && space > 0) {
            try {
                // Reserve the flushed space in metadata; null means the file vanished concurrently.
                IgfsEntryInfo fileInfo0 = igfsCtx.meta().reserveSpace(fileInfo.id(), space, streamRange);

                if (fileInfo0 == null)
                    throw new IOException("File was concurrently deleted: " + path);
                else
                    fileInfo = fileInfo0;

                // Recompute affinity range and reset the unreserved-space counter.
                streamRange = initialStreamRange(fileInfo);

                space = 0;
            }
            catch (IgniteCheckedException e) {
                throw new IOException("Failed to update file length data [path=" + path +
                    ", space=" + space + ']', e);
            }
        }
    }
}
/** * Gets initial affinity range. This range will have 0 length and will start from first * non-occupied file block. * * @param fileInfo File info to build initial range for. * @return Affinity range. */ private IgfsFileAffinityRange initialStreamRange(IgfsEntryInfo fileInfo) { if (!igfsCtx.configuration().isFragmentizerEnabled()) return null; if (!Boolean.parseBoolean(fileInfo.properties().get(IgfsUtils.PROP_PREFER_LOCAL_WRITES))) return null; int blockSize = fileInfo.blockSize(); // Find first non-occupied block offset. long off = ((fileInfo.length() + blockSize - 1) / blockSize) * blockSize; // Need to get last affinity key and reuse it if we are on the same node. long lastBlockOff = off - fileInfo.blockSize(); if (lastBlockOff < 0) lastBlockOff = 0; IgfsFileMap map = fileInfo.fileMap(); IgniteUuid prevAffKey = map == null ? null : map.affinityKey(lastBlockOff, false); IgniteUuid affKey = igfsCtx.data().nextAffinityKey(prevAffKey); return affKey == null ? null : new IgfsFileAffinityRange(off, off, affKey); }
IgfsUtils.createFile( IgniteUuid.randomUuid(), igfsCtx.configuration().getBlockSize(), status.length(), null, null, igfsCtx.igfs().evictExclude(curPath, false), status.properties(), status.accessTime(),