/**
 * Returns the next available cache-directive ID, post-incrementing the
 * counter. Caller must hold the namesystem write lock.
 *
 * @return a unique directive ID.
 * @throws IOException if the ID space is exhausted.
 */
private long getNextDirectiveId() throws IOException {
  assert namesystem.hasWriteLock();
  if (nextDirectiveId < Long.MAX_VALUE - 1) {
    return nextDirectiveId++;
  }
  throw new IOException("No more available IDs.");
}
/**
 * Resets the statistics of every tracked cache directive.
 * Caller must hold the namesystem write lock.
 */
public void clearDirectiveStats() {
  assert namesystem.hasWriteLock();
  directivesById.values().forEach(CacheDirective::resetStatistics);
}
/**
 * Whether the current thread holds the read lock; holding the write
 * lock also counts as holding the read lock.
 */
@Override
public boolean hasReadLock() {
  if (this.fsLock.getReadHoldCount() > 0) {
    return true;
  }
  return hasWriteLock();
}
/**
 * Shut down the monitor thread.
 *
 * <p>Must be called with the namesystem write lock held. Idempotent:
 * a second invocation returns immediately once 'shutdown' is set.
 */
@Override
public void close() throws IOException {
  Preconditions.checkArgument(namesystem.hasWriteLock());
  lock.lock();
  try {
    if (shutdown) return;
    // Since we hold both the FSN write lock and the CRM lock here,
    // we know that the CRM thread cannot be currently modifying
    // the cache manager state while we're closing it.
    // Since the CRM thread checks the value of 'shutdown' after waiting
    // for a lock, we know that the thread will not modify the cache
    // manager state after this point.
    shutdown = true;
    // Wake any thread parked on either condition so it can observe
    // 'shutdown' and exit.
    doRescan.signalAll();
    scanFinished.signalAll();
  } finally {
    lock.unlock();
  }
}
/**
 * Commits or completes the last block of a file under construction by
 * delegating to the block manager.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param fileINode the file whose last block is being committed; must be
 *        under construction.
 * @param iip resolved path components for the file.
 * @param commitBlock the block reported by the client as committed.
 * @throws IOException propagated from the block manager.
 */
void commitOrCompleteLastBlock(
    final INodeFile fileINode, final INodesInPath iip,
    final Block commitBlock) throws IOException {
  assert hasWriteLock();
  Preconditions.checkArgument(fileINode.isUnderConstruction());
  blockManager.commitOrCompleteLastBlock(fileINode, commitBlock, iip);
}
/**
 * Writes a ReassignLease op to the edit log. The log is not synced here;
 * callers are responsible for syncing later.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param leaseHolder the current holder of the lease
 * @param src path of the file whose lease is reassigned
 * @param newHolder the client taking over the lease
 */
private void logReassignLease(String leaseHolder, String src,
    String newHolder) {
  assert hasWriteLock();
  getEditLog().logReassignLease(leaseHolder, src, newHolder);
}
/**
 * Writes a summary of namespace object counts to the given writer, then
 * delegates block-level details to the block manager.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param out destination for the metaSave report.
 */
private void metaSave(PrintWriter out) {
  assert hasWriteLock();
  final long inodeCount = this.dir.totalInodes();
  final long blockCount = this.getBlocksTotal();
  final long objectCount = inodeCount + blockCount;
  out.println(inodeCount + " files and directories, " + blockCount
      + " blocks = " + objectCount + " total filesystem objects");
  blockManager.metaSave(out);
}
/**
 * Close file: logs a CloseFile op to the edit log so the completed file
 * state is persisted. The log is not synced here.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param path full path of the file being closed
 * @param file the file's INode
 */
private void closeFile(String path, INodeFile file) {
  assert hasWriteLock();
  // file is closed
  getEditLog().logCloseFile(path, file);
  NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted"
      + " to the file system", path, file.getBlocks().length);
}
/**
 * Test hook: after the configured Nth checkpoint submission has
 * completed, arms the pause flag and blocks this updater thread until a
 * test wakes it up. Must NOT be called with the FSDirectory or
 * FSNamesystem write lock held, since it may wait indefinitely.
 *
 * @throws InterruptedException if interrupted while waiting.
 */
private synchronized void checkPauseForTesting() throws InterruptedException {
  assert !dir.hasWriteLock();
  assert !dir.getFSNamesystem().hasWriteLock();
  if (pauseAfterNthCheckpoint != 0) {
    final ZoneSubmissionTracker tracker =
        handler.unprotectedGetTracker(pauseZoneId);
    if (tracker != null
        && tracker.numFutureDone == pauseAfterNthCheckpoint) {
      shouldPauseForTesting = true;
      pauseAfterNthCheckpoint = 0;
    }
  }
  while (shouldPauseForTesting) {
    LOG.info("Sleeping in the re-encryption updater for unit test.");
    wait();
    LOG.info("Continuing re-encryption updater after pausing.");
  }
}
/**
 * Removes the cache directive with the given ID after checking that the
 * caller may write to the directive's pool.
 * Caller must hold the namesystem write lock.
 *
 * @param id ID of the directive to remove.
 * @param pc permission checker for the calling user.
 * @throws IOException if the directive does not exist, the permission
 *         check fails, or removal fails.
 */
public void removeDirective(long id, FSPermissionChecker pc)
    throws IOException {
  assert namesystem.hasWriteLock();
  try {
    final CacheDirective target = getById(id);
    checkWritePermission(pc, target.getPool());
    removeInternal(target);
  } catch (IOException e) {
    LOG.warn("removeDirective of " + id + " failed: ", e);
    throw e;
  }
  LOG.info("removeDirective of " + id + " successful.");
}
/**
 * Persist the block list for the inode by logging an UpdateBlocks op to
 * the edit log. The log is not synced here.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param fsd the directory tree owning the edit log
 * @param path full path of the file
 * @param file the file's INode; must be under construction
 * @param logRetryCache whether to record this op in the retry cache
 */
static void persistBlocks(
    FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
  assert fsd.getFSNamesystem().hasWriteLock();
  Preconditions.checkArgument(file.isUnderConstruction());
  fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
  // Parameterized logging (consistent with closeFile) replaces the
  // manual isDebugEnabled() guard around string concatenation.
  NameNode.stateChangeLog.debug("persistBlocks: {} with {} blocks is"
      + " persisted to the file system", path, file.getBlocks().length);
}
/**
 * Reassigns the lease on a file under construction to a new holder:
 * updates the client name on the UC feature and re-registers the lease
 * with the lease manager. Does not write to the edit log.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param lease the existing lease
 * @param newHolder client name taking over the lease
 * @param pendingFile the under-construction file
 * @return the reassigned lease
 */
Lease reassignLeaseInternal(Lease lease, String newHolder,
    INodeFile pendingFile) {
  assert hasWriteLock();
  pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
  return leaseManager.reassignLease(lease, pendingFile, newHolder);
}
/**
 * Reassigns the lease on {@code src} to {@code newHolder}, logging the
 * reassignment to the edit log first. A null new holder is a no-op.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param lease the existing lease
 * @param src path of the file
 * @param newHolder client name taking over the lease, or null to keep
 *        the current holder
 * @param pendingFile the under-construction file
 * @return the (possibly unchanged) lease
 */
private Lease reassignLease(Lease lease, String src, String newHolder,
    INodeFile pendingFile) {
  assert hasWriteLock();
  if (newHolder == null)
    return lease;
  // The following transaction is not synced. Make sure it's sync'ed later.
  logReassignLease(lease.getHolder(), src, newHolder);
  return reassignLeaseInternal(lease, newHolder, pendingFile);
}
/**
 * Increments, logs and then returns the block ID.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param blockType is the file under striping or contiguous layout?
 * @return the newly allocated block ID
 * @throws IOException if the NameNode is in safe mode.
 */
private long nextBlockId(BlockType blockType) throws IOException {
  assert hasWriteLock();
  checkNameNodeSafeMode("Cannot get next block ID");
  final long blockId = blockManager.nextBlockId(blockType);
  getEditLog().logAllocateBlockId(blockId);
  // NB: callers sync the log
  return blockId;
}
/** * Create new block with a unique block id and a new generation stamp. * @param blockType is the file under striping or contiguous layout? */ Block createNewBlock(BlockType blockType) throws IOException { assert hasWriteLock(); Block b = new Block(nextBlockId(blockType), 0, 0); // Increment the generation stamp for every new block. b.setGenerationStamp(nextGenerationStamp(false)); return b; }
/**
 * Starts the delegation-token secret manager when tokens should be used,
 * the namesystem is out of safe mode, the edit log is open for writing,
 * and the manager is not already running.
 * Caller must hold the FSNamesystem write lock.
 */
@Override
public void startSecretManagerIfNecessary() {
  assert hasWriteLock() : "Starting secret manager needs write lock";
  final boolean eligible = shouldUseDelegationTokens()
      && !isInSafeMode()
      && getEditLog().isOpenForWrite();
  final boolean alreadyRunning = dtSecretManager.isRunning();
  if (eligible && !alreadyRunning) {
    startSecretManager();
  }
}
/**
 * Completes re-encryption of the given zone: logs the final statistics,
 * drops the zone's tracked submission, and finishes the zone's
 * re-encryption xattr state.
 * Caller must hold both the FSDirectory and FSNamesystem write locks.
 *
 * @param zoneNode root INode of the encryption zone.
 * @return the xattrs written for the finished zone.
 * @throws IOException propagated from the xattr update.
 */
List<XAttr> completeReencryption(final INode zoneNode) throws IOException {
  assert dir.hasWriteLock();
  assert dir.getFSNamesystem().hasWriteLock();
  final Long id = zoneNode.getId();
  final ZoneReencryptionStatus status =
      getReencryptionStatus().getZoneStatus(id);
  assert status != null;
  LOG.info("Re-encryption completed on zone {}. Re-encrypted {} files,"
      + " failures encountered: {}.", zoneNode.getFullPathName(),
      status.getFilesReencrypted(), status.getNumReencryptionFailures());
  synchronized (this) {
    submissions.remove(id);
  }
  return FSDirEncryptionZoneOp.updateReencryptionFinish(dir,
      INodesInPath.fromINode(zoneNode), status);
}
/**
 * Increments, logs and then returns the stamp.
 * Caller must hold the FSNamesystem write lock. Legacy and current
 * stamps are logged as distinct edit-log op types.
 *
 * @param legacyBlock whether to allocate from the legacy stamp sequence
 * @return the new generation stamp
 * @throws IOException if the NameNode is in safe mode.
 */
long nextGenerationStamp(boolean legacyBlock) throws IOException {
  assert hasWriteLock();
  checkNameNodeSafeMode("Cannot get next generation stamp");
  long gs = blockManager.nextGenerationStamp(legacyBlock);
  if (legacyBlock) {
    getEditLog().logLegacyGenerationStamp(gs);
  } else {
    getEditLog().logGenerationStamp(gs);
  }
  // NB: callers sync the log
  return gs;
}
/** * Save the batch's edeks to file xattrs. */ static void saveFileXAttrsForBatch(FSDirectory fsd, List<FileEdekInfo> batch) { assert fsd.getFSNamesystem().hasWriteLock(); assert !fsd.hasWriteLock(); if (batch != null && !batch.isEmpty()) { for (FileEdekInfo entry : batch) { final INode inode = fsd.getInode(entry.getInodeId()); // no dir lock, so inode could be removed. no-op if so. if (inode == null) { NameNode.LOG.info("Cannot find inode {}, skip saving xattr for" + " re-encryption", entry.getInodeId()); continue; } fsd.getEditLog().logSetXAttrs(inode.getFullPathName(), inode.getXAttrFeature().getXAttrs(), false); } } }
/**
 * Turns a file under construction into a complete, permanent INode:
 * records the modification against the latest snapshot, converts the
 * inode to a complete file, releases its lease, persists the close to
 * the edit log, and re-checks block redundancy.
 * Caller must hold the FSNamesystem write lock.
 *
 * @param src path of the file being finalized
 * @param pendingFile the under-construction INode
 * @param latestSnapshot id of the latest snapshot covering the file
 * @param allowCommittedBlock whether a committed-but-not-yet-complete
 *        last block is acceptable (bounded by numCommittedAllowed)
 * @throws IOException if the file is not under construction
 */
void finalizeINodeFileUnderConstruction(String src, INodeFile pendingFile,
    int latestSnapshot, boolean allowCommittedBlock) throws IOException {
  assert hasWriteLock();

  FileUnderConstructionFeature uc =
      pendingFile.getFileUnderConstructionFeature();
  if (uc == null) {
    throw new IOException("Cannot finalize file " + src
        + " because it is not under construction");
  }

  pendingFile.recordModification(latestSnapshot);

  // The file is no longer pending.
  // Create permanent INode, update blocks. No need to replace the inode here
  // since we just remove the uc feature from pendingFile
  pendingFile.toCompleteFile(now(),
      allowCommittedBlock ? numCommittedAllowed : 0,
      blockManager.getMinReplication());

  // Release the lease before persisting so the holder can no longer write.
  leaseManager.removeLease(uc.getClientName(), pendingFile);

  // close file and persist block allocations for this file
  closeFile(src, pendingFile);

  blockManager.checkRedundancy(pendingFile);
}