/**
 * Initialize the set of journals backing this edit log from the configured
 * edit directories. Local (file) URIs are wrapped in FileJournalManagers
 * backed by their StorageDirectory; all other schemes are delegated to
 * createJournal(). Logs an error (but does not throw) if no journals result.
 *
 * @param dirs the configured edits directories
 */
private synchronized void initJournals(List<URI> dirs) {
  int minimumRedundantJournals = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,
      DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);

  // Hoisted out of the loop: the set of required edits dirs depends only on
  // the conf, so compute it once instead of re-deriving it for every URI.
  List<URI> requiredEditsDirs = FSNamesystem.getRequiredNamespaceEditsDirs(conf);

  synchronized(journalSetLock) {
    journalSet = new JournalSet(minimumRedundantJournals);

    for (URI u : dirs) {
      boolean required = requiredEditsDirs.contains(u);
      if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
        StorageDirectory sd = storage.getStorageDirectory(u);
        if (sd != null) {
          // Local directory: only add it if NNStorage actually knows about it.
          journalSet.add(new FileJournalManager(conf, sd, storage),
              required, sharedEditsDirs.contains(u));
        }
      } else {
        // Non-local scheme (e.g. qjournal): pluggable journal implementation.
        journalSet.add(createJournal(u), required,
            sharedEditsDirs.contains(u));
      }
    }
  }

  if (journalSet.isEmpty()) {
    LOG.error("No edits directories configured!");
  }
}
/**
 * Archive any log files that are older than the given txid.
 *
 * If the edit log is not open for write, then this call returns with no
 * effect.
 *
 * @param minTxIdToKeep the lowest transaction id that must be retained
 */
@Override
public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
  // Should not purge logs unless they are open for write.
  // This prevents the SBN from purging logs on shared storage, for example.
  if (!isOpenForWrite()) {
    return;
  }

  // Sanity check: never asked to keep a txid beyond the start of the segment
  // currently being written. INVALID_TXID means the log was just formatted
  // and no segment exists yet, so the comparison is skipped.
  assert curSegmentTxId == HdfsServerConstants.INVALID_TXID || // on format this is no-op
    minTxIdToKeep <= curSegmentTxId :
    "cannot purge logs older than txid " + minTxIdToKeep +
    " when current segment starts at " + curSegmentTxId;
  if (minTxIdToKeep == 0) {
    // A txid of 0 would purge nothing; skip the journal call entirely.
    return;
  }

  // This could be improved to not need synchronization. But currently,
  // journalSet is not threadsafe, so we need to synchronize this method.
  try {
    journalSet.purgeLogsOlderThan(minTxIdToKeep);
  } catch (IOException ex) {
    //All journals have failed, it will be handled in logSync.
  }
}
/**
 * Collect edit-log input streams beginning at {@code fromTxId}, delegating
 * entirely to the underlying JournalSet.
 *
 * @param streams        output collection the selected streams are added to
 * @param fromTxId       first transaction id the streams must cover
 * @param inProgressOk   whether in-progress segments may be returned
 * @param onlyDurableTxns whether to restrict to durably-committed txns
 * @throws IOException if the journal set fails to select streams
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxId, boolean inProgressOk,
    boolean onlyDurableTxns) throws IOException {
  journalSet.selectInputStreams(
      streams, fromTxId, inProgressOk, onlyDurableTxns);
}
/**
 * Shutdown the file store.
 *
 * Idempotent: calling close() on an already-closed log only logs at debug
 * level. If a segment is open it is synced and finalized first; the journal
 * set is then closed and the state is forced to CLOSED even if finalization
 * threw.
 */
synchronized void close() {
  if (state == State.CLOSED) {
    LOG.debug("Closing log when already closed");
    return;
  }

  try {
    if (state == State.IN_SEGMENT) {
      assert editLogStream != null;
      // Let any in-flight sync complete before sealing the segment.
      waitForSyncToFinish();
      endCurrentLogSegment(true);
    }
  } finally {
    // Always release the journals and mark the log CLOSED, even if ending
    // the current segment failed above.
    if (journalSet != null && !journalSet.isEmpty()) {
      try {
        // Lock ordering: this (method is synchronized), then journalSetLock.
        synchronized(journalSetLock) {
          journalSet.close();
        }
      } catch (IOException ioe) {
        LOG.warn("Error closing journalSet", ioe);
      }
    }
    state = State.CLOSED;
  }
}
abortAllJournals(); disableAndReportErrorOnJournals(badJAS); if (!NameNodeResourcePolicy.areResourcesAvailable(journals, minimumRedundantJournals)) {
/** * Get all the journals this edit log is currently operating on. */ List<JournalAndStream> getJournals() { // The list implementation is CopyOnWriteArrayList, // so we don't need to synchronize this method. return journalSet.getAllJournalStreams(); }
@SuppressWarnings("unchecked") public FSEditLog mockEditLog(StoragePurger purger) throws IOException { final List<JournalManager> jms = Lists.newArrayList(); final JournalSet journalSet = new JournalSet(0); for (FakeRoot root : dirRoots.values()) { if (!root.type.isOfType(NameNodeDirType.EDITS)) continue; fjm.purger = purger; jms.add(fjm); journalSet.add(fjm, false);
/**
 * Add a journal that is not a shared-edits journal.
 * Convenience overload equivalent to {@code add(j, required, false)}.
 *
 * @param j        the journal manager to add
 * @param required whether this journal is required for the log to function
 */
void add(JournalManager j, boolean required) {
  add(j, required, false);
}
chainAndMakeRedundantStreams(streams, allStreams, fromTxId);
lastTxId, lastSyncedTxId); try { journalSet.finalizeLogSegment(curSegmentTxId, lastTxId); editLogStream = null; } catch (IOException e) {
/**
 * Discard edit-log segments on every journal, starting at the given
 * marker transaction id.
 *
 * @param markerTxid transaction id from which segments are discarded
 * @throws IOException if any journal fails to discard its segments
 */
public synchronized void discardSegments(long markerTxid)
    throws IOException {
  for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
    journal.getManager().discardSegments(markerTxid);
  }
}
/**
 * Shutdown the file store.
 *
 * Idempotent: calling close() on an already-closed log only logs at debug
 * level. If a segment is open it is synced and finalized first; the journal
 * set is then closed and the state is forced to CLOSED even if finalization
 * threw.
 */
synchronized void close() {
  if (state == State.CLOSED) {
    LOG.debug("Closing log when already closed");
    return;
  }

  try {
    if (state == State.IN_SEGMENT) {
      assert editLogStream != null;
      // Let any in-flight sync complete before sealing the segment.
      waitForSyncToFinish();
      endCurrentLogSegment(true);
    }
  } finally {
    // Always release the journals and mark the log CLOSED, even if ending
    // the current segment failed above.
    if (journalSet != null && !journalSet.isEmpty()) {
      try {
        // Lock ordering: this (method is synchronized), then journalSetLock.
        synchronized(journalSetLock) {
          journalSet.close();
        }
      } catch (IOException ioe) {
        LOG.warn("Error closing journalSet", ioe);
      }
    }
    state = State.CLOSED;
  }
}
abortAllJournals(); disableAndReportErrorOnJournals(badJAS); if (!NameNodeResourcePolicy.areResourcesAvailable(journals, minimumRedundantJournals)) {
BackupJournalManager bjm = new BackupJournalManager(bnReg, nnReg); synchronized(journalSetLock) { journalSet.add(bjm, false);
JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
/**
 * Finalize the current log segment.
 * Transitions from IN_SEGMENT state to BETWEEN_LOG_SEGMENTS state.
 *
 * @param writeEndTxn if true, write and sync an END_LOG_SEGMENT marker op
 *                    before finalizing, so readers can tell the segment was
 *                    closed cleanly
 */
public synchronized void endCurrentLogSegment(boolean writeEndTxn) {
  LOG.info("Ending log segment " + curSegmentTxId);
  Preconditions.checkState(isSegmentOpen(),
      "Bad state: %s", state);

  if (writeEndTxn) {
    // Record the segment-end marker and force it to the journals before
    // we finalize.
    logEdit(LogSegmentOp.getInstance(cache.get(),
        FSEditLogOpCodes.OP_END_LOG_SEGMENT));
    logSync();
  }

  printStatistics(true);

  final long lastTxId = getLastWrittenTxId();

  try {
    journalSet.finalizeLogSegment(curSegmentTxId, lastTxId);
    editLogStream = null;
  } catch (IOException e) {
    //All journals have failed, it will be handled in logSync.
  }

  // State is advanced even if finalization failed; logSync handles the
  // all-journals-failed case.
  state = State.BETWEEN_LOG_SEGMENTS;
}
/**
 * Initialize the set of journals backing this edit log from the configured
 * edit directories. Local (file) URIs are wrapped in FileJournalManagers
 * backed by their StorageDirectory; all other schemes are delegated to
 * createJournal(). Logs an error (but does not throw) if no journals result.
 *
 * @param dirs the configured edits directories
 */
private synchronized void initJournals(List<URI> dirs) {
  int minimumRedundantJournals = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,
      DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);

  // Hoisted out of the loop: the set of required edits dirs depends only on
  // the conf, so compute it once instead of re-deriving it for every URI.
  List<URI> requiredEditsDirs = FSNamesystem.getRequiredNamespaceEditsDirs(conf);

  synchronized(journalSetLock) {
    journalSet = new JournalSet(minimumRedundantJournals);

    for (URI u : dirs) {
      boolean required = requiredEditsDirs.contains(u);
      if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
        StorageDirectory sd = storage.getStorageDirectory(u);
        if (sd != null) {
          // Local directory: only add it if NNStorage actually knows about it.
          journalSet.add(new FileJournalManager(conf, sd, storage),
              required, sharedEditsDirs.contains(u));
        }
      } else {
        // Non-local scheme (e.g. qjournal): pluggable journal implementation.
        journalSet.add(createJournal(u), required,
            sharedEditsDirs.contains(u));
      }
    }
  }

  if (journalSet.isEmpty()) {
    LOG.error("No edits directories configured!");
  }
}
/**
 * Look up the creation time of the shared journal.
 *
 * @return the cTime reported by the first journal marked as shared
 * @throws IOException if no journal in the set is shared
 */
public long getSharedLogCTime() throws IOException {
  for (JournalAndStream journal : journalSet.getAllJournalStreams()) {
    if (journal.isShared()) {
      return journal.getManager().getJournalCTime();
    }
  }
  throw new IOException("No shared log found.");
}
/** * Initialize the output stream for logging, opening the first * log segment. */ synchronized void openForWrite(int layoutVersion) throws IOException { Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS, "Bad state: %s", state); long segmentTxId = getLastWrittenTxId() + 1; // Safety check: we should never start a segment if there are // newer txids readable. List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>(); journalSet.selectInputStreams(streams, segmentTxId, true, false); if (!streams.isEmpty()) { String error = String.format("Cannot start writing at txid %s " + "when there is a stream available for read: %s", segmentTxId, streams.get(0)); IOUtils.cleanupWithLogger(LOG, streams.toArray(new EditLogInputStream[0])); throw new IllegalStateException(error); } startLogSegmentAndWriteHeaderTxn(segmentTxId, layoutVersion); assert state == State.IN_SEGMENT : "Bad state: " + state; }
/**
 * Shutdown the file store.
 *
 * Idempotent: calling close() on an already-closed log only logs at debug
 * level. If a segment is open it is synced and finalized first; the journal
 * set is then closed and the state is forced to CLOSED even if finalization
 * threw.
 */
synchronized void close() {
  if (state == State.CLOSED) {
    LOG.debug("Closing log when already closed");
    return;
  }

  try {
    if (state == State.IN_SEGMENT) {
      assert editLogStream != null;
      // Let any in-flight sync complete before sealing the segment.
      waitForSyncToFinish();
      endCurrentLogSegment(true);
    }
  } finally {
    // Always release the journals and mark the log CLOSED, even if ending
    // the current segment failed above.
    if (journalSet != null && !journalSet.isEmpty()) {
      try {
        // Lock ordering: this (method is synchronized), then journalSetLock.
        synchronized(journalSetLock) {
          journalSet.close();
        }
      } catch (IOException ioe) {
        LOG.warn("Error closing journalSet", ioe);
      }
    }
    state = State.CLOSED;
  }
}