lastAppliedSN, gainPrimacySN); CompletableFuture<Void> future = client.submit(new JournalEntryCommand( JournalEntry.newBuilder().setSequenceNumber(gainPrimacySN).build())); try { future.get(5, TimeUnit.SECONDS);
JournalEntry entry; while ((entry = reader.read()) != null) { if (entry.getSequenceNumber() > lastPersistSeq) { lastPersistSeq = entry.getSequenceNumber();
.computeMessageSize(2, getAddMountPoint()); .computeMessageSize(3, getBlockContainerIdGenerator()); .computeMessageSize(4, getBlockInfo()); .computeMessageSize(5, getCompleteFile()); .computeMessageSize(6, getDeleteFile()); .computeMessageSize(8, getDeleteMountPoint()); .computeMessageSize(9, getInodeDirectory()); .computeMessageSize(10, getInodeDirectoryIdGenerator()); .computeMessageSize(11, getInodeFile()); .computeMessageSize(12, getInodeLastModificationTime()); .computeMessageSize(15, getPersistDirectory()); .computeMessageSize(16, getAsyncPersistRequest()); .computeMessageSize(19, getRename()); .computeMessageSize(27, getSetAttribute()); .computeMessageSize(29, getDeleteBlock());
@Override public void processJournalEntry(JournalEntry entry) throws IOException { if (entry.hasInodeFile()) { mInodeTree.addInodeFileFromJournal(entry.getInodeFile()); InodeFileEntry inodeFileEntry = entry.getInodeFile(); if (inodeFileEntry.hasTtl()) { mTtlBuckets.insert(InodeFile.fromJournalEntry(inodeFileEntry)); } else if (entry.hasInodeDirectory()) { try { InodeDirectoryEntry inodeDirectoryEntry = entry.getInodeDirectory(); if (inodeDirectoryEntry.hasTtl()) { mTtlBuckets.insert(InodeDirectory.fromJournalEntry(inodeDirectoryEntry)); mInodeTree.addInodeDirectoryFromJournal(entry.getInodeDirectory()); } catch (AccessControlException e) { throw new RuntimeException(e); } else if (entry.hasInodeLastModificationTime()) { InodeLastModificationTimeEntry modTimeEntry = entry.getInodeLastModificationTime(); try (LockedInodePath inodePath = mInodeTree .lockFullInodePath(modTimeEntry.getId(), InodeTree.LockMode.WRITE)) { } else if (entry.hasPersistDirectory()) { PersistDirectoryEntry typedEntry = entry.getPersistDirectory(); try (LockedInodePath inodePath = mInodeTree .lockFullInodePath(typedEntry.getId(), InodeTree.LockMode.WRITE)) {
public Builder mergeFrom(alluxio.proto.journal.Journal.JournalEntry other) { if (other == alluxio.proto.journal.Journal.JournalEntry.getDefaultInstance()) return this; if (other.hasSequenceNumber()) { setSequenceNumber(other.getSequenceNumber()); if (other.hasAddMountPoint()) { mergeAddMountPoint(other.getAddMountPoint()); if (other.hasAsyncPersistRequest()) { mergeAsyncPersistRequest(other.getAsyncPersistRequest()); if (other.hasBlockContainerIdGenerator()) { mergeBlockContainerIdGenerator(other.getBlockContainerIdGenerator()); if (other.hasBlockInfo()) { mergeBlockInfo(other.getBlockInfo()); if (other.hasCompleteFile()) { mergeCompleteFile(other.getCompleteFile()); if (other.hasCompletePartition()) { mergeCompletePartition(other.getCompletePartition()); if (other.hasCompleteStore()) { mergeCompleteStore(other.getCompleteStore()); if (other.hasCreateStore()) { mergeCreateStore(other.getCreateStore()); if (other.hasDeleteBlock()) {
JournalEntry entry; while ((entry = reader.read()) != null) { if (entry.getSequenceNumber() > lastPersistSeq) { lastPersistSeq = entry.getSequenceNumber();
if (!mEntriesToFlush.isEmpty()) { JournalEntry firstEntryToFlush = mEntriesToFlush.peek(); if (firstEntryToFlush.getSequenceNumber() > lastPersistSeq + 1) { throw new RuntimeException(ExceptionMessage.JOURNAL_ENTRY_MISSING.getMessageWithUrl( RuntimeConstants.ALLUXIO_DEBUG_DOCS_URL, lastPersistSeq + 1, firstEntryToFlush.getSequenceNumber())); if (entry.getSequenceNumber() > lastPersistSeq) { try { entry.toBuilder().build().writeDelimitedTo(mJournalOutputStream); retryEndSeq = entry.getSequenceNumber(); } catch (IOJournalClosedException e) { throw e.toJournalClosedException();
while ((entry = parser.next()) != null) { if (start == -1) { start = entry.getSequenceNumber(); end = entry.getSequenceNumber();
JournalEntry entry = JournalEntry.newBuilder() .setDeleteBlock(DeleteBlockEntry.newBuilder().setBlockId(blockId)).build(); journalContext.append(entry);
rpcContext.journal(JournalEntry.newBuilder().setDeleteMountPoint(deleteMountPoint).build());
.setRecursive(deleteOptions.isRecursive()) .setOpTimeMs(opTimeMs).build(); rpcContext.journal(Journal.JournalEntry.newBuilder().setDeleteFile(deleteFile).build());
builder.setUfsFingerprint(options.getUfsFingerprint()); journalContext.append(JournalEntry.newBuilder().setSetAttribute(builder).build());
if (entry.hasAddMountPoint() || entry.hasAsyncPersistRequest() || entry.hasCompleteFile() || entry.hasDeleteFile() || entry.hasDeleteMountPoint() || entry.hasInodeDirectory() || entry.hasInodeDirectoryIdGenerator() || entry.hasInodeFile() || entry.hasInodeLastModificationTime() || entry.hasPersistDirectory() || entry.hasRename() || entry.hasReinitializeFile() || entry.hasSetAttribute() || entry.hasUpdateUfsMode()) { return Constants.FILE_SYSTEM_MASTER_NAME; if (entry.hasBlockContainerIdGenerator() || entry.hasDeleteBlock() || entry.hasBlockInfo()) { return Constants.BLOCK_MASTER_NAME; if (entry.hasCompletePartition() || entry.hasCompleteStore() || entry.hasCreateStore() || entry.hasDeleteStore() || entry.hasRenameStore() || entry.hasMergeStore()) { return Constants.KEY_VALUE_MASTER_NAME;
} else if (entry.getSequenceNumber() == mNextSequenceNumber) { mNextSequenceNumber++; return entry; } else if (entry.getSequenceNumber() < mNextSequenceNumber) { } else { throw new IllegalStateException(ExceptionMessage.JOURNAL_ENTRY_MISSING.getMessage( mNextSequenceNumber, entry.getSequenceNumber()));
rpcContext.journal(JournalEntry.newBuilder() .setUpdateUfsMode(UpdateUfsModeEntry.newBuilder() .setUfsPath(key)
@Override public JournalEntry read() throws IOException { int firstByte = inputStream.read(); if (firstByte == -1) { return null; } // All journal entries start with their size in bytes written as a varint. int size = ProtoUtils.readRawVarint32(firstByte, inputStream); byte[] buffer = size <= mBuffer.length ? mBuffer : new byte[size]; // Total bytes read so far for journal entry. int totalBytesRead = 0; while (totalBytesRead < size) { // Bytes read in last read request. int latestBytesRead = inputStream.read(buffer, totalBytesRead, size - totalBytesRead); if (latestBytesRead < 0) { break; } totalBytesRead += latestBytesRead; } if (totalBytesRead < size) { LOG.warn("Journal entry was truncated. Expected to read " + size + " bytes but only got " + totalBytesRead); return null; } JournalEntry entry = JournalEntry.parseFrom(new ByteArrayInputStream(buffer, 0, size)); if (entry != null) { mLatestSequenceNumber = entry.getSequenceNumber(); } return entry; }
/**
 * Writes a journal entry to the current log, assigning it the next sequence number
 * and queueing it for retry in case a UFS failure requires re-writing.
 *
 * <p>Synchronized so that sequence numbers are assigned and written in order.
 *
 * @param entry the entry to write; its sequence number field is overwritten
 * @throws IOException if the write to the underlying journal stream fails
 * @throws JournalClosedException if the journal has been closed
 */
public synchronized void write(JournalEntry entry) throws IOException, JournalClosedException {
  try {
    // Before writing, repair any earlier UFS failure and rotate the log if needed.
    maybeRecoverFromUfsFailures();
    maybeRotateLog();
  } catch (IOJournalClosedException e) {
    // Translate the IO-flavored closed exception into the checked one callers expect.
    throw e.toJournalClosedException();
  }
  try {
    // Stamp the entry with the next sequence number; the original entry is not mutated.
    JournalEntry entryToWrite =
        entry.toBuilder().setSequenceNumber(mNextSequenceNumber).build();
    entryToWrite.writeDelimitedTo(mJournalOutputStream);
    LOG.debug("Adding journal entry (seq={}) to retryList with {} entries.",
        entryToWrite.getSequenceNumber(), mEntriesToFlush.size());
    // Keep the entry until it is known persisted, so it can be replayed after a UFS failure.
    mEntriesToFlush.add(entryToWrite);
    mNextSequenceNumber++;
  } catch (IOJournalClosedException e) {
    throw e.toJournalClosedException();
  } catch (IOException e) {
    // Set mNeedsRecovery to true so that {@code maybeRecoverFromUfsFailures}
    // can know a UFS failure has occurred.
    mNeedsRecovery = true;
    throw new IOException(ExceptionMessage.JOURNAL_WRITE_FAILURE
        .getMessageWithUrl(RuntimeConstants.ALLUXIO_DEBUG_DOCS_URL,
            mJournalOutputStream.currentLog(), e.getMessage()), e);
  }
}
/**
 * Snapshots this inode file's state into a journal entry.
 *
 * @return a {@link JournalEntry} carrying an {@link InodeFileEntry} built from this inode
 */
@Override
public JournalEntry toJournalEntry() {
  // Populate the proto builder field by field rather than as one fluent chain;
  // the resulting message is identical regardless of setter order.
  InodeFileEntry.Builder fileEntry = InodeFileEntry.newBuilder();
  fileEntry.addAllBlocks(getBlockIds());
  fileEntry.setBlockSizeBytes(getBlockSizeBytes());
  fileEntry.setCacheable(isCacheable());
  fileEntry.setCompleted(isCompleted());
  fileEntry.setCreationTimeMs(getCreationTimeMs());
  fileEntry.setGroup(getGroup());
  fileEntry.setId(getId());
  fileEntry.setLastModificationTimeMs(getLastModificationTimeMs());
  fileEntry.setLength(getLength());
  fileEntry.setMode(getMode());
  fileEntry.setName(getName());
  fileEntry.setOwner(getOwner());
  fileEntry.setParentId(getParentId());
  fileEntry.setPersistenceState(getPersistenceState().name());
  fileEntry.setPinned(isPinned());
  fileEntry.setTtl(getTtl());
  fileEntry.setTtlAction(ProtobufUtils.toProtobuf(getTtlAction()));
  fileEntry.setUfsFingerprint(getUfsFingerprint());
  return JournalEntry.newBuilder().setInodeFile(fileEntry.build()).build();
}
/**
 * Returns the next mount-point journal entry from the iteration.
 *
 * @return a journal entry describing the current mount point
 * @throws NoSuchElementException if there are no more entries
 */
@Override
public Journal.JournalEntry next() {
  if (!hasNext()) {
    throw new NoSuchElementException();
  }
  String path = mEntry.getKey();
  MountInfo mountInfo = mEntry.getValue();
  // Consume the cached entry so hasNext() advances on the following call.
  mEntry = null;
  // Convert the mount options' string map into repeated proto key/value pairs.
  Map<String, String> optionProps = mountInfo.getOptions().getProperties();
  List<File.StringPairEntry> pairEntries = new ArrayList<>(optionProps.size());
  for (Map.Entry<String, String> prop : optionProps.entrySet()) {
    File.StringPairEntry pair = File.StringPairEntry.newBuilder()
        .setKey(prop.getKey())
        .setValue(prop.getValue())
        .build();
    pairEntries.add(pair);
  }
  AddMountPointEntry mountEntry = AddMountPointEntry.newBuilder()
      .setAlluxioPath(path)
      .setMountId(mountInfo.getMountId())
      .setUfsPath(mountInfo.getUfsUri().toString())
      .setReadOnly(mountInfo.getOptions().isReadOnly())
      .setShared(mountInfo.getOptions().isShared())
      .addAllProperties(pairEntries)
      .build();
  return Journal.JournalEntry.newBuilder().setAddMountPoint(mountEntry).build();
}
/**
 * Parses the journal file named by {@code sJournalFile} and prints every entry whose
 * sequence number falls in the half-open range [{@code sStart}, {@code sEnd}).
 */
private static void parseJournalFile() {
  URI location;
  try {
    location = new URI(sJournalFile);
  } catch (URISyntaxException e) {
    throw new RuntimeException(e);
  }
  try (JournalFileParser parser = JournalFileParser.Factory.create(location)) {
    // Iterate until the parser is exhausted or the end of the requested range is reached.
    for (JournalEntry entry = parser.next(); entry != null; entry = parser.next()) {
      long sequenceNumber = entry.getSequenceNumber();
      if (sequenceNumber < sStart) {
        // Before the requested window; keep scanning.
        continue;
      }
      if (sequenceNumber >= sEnd) {
        // Entries arrive in order, so nothing later can be in range.
        break;
      }
      System.out.println(ENTRY_SEPARATOR);
      System.out.print(entry);
    }
  } catch (Exception e) {
    LOG.error("Failed to get next journal entry.", e);
  }
}