public void trackPrepareLocation(Location location) {
    this.prepareLocationId = location.getDataFileId();
}
private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
    Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
    if (referenceFileIds == null) {
        referenceFileIds = new HashSet<>();
        referenceFileIds.add(messageLocation.getDataFileId());
        metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
        metadata.ackMessageFileMapDirtyFlag.lazySet(true);
    } else {
        Integer id = Integer.valueOf(messageLocation.getDataFileId());
        if (!referenceFileIds.contains(id)) {
            referenceFileIds.add(id);
            // Mark the map dirty here too, so additions to an existing set
            // are also persisted at the next checkpoint.
            metadata.ackMessageFileMapDirtyFlag.lazySet(true);
        }
    }
}
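// Minimal sketch of the idea behind the map above (hypothetical class and
// method names, not KahaDB's API): journal files holding acks pin the journal
// files holding the acked messages, so GC must drop every referenced file
// from its candidate set before deleting anything.
import java.util.*;

final class AckFileMapSketch {
    private final Map<Integer, Set<Integer>> ackFileToMessageFiles = new HashMap<>();

    void record(int ackFileId, int messageFileId) {
        ackFileToMessageFiles.computeIfAbsent(ackFileId, k -> new HashSet<>())
                             .add(messageFileId);
    }

    void pruneGcCandidates(Set<Integer> gcCandidates) {
        // A message file referenced by any ack file cannot be collected yet.
        for (Set<Integer> referenced : ackFileToMessageFiles.values()) {
            gcCandidates.removeAll(referenced);
        }
    }
}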
DataFile getDataFile(Location item) throws IOException {
    Integer key = Integer.valueOf(item.getDataFileId());
    DataFile dataFile = null;
    synchronized (currentDataFile) {
        dataFile = fileMap.get(key);
    }
    if (dataFile == null) {
        LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
        throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
    }
    return dataFile;
}
public WriteKey(Location item) {
    file = item.getDataFileId();
    offset = item.getOffset();
    // TODO: see if we can build a better hash
    hash = (int) (file ^ offset);
}
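// Illustrative only: one way to address the TODO above. The (int) cast
// discards the upper 32 bits of the long offset; mixing both halves (as
// Long.hashCode() does) keeps them in play. The class and method names here
// are hypothetical, not part of the journal code.
final class WriteKeyHashSketch {
    static int hash(int file, long offset) {
        return 31 * file + (int) (offset ^ (offset >>> 32));
    }
}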
public void writePayload(Location object, DataOutput dataOut) throws IOException {
    dataOut.writeInt(object.getDataFileId());
    dataOut.writeInt(object.getOffset());
}
@Override
public void writePayload(Location object, DataOutput dataOut) throws IOException {
    dataOut.writeInt(object.getDataFileId());
    dataOut.writeInt(object.getOffset());
    dataOut.writeInt(object.getSize());
}
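// Sketch of the matching reader for the size-aware marshaller above, assuming
// Location's standard setters; the reads must mirror the write order exactly
// (file id, then offset, then size).
@Override
public Location readPayload(DataInput dataIn) throws IOException {
    Location rc = new Location();
    rc.setDataFileId(dataIn.readInt());
    rc.setOffset(dataIn.readInt());
    rc.setSize(dataIn.readInt());
    return rc;
}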
private boolean blockedFromCompaction(int journalToAdvance) {
    // don't forward the current data file
    if (journalToAdvance == journal.getCurrentDataFileId()) {
        return true;
    }
    // don't forward any data file with inflight transaction records because it will whack the tx - data file link
    // in the ack map when all acks are migrated (now that the ack map is not just for acks)
    // TODO: prepare records can be dropped but completion records (maybe only commit outcomes) need to be migrated
    // as part of the forward work.
    Location[] inProgressTxRange = getInProgressTxLocationRange();
    if (inProgressTxRange[0] != null) {
        for (int pendingTx = inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
            if (journalToAdvance == pendingTx) {
                LOG.trace("Compaction target:{} blocked by inflight transaction records: {}", journalToAdvance, inProgressTxRange);
                return true;
            }
        }
    }
    return false;
}
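// Equivalent, simpler form of the scan above (a sketch, not a committed
// change): since the data file ids in the in-progress tx range are
// consecutive, a direct bounds check replaces the linear loop.
boolean blocked = journalToAdvance >= inProgressTxRange[0].getDataFileId()
               && journalToAdvance <= inProgressTxRange[1].getDataFileId();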
public void removeDataFiles(Set<Integer> files) throws IOException {
    for (Integer key : files) {
        // Can't remove the data file (or subsequent files) that is currently being written to.
        if (key >= lastAppendLocation.get().getDataFileId()) {
            continue;
        }
        DataFile dataFile = null;
        synchronized (currentDataFile) {
            dataFile = fileMap.remove(key);
            if (dataFile != null) {
                fileByFileMap.remove(dataFile.getFile());
                dataFile.unlink();
            }
        }
        if (dataFile != null) {
            forceRemoveDataFile(dataFile);
        }
    }
}
/**
 * Updates the Job removal tracking index with the location of a remove command and the
 * original JobLocation entry.
 *
 * The JobLocation holds the locations in the logs where the add and update commands for
 * a job are stored. The log file containing the remove command can only be discarded after
 * both the add and latest update log files have also been discarded.
 *
 * @param tx
 *      The TX under which the update is to be performed.
 * @param location
 *      The location value to reference a remove command.
 * @param removedJob
 *      The original JobLocation instance that holds the add and update locations.
 *
 * @throws IOException if an error occurs while updating the remove location tracker.
 */
protected void referenceRemovedLocation(Transaction tx, Location location, JobLocation removedJob) throws IOException {
    int logId = location.getDataFileId();
    List<Integer> removed = this.metaData.getRemoveLocationTracker().get(tx, logId);
    if (removed == null) {
        removed = new ArrayList<Integer>();
    }
    removed.add(removedJob.getLocation().getDataFileId());
    this.metaData.getRemoveLocationTracker().put(tx, logId, removed);
}
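// Illustrative model of the tracker above (hypothetical names, not the
// store's API): effectively a map from the log file holding a remove command
// to the log files holding the add/update commands it cancels. A remove log
// becomes GC-eligible only once every log it references is gone.
import java.util.*;

final class RemoveTrackerSketch {
    private final Map<Integer, List<Integer>> removeLogToReferencedLogs = new HashMap<>();

    void reference(int removeLogId, int referencedLogId) {
        removeLogToReferencedLogs
            .computeIfAbsent(removeLogId, k -> new ArrayList<>())
            .add(referencedLogId);
    }

    boolean canDiscard(int removeLogId, Set<Integer> liveLogIds) {
        // Discard only if no referenced add/update log is still live.
        List<Integer> refs = removeLogToReferencedLogs.getOrDefault(removeLogId, List.of());
        return refs.stream().noneMatch(liveLogIds::contains);
    }
}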
    return;

final int lastJournalFileId = journal.getLastAppendLocation().getDataFileId();
final Set<Integer> candidates = journal.getFileMap().keySet();
LOG.trace("Full gc candidate set:" + candidates);
protected void process(KahaRewrittenDataFileCommand command, Location location) throws IOException {
    final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());

    // Mark the current journal file as a compacted file so that gc checks can skip
    // over logs that are smaller compaction type logs.
    DataFile current = journal.getDataFileById(location.getDataFileId());
    current.setTypeCode(command.getRewriteType());

    if (completeFileSet.contains(command.getSourceDataFileId()) && command.getSkipIfSourceExists()) {
        // Move offset so that next location read jumps to next file.
        location.setOffset(journalMaxFileLength);
    }
}
KahaLocation convert(Location location) {
    KahaLocation rc = new KahaLocation();
    rc.setLogId(location.getDataFileId());
    rc.setOffset(location.getOffset());
    return rc;
}
/**
 * Adds a reference for the journal log file pointed to by the given Location value.
 *
 * Journal log files that still contain data needed for recovery must hold active
 * references so they are not discarded. Each Job scheduler should ensure that its
 * logs are accurately referenced.
 *
 * @param tx
 *      The TX under which the update is to be performed.
 * @param location
 *      The location value to update the reference count of.
 *
 * @throws IOException if an error occurs while updating the journal references table.
 */
protected void incrementJournalCount(Transaction tx, Location location) throws IOException {
    int logId = location.getDataFileId();
    Integer val = metaData.getJournalRC().get(tx, logId);
    int refCount = val != null ? val.intValue() + 1 : 1;
    metaData.getJournalRC().put(tx, logId, refCount);
}
if (job.getLocation().getDataFileId() != location.getDataFileId()) {
    // The job's add/update record lives in a different log file than the
    // remove command, so track the cross-file reference for GC.
    this.store.referenceRemovedLocation(tx, location, job);
}
payload = toByteSequence(command);
Location location = appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
updatedAckLocations.put(location.getDataFileId(), journalLogsReferenced);
gcCandidateSet.remove(lastUpdate.getDataFileId());
if (missing.contains(job.getLocation().getDataFileId())) {
    // The log file holding this job's record is gone; purge the orphaned job.
    scheduler.removeJobAtTime(tx, job.getJobId(), job.getNextTime());
    removed++;
}
if (removed.getLocation().getDataFileId() != location.getDataFileId()) {
    // Same cross-file case as above: the removed entry's record is in a
    // different log file, so reference the remove location for GC tracking.
    this.store.referenceRemovedLocation(tx, location, removed);
}
/**
 * Removes one reference for the Journal log file indicated in the given Location value.
 *
 * The references are used to track which log files cannot be GC'd. When the reference
 * count on a log file reaches zero the file id is removed from the tracker and the log
 * will be removed on the next checkpoint update.
 *
 * @param tx
 *      The TX under which the update is to be performed.
 * @param location
 *      The location value to update the reference count of.
 *
 * @throws IOException if an error occurs while updating the journal references table.
 */
protected void decrementJournalCount(Transaction tx, Location location) throws IOException {
    int logId = location.getDataFileId();
    Integer refCount = metaData.getJournalRC().get(tx, logId);
    if (refCount != null) {
        int refCountValue = refCount;
        refCountValue--;
        if (refCountValue <= 0) {
            metaData.getJournalRC().remove(tx, logId);
        } else {
            metaData.getJournalRC().put(tx, logId, refCountValue);
        }
    }
}
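// Toy model of the reference counting used by incrementJournalCount and
// decrementJournalCount above (hypothetical class, no Transaction or index
// page involved): a log file is GC-eligible only while it has no entry in
// the counter map, mirroring the remove-on-zero behavior.
import java.util.*;

final class JournalRefCountSketch {
    private final Map<Integer, Integer> journalRC = new HashMap<>();

    void increment(int logId) {
        journalRC.merge(logId, 1, Integer::sum);
    }

    void decrement(int logId) {
        // Returning null from computeIfPresent removes the entry, which is
        // the equivalent of remove-on-zero in the method above.
        journalRC.computeIfPresent(logId, (id, count) -> count <= 1 ? null : count - 1);
    }

    boolean isGcEligible(int logId) {
        return !journalRC.containsKey(logId);
    }
}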