/**
 * Creates a shallow copy of this database version: the copy references the same
 * header and the same chunk, multichunk, file content and file history objects
 * as the original — the contained entries themselves are not duplicated.
 */
@Override
public DatabaseVersion clone() {
	DatabaseVersion copy = new DatabaseVersion();
	copy.setHeader(getHeader());

	for (ChunkEntry chunk : getChunks()) {
		copy.addChunk(chunk);
	}

	for (MultiChunkEntry multiChunk : getMultiChunks()) {
		copy.addMultiChunk(multiChunk);
	}

	for (FileContent content : getFileContents()) {
		copy.addFileContent(content);
	}

	for (PartialFileHistory history : getFileHistories()) {
		copy.addFileHistory(history);
	}

	return copy;
}
private void writeDatabaseVersionHeader(IndentXmlStreamWriter xmlOut, DatabaseVersion databaseVersion) throws IOException, XMLStreamException { if (databaseVersion.getTimestamp() == null || databaseVersion.getClient() == null || databaseVersion.getVectorClock() == null || databaseVersion.getVectorClock().isEmpty()) { logger.log(Level.SEVERE, "Cannot write database version. Header fields must be filled: "+databaseVersion.getHeader()); throw new IOException("Cannot write database version. Header fields must be filled: "+databaseVersion.getHeader()); } xmlOut.writeStartElement("header"); xmlOut.writeEmptyElement("time"); xmlOut.writeAttribute("value", databaseVersion.getTimestamp().getTime()); xmlOut.writeEmptyElement("client"); xmlOut.writeAttribute("name", databaseVersion.getClient()); xmlOut.writeStartElement("vectorClock"); VectorClock vectorClock = databaseVersion.getVectorClock(); for (Map.Entry<String, Long> vectorClockEntry : vectorClock.entrySet()) { xmlOut.writeEmptyElement("client"); xmlOut.writeAttribute("name", vectorClockEntry.getKey()); xmlOut.writeAttribute("value", vectorClockEntry.getValue()); } xmlOut.writeEndElement(); // </vectorClock> xmlOut.writeEndElement(); // </header> }
/**
 * Collects the metadata of all DIRTY {@link DatabaseVersion}s — versions that
 * did not end up in the winning branch — and adds their chunks, multichunks and
 * file contents to {@code newDatabaseVersion}, so that this data is re-included
 * in the new Up. Only metadata is re-uploaded; the actual multichunk payloads
 * already exist in the repository.
 *
 * @param newDatabaseVersion {@link DatabaseVersion} to which dirty data should be added.
 */
private void addDirtyData(DatabaseVersion newDatabaseVersion) {
	Iterator<DatabaseVersion> dirtyDatabaseVersions = localDatabase.getDirtyDatabaseVersions();

	// Nothing dirty in the local database: bail out early
	if (!dirtyDatabaseVersions.hasNext()) {
		logger.log(Level.INFO, "No DIRTY data found in database (no dirty databases); Nothing to do here.");
		return;
	}

	logger.log(Level.INFO, "Adding DIRTY data to new database version: ");

	while (dirtyDatabaseVersions.hasNext()) {
		DatabaseVersion dirtyDatabaseVersion = dirtyDatabaseVersions.next();

		logger.log(Level.INFO, "- Adding chunks/multichunks/filecontents from database version " + dirtyDatabaseVersion.getHeader());

		for (ChunkEntry dirtyChunk : dirtyDatabaseVersion.getChunks()) {
			newDatabaseVersion.addChunk(dirtyChunk);
		}

		for (MultiChunkEntry dirtyMultiChunk : dirtyDatabaseVersion.getMultiChunks()) {
			newDatabaseVersion.addMultiChunk(dirtyMultiChunk);
		}

		for (FileContent dirtyFileContent : dirtyDatabaseVersion.getFileContents()) {
			newDatabaseVersion.addFileContent(dirtyFileContent);
		}
	}
}
// Merges the contents of a newly added database version into the accumulated
// full-database cache (fullDatabaseVersionCache), adding only entries the cache
// does not know yet.
// NOTE(review): this fragment is truncated (closing braces not visible). As
// written, the multichunk / filecontent / filehistory loops are NESTED inside
// the "new chunk" check instead of running as sibling loops, which would mean
// they only execute while a previously unseen chunk is processed — verify
// against the full source whether this nesting is intentional.
private void updateFullDatabaseVersionCache(DatabaseVersion newDatabaseVersion) {
	for (ChunkEntry sourceChunk : newDatabaseVersion.getChunks()) {
		// Chunks are deduplicated by checksum
		if (fullDatabaseVersionCache.getChunk(sourceChunk.getChecksum()) == null) {
			fullDatabaseVersionCache.addChunk(sourceChunk);
			for (MultiChunkEntry sourceMultiChunk : newDatabaseVersion.getMultiChunks()) {
				// Multichunks are deduplicated by id
				if (fullDatabaseVersionCache.getMultiChunk(sourceMultiChunk.getId()) == null) {
					fullDatabaseVersionCache.addMultiChunk(sourceMultiChunk);
					for (FileContent sourceFileContent : newDatabaseVersion.getFileContents()) {
						// File contents are deduplicated by checksum
						if (fullDatabaseVersionCache.getFileContent(sourceFileContent.getChecksum()) == null) {
							fullDatabaseVersionCache.addFileContent(sourceFileContent);
							for (PartialFileHistory sourceFileHistory : newDatabaseVersion.getFileHistories()) {
								// targetFileHistory lookup result is unused in the visible
								// fragment — presumably consumed in the elided code; confirm
								PartialFileHistory targetFileHistory = fullDatabaseVersionCache.getFileHistory(sourceFileHistory.getFileHistoryId());
								// Histories are cloned before caching
								fullDatabaseVersionCache.addFileHistory(sourceFileHistory.clone());
private long writeDatabaseVersion(Connection connection, DatabaseVersion databaseVersion) throws SQLException { long databaseVersionId = writeDatabaseVersionHeaderInternal(connection, databaseVersion.getHeader()); // TODO [low] Use writeDatabaseVersion()? writeVectorClock(connection, databaseVersionId, databaseVersion.getHeader().getVectorClock()); chunkDao.writeChunks(connection, databaseVersionId, databaseVersion.getChunks()); multiChunkDao.writeMultiChunks(connection, databaseVersionId, databaseVersion.getMultiChunks()); fileContentDao.writeFileContents(connection, databaseVersionId, databaseVersion.getFileContents()); fileHistoryDao.writeFileHistories(connection, databaseVersionId, databaseVersion.getFileHistories()); return databaseVersionId; }
// Fragment (enclosing method start/end not visible): loop that stamps and
// persists queued database versions during Up.
boolean noDatabaseVersions = databaseVersion.isEmpty();

while (!databaseVersion.isEmpty()) {
	// NOTE(review): remoteTransaction is still null when passed to
	// addMultiChunksToTransaction() below in this visible fragment —
	// presumably assigned in elided code; verify against the full source.
	RemoteTransaction remoteTransaction = null;

	// Stamp the version with a fresh vector clock, current time and this machine's name
	VectorClock newVectorClock = findNewVectorClock();
	databaseVersion.setVectorClock(newVectorClock);
	databaseVersion.setTimestamp(new Date());
	databaseVersion.setClient(config.getMachineName());

	addMultiChunksToTransaction(remoteTransaction, databaseVersion.getMultiChunks());

	logger.log(Level.INFO, "Persisting local SQL database (new database version {0}) ...", databaseVersion.getHeader().toString());
	long newDatabaseVersionId = localDatabase.writeDatabaseVersion(databaseVersion);
/**
 * Builds a new {@link DatabaseVersion} for client "someclient", stamped with the
 * given date. The vector clock is either derived from the given base header
 * (cloned, then incremented for "someclient") or started fresh when no base
 * header is supplied.
 *
 * @param basedOnDatabaseVersionHeader header whose vector clock seeds the new version, or {@code null}
 * @param date timestamp for the new version
 * @return the newly created database version
 */
public static DatabaseVersion createDatabaseVersion(DatabaseVersionHeader basedOnDatabaseVersionHeader, Date date) {
	VectorClock vectorClock;

	if (basedOnDatabaseVersionHeader != null) {
		vectorClock = basedOnDatabaseVersionHeader.getVectorClock().clone();
	}
	else {
		vectorClock = new VectorClock();
	}

	vectorClock.incrementClock("someclient");

	DatabaseVersion databaseVersion = new DatabaseVersion();

	databaseVersion.setClient("someclient");
	databaseVersion.setTimestamp(date);
	databaseVersion.setVectorClock(vectorClock);

	return databaseVersion;
}
}
// Fragment (enclosing method not visible): statements from what looks like an
// XML/stream parser's element-end handling — each "x = null" resets state for
// the next parsed element. The statements below likely belong to DIFFERENT
// handler branches that were collapsed onto one line during extraction; do not
// read them as one sequential flow. TODO confirm against the full source.
if (vectorClockInLoadRange) {
	// Only versions whose vector clock falls into [versionFrom, versionTo] are loaded
	database.addDatabaseVersion(databaseVersion);
	logger.log(Level.INFO, "  + Added database version " + databaseVersion.getHeader());
	vectorClockInLoadRange = vectorClockInRange(vectorClock, versionFrom, versionTo);
	databaseVersion.setVectorClock(vectorClock);
	vectorClock = null;
	databaseVersion.addFileContent(fileContent);
	fileContent = null;
	databaseVersion.addMultiChunk(multiChunk);
	multiChunk = null;
	databaseVersion.addFileHistory(fileHistory);
protected DatabaseVersion createDatabaseVersionFromRow(ResultSet resultSet, boolean excludeChunkData, int fileHistoryMaxCount) throws SQLException { DatabaseVersionHeader databaseVersionHeader = createDatabaseVersionHeaderFromRow(resultSet); DatabaseVersion databaseVersion = new DatabaseVersion(); databaseVersion.setHeader(databaseVersionHeader); // Add chunk/multichunk/filecontent data if (!excludeChunkData) { Map<ChunkChecksum, ChunkEntry> chunks = chunkDao.getChunks(databaseVersionHeader.getVectorClock()); Map<MultiChunkId, MultiChunkEntry> multiChunks = multiChunkDao.getMultiChunks(databaseVersionHeader.getVectorClock()); Map<FileChecksum, FileContent> fileContents = fileContentDao.getFileContents(databaseVersionHeader.getVectorClock()); for (ChunkEntry chunk : chunks.values()) { databaseVersion.addChunk(chunk); } for (MultiChunkEntry multiChunk : multiChunks.values()) { databaseVersion.addMultiChunk(multiChunk); } for (FileContent fileContent : fileContents.values()) { databaseVersion.addFileContent(fileContent); } } // Add file histories Map<FileHistoryId, PartialFileHistory> fileHistories = fileHistoryDao .getFileHistoriesWithFileVersions(databaseVersionHeader.getVectorClock(), fileHistoryMaxCount); for (PartialFileHistory fileHistory : fileHistories.values()) { databaseVersion.addFileHistory(fileHistory); } return databaseVersion; }
// Fragment (enclosing method not visible): initializes a fresh database version
// with the previously parsed timestamp and client name, then records a chunk.
// NOTE(review): chunkEntry presumably comes from surrounding parse state; the
// addChunk call may belong to a different branch of the elided code — confirm.
databaseVersion = new DatabaseVersion();
databaseVersion.setTimestamp(timeValue);
databaseVersion.setClient(clientName);
databaseVersion.addChunk(chunkEntry);
/**
 * Returns the file histories of all database versions, as aggregated by the
 * full-database cache.
 */
public Collection<PartialFileHistory> getFileHistories() {
	Collection<PartialFileHistory> allFileHistories = fullDatabaseVersionCache.getFileHistories();
	return allFileHistories;
}
// Creates an empty in-memory database: no database versions yet, and all
// lookup caches initialized empty.
public MemoryDatabase() {
	databaseVersions = new ArrayList<DatabaseVersion>();

	// Caches
	fullDatabaseVersionCache = new DatabaseVersion();           // aggregate of all versions
	filenameHistoryCache = new HashMap<String, PartialFileHistory>();   // path -> history
	databaseVersionIdCache = new HashMap<VectorClock, DatabaseVersion>(); // vector clock -> version
	contentChecksumFileHistoriesCache = new HashMap<FileChecksum, List<PartialFileHistory>>(); // checksum -> histories
}
private void indexWithNewFiles(List<File> files, List<File> deletedFiles, Queue<DatabaseVersion> databaseVersionQueue) throws IOException { boolean isFirstFile = true; int filesCount = files.size(); while (!files.isEmpty()) { DatabaseVersion newDatabaseVersion = new DatabaseVersion(); // Create the DeduperListener that will receive MultiChunks and store them in the DatabaseVersion object DeduperListener deduperListener = new IndexerDeduperListener(newDatabaseVersion); // Signal the start of indexing if we are about to deduplicate the first file if (isFirstFile) { deduperListener.onStart(files.size()); removeDeletedFiles(newDatabaseVersion, deletedFiles); // Add deletions in first database version isFirstFile = false; } // Find and index new files deduper.deduplicate(files, deduperListener); if (!newDatabaseVersion.getFileHistories().isEmpty()) { logger.log(Level.FINE, "Processed new database version: " + newDatabaseVersion); databaseVersionQueue.offer(newDatabaseVersion); int remainingFilesCount = filesCount - files.size(); eventBus.post(new UpIndexMidSyncExternalEvent(config.getLocalDir().toString(), filesCount, remainingFilesCount)); } //else { (comment-only else case) // Just chunks and multichunks, no filehistory. Since this means the file was being // written/vanished during operations, it makes no sense to upload it. If the user // wants it indexed, Up can be run again. //} } }
// Fragment (loop body truncated; lightweightDatabaseVersion and changedFiles
// are defined in elided surrounding code): copies client, date and change set
// from the full database version into a lightweight representation.
// NOTE(review): the setClient/setDate/setChangeSet calls appear to sit inside
// the per-history loop here but look loop-invariant — confirm placement
// against the full source.
for (PartialFileHistory fileHistory : databaseVersion.getFileHistories()) {
	FileVersion fileVersion = fileHistory.getLastVersion();
	lightweightDatabaseVersion.setClient(databaseVersion.getHeader().getClient());
	lightweightDatabaseVersion.setDate(databaseVersion.getHeader().getDate());
	lightweightDatabaseVersion.setChangeSet(changedFiles);
/**
 * Takes a Map from {@link DatabaseRemoteFile}s to Lists of {@link DatabaseVersion}s and produces more or less
 * the reverse Map, which can be used to find the cached copy of a remote database file, given a
 * {@link DatabaseVersionHeader}.
 *
 * @param remoteDatabaseHeaders mapping remote database files to the versions they contain.
 * @param databaseRemoteFilesInCache mapping files to the database remote file that is cached in it.
 *
 * @return a Map from {@link DatabaseVersionHeader}s to the local File in which that version can be found.
 */
private Map<DatabaseVersionHeader, File> findDatabaseVersionLocations(Map<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders,
		Map<File, DatabaseRemoteFile> databaseRemoteFilesInCache) {

	Map<DatabaseVersionHeader, File> databaseVersionLocations = new HashMap<DatabaseVersionHeader, File>();

	// Iterate entries directly instead of keySet() + get(): avoids a redundant
	// map lookup per cached file.
	for (Map.Entry<File, DatabaseRemoteFile> cacheEntry : databaseRemoteFilesInCache.entrySet()) {
		File databaseFile = cacheEntry.getKey();
		DatabaseRemoteFile databaseRemoteFile = cacheEntry.getValue();

		// Every version contained in this remote file is located in the cached local file
		for (DatabaseVersion databaseVersion : remoteDatabaseHeaders.get(databaseRemoteFile)) {
			databaseVersionLocations.put(databaseVersion.getHeader(), databaseFile);
		}
	}

	return databaseVersionLocations;
}
// Fragment (method signature cut off before "SQLException {"): clones the new
// database version into a delta copy and resolves the local cache file where
// the serialized delta database will be written.
SQLException {
	DatabaseVersion deltaDatabaseVersion = newDatabaseVersion.clone();
	File localDeltaDatabaseFile = config.getCache().getDatabaseFile(remoteDeltaDatabaseFile.getName());

	logger.log(Level.INFO, "Saving local delta database, version {0} to file {1} ... ", new Object[] {
			deltaDatabaseVersion.getHeader(), localDeltaDatabaseFile });
/**
 * Registers the given database version in the vector-clock lookup cache so it
 * can later be retrieved by its vector clock.
 */
private void updateDatabaseVersionIdCache(DatabaseVersion newDatabaseVersion) {
	VectorClock cacheKey = newDatabaseVersion.getVectorClock();
	databaseVersionIdCache.put(cacheKey, newDatabaseVersion);
}
/**
 * Returns the multichunks of all database versions, as aggregated by the
 * full-database cache.
 */
public Collection<MultiChunkEntry> getMultiChunks() {
	Collection<MultiChunkEntry> allMultiChunks = fullDatabaseVersionCache.getMultiChunks();
	return allMultiChunks;
}
// Asserts that two database versions are equal by comparing, in order: vector
// clocks, chunks, multichunks, file contents, and file histories. The order is
// kept as-is so that failure reports point at the first differing component.
public static void assertDatabaseVersionEquals(DatabaseVersion expectedDatabaseVersion, DatabaseVersion actualDatabaseVersion) {
	assertVectorClockEquals(expectedDatabaseVersion.getVectorClock(), actualDatabaseVersion.getVectorClock());

	compareDatabaseVersionChunks(expectedDatabaseVersion.getChunks(), actualDatabaseVersion.getChunks());
	compareDatabaseVersionMultiChunks(expectedDatabaseVersion.getMultiChunks(), actualDatabaseVersion.getMultiChunks());
	compareDatabaseVersionFileContents(expectedDatabaseVersion.getFileContents(), actualDatabaseVersion.getFileContents());
	compareDatabaseVersionFileHistories(expectedDatabaseVersion.getFileHistories(), actualDatabaseVersion.getFileHistories());
}
private void updateContentChecksumCache() { contentChecksumFileHistoriesCache.clear(); for (PartialFileHistory fullFileHistory : fullDatabaseVersionCache.getFileHistories()) { FileChecksum lastVersionChecksum = fullFileHistory.getLastVersion().getChecksum(); if (lastVersionChecksum != null) { List<PartialFileHistory> historiesWithVersionsWithSameChecksum = contentChecksumFileHistoriesCache.get(lastVersionChecksum); // Create if it does not exist if (historiesWithVersionsWithSameChecksum == null) { historiesWithVersionsWithSameChecksum = new ArrayList<PartialFileHistory>(); } // Add to cache historiesWithVersionsWithSameChecksum.add(fullFileHistory); contentChecksumFileHistoriesCache.put(lastVersionChecksum, historiesWithVersionsWithSameChecksum); } } }