/**
 * Builds the reverse lookup of the given mappings: for every {@link DatabaseVersionHeader}
 * contained in a cached remote database file, the local cache {@link File} it can be read from.
 *
 * @param remoteDatabaseHeaders maps remote database files to the database versions they contain
 * @param databaseRemoteFilesInCache maps local cache files to the remote database file cached in them
 * @return a Map from {@link DatabaseVersionHeader}s to the local File in which that version can be found
 */
private Map<DatabaseVersionHeader, File> findDatabaseVersionLocations(Map<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders,
		Map<File, DatabaseRemoteFile> databaseRemoteFilesInCache) {

	Map<DatabaseVersionHeader, File> databaseVersionLocations = new HashMap<DatabaseVersionHeader, File>();

	// Iterate over entries instead of keySet()+get() to avoid a second map lookup per file
	for (Map.Entry<File, DatabaseRemoteFile> cacheEntry : databaseRemoteFilesInCache.entrySet()) {
		File databaseFile = cacheEntry.getKey();
		List<DatabaseVersion> databaseVersions = remoteDatabaseHeaders.get(cacheEntry.getValue());

		// A cached file without a corresponding headers entry would otherwise cause an NPE below
		if (databaseVersions == null) {
			continue;
		}

		for (DatabaseVersion databaseVersion : databaseVersions) {
			databaseVersionLocations.put(databaseVersion.getHeader(), databaseFile);
		}
	}

	return databaseVersionLocations;
}
/**
 * Serializes the header of the given database version as XML: the timestamp, the creating
 * client's name and the full vector clock.
 *
 * @param xmlOut stream writer the {@code <header>} element is written to
 * @param databaseVersion database version whose header fields are serialized
 * @throws IOException if a mandatory header field (timestamp, client, vector clock) is missing or empty
 * @throws XMLStreamException if writing the XML elements fails
 */
private void writeDatabaseVersionHeader(IndentXmlStreamWriter xmlOut, DatabaseVersion databaseVersion) throws IOException, XMLStreamException {
	// Refuse to serialize incomplete headers; a partially written header could not be read back
	if (databaseVersion.getTimestamp() == null || databaseVersion.getClient() == null
			|| databaseVersion.getVectorClock() == null || databaseVersion.getVectorClock().isEmpty()) {

		logger.log(Level.SEVERE, "Cannot write database version. Header fields must be filled: "+databaseVersion.getHeader());
		throw new IOException("Cannot write database version. Header fields must be filled: "+databaseVersion.getHeader());
	}

	xmlOut.writeStartElement("header");

	// <time value="..."/> -- epoch milliseconds of the version timestamp
	xmlOut.writeEmptyElement("time");
	xmlOut.writeAttribute("value", databaseVersion.getTimestamp().getTime());

	// <client name="..."/> -- machine that created this database version
	xmlOut.writeEmptyElement("client");
	xmlOut.writeAttribute("name", databaseVersion.getClient());

	// <vectorClock> with one <client name value/> entry per known machine
	xmlOut.writeStartElement("vectorClock");

	VectorClock vectorClock = databaseVersion.getVectorClock();

	for (Map.Entry<String, Long> vectorClockEntry : vectorClock.entrySet()) {
		xmlOut.writeEmptyElement("client");
		xmlOut.writeAttribute("name", vectorClockEntry.getKey());
		xmlOut.writeAttribute("value", vectorClockEntry.getValue());
	}

	xmlOut.writeEndElement(); // </vectorClock>
	xmlOut.writeEndElement(); // </header>
}
// Only take over database versions whose vector clock falls inside the requested load range
if (vectorClockInLoadRange) {
	database.addDatabaseVersion(databaseVersion);
	logger.log(Level.INFO, " + Added database version " + databaseVersion.getHeader());
/**
 * Merges the metadata of all DIRTY database versions (versions that are not part of the
 * winning branch) into the given new database version, so that it is included in the new Up.
 * Only metadata is re-uploaded; the actual multichunks already exist in the repository.
 *
 * @param newDatabaseVersion {@link DatabaseVersion} the dirty metadata is added to
 */
private void addDirtyData(DatabaseVersion newDatabaseVersion) {
	Iterator<DatabaseVersion> dirtyDatabaseVersions = localDatabase.getDirtyDatabaseVersions();

	// Nothing dirty locally -> nothing to merge
	if (!dirtyDatabaseVersions.hasNext()) {
		logger.log(Level.INFO, "No DIRTY data found in database (no dirty databases); Nothing to do here.");
		return;
	}

	logger.log(Level.INFO, "Adding DIRTY data to new database version: ");

	while (dirtyDatabaseVersions.hasNext()) {
		DatabaseVersion dirtyVersion = dirtyDatabaseVersions.next();

		logger.log(Level.INFO, "- Adding chunks/multichunks/filecontents from database version " + dirtyVersion.getHeader());

		for (ChunkEntry chunk : dirtyVersion.getChunks()) {
			newDatabaseVersion.addChunk(chunk);
		}

		for (MultiChunkEntry multiChunk : dirtyVersion.getMultiChunks()) {
			newDatabaseVersion.addMultiChunk(multiChunk);
		}

		for (FileContent fileContent : dirtyVersion.getFileContents()) {
			newDatabaseVersion.addFileContent(fileContent);
		}
	}
}
/**
 * Creates a new {@link DatabaseVersion} stamped with the current time, whose header is
 * based on the given predecessor version (or on no predecessor at all).
 *
 * @param basedOnDatabaseVersion predecessor database version, or {@code null} to start fresh
 * @return a newly created database version
 */
public static DatabaseVersion createDatabaseVersion(DatabaseVersion basedOnDatabaseVersion) {
	if (basedOnDatabaseVersion != null) {
		return createDatabaseVersion(basedOnDatabaseVersion.getHeader(), new Date());
	}

	return createDatabaseVersion(null, new Date());
}
/** * This methods takes a Map containing DatabaseVersions (headers only) and loads these headers into {@link DatabaseBranches}. * In addition, the local branch is added to this. The resulting DatabaseBranches will contain all headers exactly once, * for the client that created that version. * * @param localBranch {@link DatabaseBranch} containing the locally known headers. * @param remoteDatabaseHeaders Map from {@link DatabaseRemoteFile}s (important for client names) to the {@link DatabaseVersion}s that are * contained in these files. * * @return DatabaseBranches filled with all the headers that originated from either of the parameters. */ private DatabaseBranches populateDatabaseBranches(DatabaseBranch localBranch, SortedMap<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders) { DatabaseBranches allBranches = new DatabaseBranches(); allBranches.put(config.getMachineName(), localBranch.clone()); for (DatabaseRemoteFile remoteDatabaseFile : remoteDatabaseHeaders.keySet()) { // Populate branches DatabaseBranch remoteClientBranch = allBranches.getBranch(remoteDatabaseFile.getClientName(), true); for (DatabaseVersion remoteDatabaseVersion : remoteDatabaseHeaders.get(remoteDatabaseFile)) { DatabaseVersionHeader header = remoteDatabaseVersion.getHeader(); remoteClientBranch.add(header); } } logger.log(Level.INFO, "Populated unknown branches: " + allBranches); return allBranches; }
/**
 * Persists a complete database version to the local SQL database: first the header row and
 * its vector clock, then all contained chunk, multichunk, file content and file history
 * metadata, all keyed by the generated database version id.
 *
 * @param connection open SQL connection used for all inserts
 * @param databaseVersion database version to persist
 * @return the generated database id of the newly written database version row
 * @throws SQLException if any of the underlying inserts fails
 */
private long writeDatabaseVersion(Connection connection, DatabaseVersion databaseVersion) throws SQLException {
	long databaseVersionId = writeDatabaseVersionHeaderInternal(connection, databaseVersion.getHeader()); // TODO [low] Use writeDatabaseVersion()?
	writeVectorClock(connection, databaseVersionId, databaseVersion.getHeader().getVectorClock());

	// Child records all reference the generated databaseVersionId; keep insert order as-is
	chunkDao.writeChunks(connection, databaseVersionId, databaseVersion.getChunks());
	multiChunkDao.writeMultiChunks(connection, databaseVersionId, databaseVersion.getMultiChunks());
	fileContentDao.writeFileContents(connection, databaseVersionId, databaseVersion.getFileContents());
	fileHistoryDao.writeFileHistories(connection, databaseVersionId, databaseVersion.getFileHistories());

	return databaseVersionId;
}
private void applyChangesAndPersistDatabase(MemoryDatabase winnersDatabase, boolean cleanupOccurred, List<PartialFileHistory> preDeleteFileHistoriesWithLastVersion) throws Exception { if (options.isApplyChanges()) { new ApplyChangesOperation(config, localDatabase, transferManager, winnersDatabase, result, cleanupOccurred, preDeleteFileHistoriesWithLastVersion).execute(); } else { logger.log(Level.INFO, "Doing nothing on the file system, because --no-apply switched on"); } // We only persist the versions that we have already applied. DatabaseBranch currentApplyBranch = new DatabaseBranch(); for (DatabaseVersion databaseVersion : winnersDatabase.getDatabaseVersions()) { currentApplyBranch.add(databaseVersion.getHeader()); } persistDatabaseVersions(currentApplyBranch, winnersDatabase); localDatabase.commit(); }
// Persist the assembled database version to the local SQL database and keep its generated id
logger.log(Level.INFO, "Persisting local SQL database (new database version {0}) ...", databaseVersion.getHeader().toString());
long newDatabaseVersionId = localDatabase.writeDatabaseVersion(databaseVersion);
// Resolve the local cache location for the remote delta database file before saving it
File localDeltaDatabaseFile = config.getCache().getDatabaseFile(remoteDeltaDatabaseFile.getName());

logger.log(Level.INFO, "Saving local delta database, version {0} to file {1} ... ", new Object[] {
		deltaDatabaseVersion.getHeader(), localDeltaDatabaseFile });
/**
 * Creates a copy of this database version. The header reference is shared (not deep-copied);
 * all chunk, multichunk, file content and file history entries are re-added to the copy.
 */
@Override
public DatabaseVersion clone() {
	DatabaseVersion copy = new DatabaseVersion();

	// Header is intentionally shared between original and copy
	copy.setHeader(getHeader());

	for (ChunkEntry chunk : getChunks()) {
		copy.addChunk(chunk);
	}

	for (MultiChunkEntry multiChunk : getMultiChunks()) {
		copy.addMultiChunk(multiChunk);
	}

	for (FileContent content : getFileContents()) {
		copy.addFileContent(content);
	}

	for (PartialFileHistory history : getFileHistories()) {
		copy.addFileHistory(history);
	}

	return copy;
}