@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  // Final FATE step of a bulk import: remove all transient state the import
  // created (processing-flag file, metadata load markers, HDFS reservations,
  // the table read lock, and the ZooKeeper arbitrator entry).
  master.updateBulkImportStatus(source, BulkImportState.CLEANUP);

  log.debug("removing the bulkDir processing flag file in " + bulk);
  Path bulkPath = new Path(bulk);
  // The flag file is addressed relative to the tables dir: /<tableDir>/<bulkDir>
  MetadataTableUtil.removeBulkLoadInProgressFlag(master.getContext(),
      "/" + bulkPath.getParent().getName() + "/" + bulkPath.getName());
  // Schedule the bulk directory itself for garbage collection.
  MetadataTableUtil.addDeleteEntry(master.getContext(), tableId, bulkPath.toString());

  log.debug("removing the metadata table markers for loaded files");
  AccumuloClient accumuloClient = master.getContext();
  MetadataTableUtil.removeBulkLoadEntries(accumuloClient, tableId, tid);

  log.debug("releasing HDFS reservations for " + source + " and " + error);
  Utils.unreserveHdfsDirectory(master, source, tid);
  Utils.unreserveHdfsDirectory(master, error, tid);
  Utils.getReadLock(master, tableId, tid).unlock();

  log.debug("completing bulkDir import transaction " + tid);
  ZooArbitrator.cleanup(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
  master.removeBulkImportStatus(source);
  // Terminal step: no further Repo to run.
  return null;
}
}
// NOTE(review): stray duplicate of the arbitrator-cleanup call that already
// appears inside the surrounding call()/undo() methods; `master` and `tid`
// are not in scope at this position — confirm whether this fragment is a
// paste artifact that should be removed.
ZooArbitrator.cleanup(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
@Override public void undo(long tid, Master environment) throws Exception { // unreserve sourceDir/error directories Utils.unreserveHdfsDirectory(environment, bulkInfo.sourceDir, tid); Utils.getReadLock(environment, bulkInfo.tableId, tid).unlock(); TransactionWatcher.ZooArbitrator.cleanup(environment.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid); } }
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  // Final FATE step of a bulk import (Connector-era API): tear down the
  // processing flag, metadata markers, HDFS reservations, read lock, and
  // the ZooKeeper arbitrator entry for this transaction.
  master.updateBulkImportStatus(source, BulkImportState.CLEANUP);

  log.debug("removing the bulk processing flag file in " + bulk);
  Path dir = new Path(bulk);
  // Flag file path is relative to the tables dir: /<tableDir>/<bulkDir>
  MetadataTableUtil.removeBulkLoadInProgressFlag(master,
      "/" + dir.getParent().getName() + "/" + dir.getName());
  // Hand the bulk directory over to the garbage collector.
  MetadataTableUtil.addDeleteEntry(master, tableId, dir.toString());

  log.debug("removing the metadata table markers for loaded files");
  Connector connector = master.getConnector();
  MetadataTableUtil.removeBulkLoadEntries(connector, tableId, tid);

  log.debug("releasing HDFS reservations for " + source + " and " + error);
  Utils.unreserveHdfsDirectory(source, tid);
  Utils.unreserveHdfsDirectory(error, tid);
  Utils.getReadLock(tableId, tid).unlock();

  log.debug("completing bulk import transaction " + tid);
  ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
  master.removeBulkImportStatus(source);
  // Terminal step: nothing further to run.
  return null;
}
}
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  // Final FATE step of a bulk import (oldest MetadataTable API variant):
  // remove the processing flag and load markers, release reservations and
  // the read lock, and clear the transaction's arbitrator entry.
  log.debug("removing the bulk processing flag file in " + bulk);
  Path dir = new Path(bulk);
  // Flag file addressed relative to the tables dir: /<tableDir>/<bulkDir>
  MetadataTable.removeBulkLoadInProgressFlag(
      "/" + dir.getParent().getName() + "/" + dir.getName());
  // Note this variant registers only the relative dir name for deletion.
  MetadataTable.addDeleteEntry(tableId, "/" + dir.getName());

  log.debug("removing the metadata table markers for loaded files");
  Connector connector = master.getConnector();
  MetadataTable.removeBulkLoadEntries(connector, tableId, tid);

  log.debug("releasing HDFS reservations for " + source + " and " + error);
  Utils.unreserveHdfsDirectory(source, tid);
  Utils.unreserveHdfsDirectory(error, tid);
  Utils.getReadLock(tableId, tid).unlock();

  log.debug("completing bulk import transaction " + tid);
  ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
  // Terminal step: nothing further to run.
  return null;
}
}
@Override public void undo(long tid, Master environment) throws Exception { // unreserve source/error directories Utils.unreserveHdfsDirectory(sourceDir, tid); Utils.unreserveHdfsDirectory(errorDir, tid); Utils.getReadLock(tableId, tid).unlock(); ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid); } }
@Override public void undo(long tid, Master environment) throws Exception { // unreserve source/error directories Utils.unreserveHdfsDirectory(environment, sourceDir, tid); Utils.unreserveHdfsDirectory(environment, errorDir, tid); Utils.getReadLock(environment, tableId, tid).unlock(); ZooArbitrator.cleanup(environment.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid); } }