errorDir + " is not empty"); ZooArbitrator.start(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid); master.updateBulkImportStatus(sourceDir, BulkImportState.MOVING);
protected Arbitrator getArbitrator(ServerContext context) {
  // Objects.nonNull() only returns a boolean and never throws; use requireNonNull for the intended null check
  Objects.requireNonNull(context);
  return new ZooArbitrator(context);
}
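For orientation, a minimal sketch (not one of the excerpts) of how a getArbitrator() hook like the one above is typically consumed. transactionAlive and transactionComplete are the Arbitrator interface methods; the tid value, enclosing class, and control flow are assumptions for illustration, and real callers wrap the calls in try/catch since they can throw.

// Illustrative only: ask the arbitrator whether a bulk transaction is still live
// before trusting its load markers. tid and the surrounding class are hypothetical.
long tid = 42L; // hypothetical fate transaction id
Arbitrator arbitrator = getArbitrator(context);
if (arbitrator.transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
  // the bulk import fate transaction is still in progress; keep its bookkeeping
} else if (arbitrator.transactionComplete(Constants.BULK_ARBITRATOR_TYPE, tid)) {
  // the transaction has finished; cached state for it can be discarded
}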
ZooArbitrator.cleanup(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
+ " is not empty"); ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not empty");
ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
master.updateBulkImportStatus(sourceDir, BulkImportState.MOVING);
private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr,
    int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
  Text midRow = new Text(mr);
  SortedMap<FileRef,DataFileValue> splitMapFiles = null;
  for (int i = 0; i < extents.length; i++) {
    KeyExtent extent = extents[i];
    String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId() + "/dir_" + i;
    MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
    SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<>();
    mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"),
        new DataFileValue(1000017 + i, 10000 + i));
    if (i == extentToSplit) {
      splitMapFiles = mapFiles;
    }
    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
  }
  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
  KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
  splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow,
      "localhost:1234", failPoint, zl);
}
@Override
public void run() {
  // gather the list of transactions the tablets have cached
  final Set<Long> tids = new HashSet<>();
  for (Tablet tablet : server.getOnlineTablets()) {
    tids.addAll(tablet.getBulkIngestedFiles().keySet());
  }
  try {
    // get the current transactions from ZooKeeper
    final Set<Long> allTransactionsAlive =
        ZooArbitrator.allTransactionsAlive(Constants.BULK_ARBITRATOR_TYPE);
    // remove any that are still alive
    tids.removeAll(allTransactionsAlive);
    // cleanup any memory of these transactions
    for (Tablet tablet : server.getOnlineTablets()) {
      tablet.cleanupBulkLoadedFiles(tids);
    }
  } catch (KeeperException | InterruptedException e) {
    // we'll just clean it up again later
    log.debug("Error reading bulk import live transactions {}", e.toString());
  }
}
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  master.updateBulkImportStatus(source, BulkImportState.CLEANUP);
  log.debug("removing the bulk processing flag file in " + bulk);
  Path bulkDir = new Path(bulk);
  MetadataTableUtil.removeBulkLoadInProgressFlag(master,
      "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
  MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
  log.debug("removing the metadata table markers for loaded files");
  Connector conn = master.getConnector();
  MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
  log.debug("releasing HDFS reservations for " + source + " and " + error);
  Utils.unreserveHdfsDirectory(source, tid);
  Utils.unreserveHdfsDirectory(error, tid);
  Utils.getReadLock(tableId, tid).unlock();
  log.debug("completing bulk import transaction " + tid);
  ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
  master.removeBulkImportStatus(source);
  return null;
}
}
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  log.debug("removing the bulk processing flag file in " + bulk);
  Path bulkDir = new Path(bulk);
  MetadataTable.removeBulkLoadInProgressFlag(
      "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
  MetadataTable.addDeleteEntry(tableId, "/" + bulkDir.getName());
  log.debug("removing the metadata table markers for loaded files");
  Connector conn = master.getConnector();
  MetadataTable.removeBulkLoadEntries(conn, tableId, tid);
  log.debug("releasing HDFS reservations for " + source + " and " + error);
  Utils.unreserveHdfsDirectory(source, tid);
  Utils.unreserveHdfsDirectory(error, tid);
  Utils.getReadLock(tableId, tid).unlock();
  log.debug("completing bulk import transaction " + tid);
  ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
  return null;
}
}
public TransactionWatcher() { super(new ZooArbitrator()); } }
protected Arbitrator getArbitrator() { return new ZooArbitrator(); } }
@Override
public void undo(long tid, Master environment) throws Exception {
  // unreserve source/error directories
  Utils.unreserveHdfsDirectory(sourceDir, tid);
  Utils.unreserveHdfsDirectory(errorDir, tid);
  Utils.getReadLock(tableId, tid).unlock();
  ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
}
}
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
  return new CopyFailed(tableId, source, bulk, error);
}
}
protected Arbitrator getArbitrator() { return new ZooArbitrator(); }
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  master.updateBulkImportStatus(source, BulkImportState.CLEANUP);
  log.debug("removing the bulk processing flag file in " + bulk);
  Path bulkDir = new Path(bulk);
  MetadataTableUtil.removeBulkLoadInProgressFlag(master.getContext(),
      "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
  MetadataTableUtil.addDeleteEntry(master.getContext(), tableId, bulkDir.toString());
  log.debug("removing the metadata table markers for loaded files");
  AccumuloClient client = master.getContext();
  MetadataTableUtil.removeBulkLoadEntries(client, tableId, tid);
  log.debug("releasing HDFS reservations for " + source + " and " + error);
  Utils.unreserveHdfsDirectory(master, source, tid);
  Utils.unreserveHdfsDirectory(master, error, tid);
  Utils.getReadLock(master, tableId, tid).unlock();
  log.debug("completing bulk import transaction " + tid);
  ZooArbitrator.cleanup(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
  master.removeBulkImportStatus(source);
  return null;
}
}
@Override
public void run() {
  // gather the list of transactions the tablets have cached
  final Set<Long> tids = new HashSet<>();
  for (Tablet tablet : server.getOnlineTablets()) {
    tids.addAll(tablet.getBulkIngestedFiles().keySet());
  }
  try {
    // get the current transactions from ZooKeeper
    final Set<Long> allTransactionsAlive =
        ZooArbitrator.allTransactionsAlive(server.getContext(), Constants.BULK_ARBITRATOR_TYPE);
    // remove any that are still alive
    tids.removeAll(allTransactionsAlive);
    // cleanup any memory of these transactions
    for (Tablet tablet : server.getOnlineTablets()) {
      tablet.cleanupBulkLoadedFiles(tids);
    }
  } catch (KeeperException | InterruptedException e) {
    // we'll just clean it up again later
    log.debug("Error reading bulk import live transactions {}", e);
  }
}
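Read together, the excerpts trace one arbitration lifecycle per bulk-import fate transaction. Below is a minimal sketch using only the static ZooArbitrator helpers that appear above, in their older no-context signatures (newer versions pass a ServerContext as the first argument); the ordering commentary is interpretive, and the tid value is hypothetical.

long tid = 42L; // hypothetical fate transaction id
// Announce the transaction before any files are moved into place.
ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
// When loading is finished, stop the transaction so no new work can join it,
// then deal with any files that failed to load.
ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
// After metadata markers and HDFS reservations are released, forget the
// transaction entirely (undo() does the same cleanup when the import fails early).
ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
// Tablet servers periodically compare their cached transaction ids against
// ZooArbitrator.allTransactionsAlive(Constants.BULK_ARBITRATOR_TYPE) and drop
// bookkeeping for any transaction that is no longer alive.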