ServerConstants.getBaseUris(master.getContext())) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION; MetadataTableUtil.addTablet( new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master.getContext(), timeType, this.master.masterLock);
@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
  // Seed the metadata table with the new table's default tablet, which spans
  // the entire row range (null end row and null previous end row).
  KeyExtent wholeTableExtent = new KeyExtent(tableInfo.getTableId(), null, null);
  MetadataTableUtil.addTablet(wholeTableExtent, tableInfo.defaultTabletDir,
      environment.getContext(), tableInfo.getTimeType(), environment.getMasterLock());

  // If the user supplied initial split points, read them (and the tablet
  // directories pre-created for them) back from the files staged earlier,
  // then record the resulting tablets in the metadata table in one batch.
  if (tableInfo.getInitialSplitSize() > 0) {
    SortedSet<Text> splitPoints =
        Utils.getSortedSetFromFile(environment.getInputStream(tableInfo.getSplitFile()), true);
    SortedSet<Text> tabletDirs =
        Utils.getSortedSetFromFile(environment.getInputStream(tableInfo.getSplitDirsFile()), false);
    Map<Text,Text> dirForSplit = createSplitDirectoryMap(splitPoints, tabletDirs);
    try (BatchWriter writer = environment.getContext().createBatchWriter("accumulo.metadata")) {
      writeSplitsToMetadataTable(environment.getContext(), tableInfo.getTableId(), splitPoints,
          dirForSplit, tableInfo.getTimeType(), environment.getMasterLock(), writer);
    }
  }
  return new FinishCreateTable(tableInfo);
}
@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
  // Register the new table's single default tablet; the (null, null) extent
  // bounds mean it covers the table's entire row range.
  MetadataTableUtil.addTablet(new KeyExtent(tableInfo.tableId, null, null), tableInfo.dir,
      environment, tableInfo.timeType, environment.getMasterLock());
  return new FinishCreateTable(tableInfo);
}
/**
 * Seeds the metadata table with the given tablets (one data file each), then drives a
 * partially-completed split of one of them through recovery.
 *
 * @param context server context used for all metadata operations
 * @param failPoint point at which the simulated split is abandoned before recovery
 *        (interpreted by splitPartiallyAndRecover)
 * @param mr the midpoint row at which the chosen tablet is split
 * @param extentToSplit index into {@code extents} of the tablet to split
 * @param zl ZooKeeper lock passed to every metadata mutation
 * @param extents the tablets to create before the split is attempted
 */
private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr,
    int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
  Text midRow = new Text(mr);
  // Captured inside the loop: the data-file map of the tablet that will be split.
  SortedMap<FileRef,DataFileValue> splitMapFiles = null;
  for (int i = 0; i < extents.length; i++) {
    KeyExtent extent = extents[i];
    // Each tablet gets its own directory under the first configured tables dir.
    String tdir =
        ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId() + "/dir_" + i;
    MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
    // One synthetic RFile per tablet; size/entry counts vary with i so tablets
    // are distinguishable in the metadata.
    SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<>();
    mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"),
        new DataFileValue(1000017 + i, 10000 + i));
    if (i == extentToSplit) {
      splitMapFiles = mapFiles;
    }
    // Register the file under a bulk-import transaction (tid 0) so
    // updateTabletDataFile can attribute it to an arbitrated load.
    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
  }
  // Derive the two halves of the chosen tablet around midRow: 'high' keeps the
  // original end row, 'low' keeps the original previous end row.
  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
  KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
  // .4 is the split ratio recorded for the partial split; "localhost:1234"
  // presumably stands in for a tablet server location — TODO confirm against callee.
  splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow,
      "localhost:1234", failPoint, zl);
}
ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION; MetadataTableUtil.addTablet( new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master, timeType, this.master.masterLock);