MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock); return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper)); MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove); MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
/**
 * Records a delete flag in the metadata table for a single file path belonging to the given
 * table, so the garbage collector can later remove the file.
 *
 * @param context server context used to build and apply the mutation
 * @param tableId id of the table that owns {@code path}
 * @param path file path to flag for deletion
 */
public static void addDeleteEntry(ServerContext context, Table.ID tableId, String path) {
  // A null end row and null prev end row address the table as a whole.
  KeyExtent wholeTable = new KeyExtent(tableId, null, null);
  update(context, createDeleteMutation(context, tableId, path), wholeTable);
}
// Undo for the clone step: remove any metadata entries that were created for the
// destination table id, leaving the source table untouched.
@Override public void undo(long tid, Master environment) throws Exception { MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment.getContext(), environment.getMasterLock()); }
/**
 * Applies a mutation to the correct metadata store for the given extent: metadata about the
 * metadata table itself goes to the root table, everything else goes to the metadata table.
 *
 * @param context server context
 * @param zooLock lock proving this server may write metadata
 * @param m mutation to apply
 * @param extent tablet extent the mutation describes
 */
public static void update(ServerContext context, ZooLock zooLock, Mutation m, KeyExtent extent) {
  Writer writer;
  if (extent.isMeta()) {
    writer = getRootTable(context);
  } else {
    writer = getMetadataTable(context);
  }
  update(context, writer, zooLock, m);
}
/**
 * Prints every volume referenced from ZooKeeper: the volume holding the root tablet
 * directory plus any volumes named in the root tablet's write-ahead log entries.
 *
 * @param context server context used to read ZooKeeper state
 */
private static void listZookeeper(ServerContext context) throws Exception {
  System.out.println("Listing volumes referenced in zookeeper");
  TreeSet<String> referencedVolumes = new TreeSet<>();

  // The root tablet directory contributes one volume.
  referencedVolumes.add(getTableURI(MetadataTableUtil.getRootTabletDir(context)));

  // Each root-table log entry may reference additional volumes.
  ArrayList<LogEntry> rootLogs = new ArrayList<>();
  MetadataTableUtil.getRootLogEntries(context, rootLogs);
  for (LogEntry entry : rootLogs) {
    getLogURIs(referencedVolumes, entry);
  }

  for (String volume : referencedVolumes) {
    System.out.println("\tVolume : " + volume);
  }
}
List<FileRef> highDatafilesToRemove = new ArrayList<>(); MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove); MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl); TServerInstance instance = new TServerInstance(location, zl.getSessionId()); Writer writer = MetadataTableUtil.getMetadataTable(context); Assignment assignment = new Assignment(high, instance); Mutation m = new Mutation(assignment.tablet.getMetadataEntry()); .getBulkFilesLoaded(context, extent); MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl); MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl); .getBulkFilesLoaded(context, low); Map<Long,? extends Collection<FileRef>> highBulkFiles = MetadataTableUtil .getBulkFilesLoaded(context, high);
List<FileRef> highDatafilesToRemove = new ArrayList<>(); MetadataTableUtil.splitDatafiles(midRow, splitRatio, firstAndLastRows, getDatafileManager().getDatafileSizes(), lowDatafileSizes, highDatafileSizes, highDatafilesToRemove); MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, getTabletServer().getContext(), getTabletServer().getLock()); MasterMetadataUtil.addNewTablet(getTabletServer().getContext(), low, lowDirectory, getTabletServer().getTabletSession(), lowDatafileSizes, getBulkIngestedFiles(), time, lastFlushID, lastCompactID, getTabletServer().getLock()); MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, getTabletServer().getContext(), getTabletServer().getLock());
MetadataTableUtil.initializeClone(tableName, "0", "1", conn, bw2); int rc = MetadataTableUtil.checkClone(tableName, "0", "1", conn, bw2); rc = MetadataTableUtil.checkClone(tableName, "0", "1", conn, bw2);
initializeClone(null, srcTableId, tableId, context, bw); int rewrites = checkClone(null, srcTableId, tableId, context, bw); deleteTable(tableId, false, context, null);
public static void replaceDatafiles(ServerContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags) { if (insertDeleteFlags) { // add delete flags for those paths before the data file reference is removed MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, context); } // replace data file references to old mapfiles with the new mapfiles Mutation m = new Mutation(extent.getMetadataEntry()); for (FileRef pathToRemove : datafilesToDelete) m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta()); for (FileRef scanFile : scanFiles) m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0])); if (size.getNumEntries() > 0) m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode())); if (compactionId != null) TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes())); TServerInstance self = getTServerInstance(address, zooLock); self.putLastLocation(m); // remove the old location if (lastLocation != null && !lastLocation.equals(self)) lastLocation.clearLastLocation(m); MetadataTableUtil.update(context, zooLock, m, extent); }
datafiles.add(new FileRef(this.master.fs, key)); if (datafiles.size() > 1000) { MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext()); datafiles.clear(); MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext()); datafiles.clear(); MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext()); BatchWriter bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig()); try { ServerConstants.getBaseUris(master.getContext())) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION; MetadataTableUtil.addTablet( new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master.getContext(), timeType, this.master.masterLock);
/**
 * During an upgrade from 1.6 to 1.7, we need to add the replication table.
 *
 * <p>Chooses a volume for the replication table's default tablet directory and seeds its
 * single metadata row with the directory, logical time zero, and a null previous end row.
 *
 * @param context server context used to choose a volume and write the metadata entry
 */
public static void createReplicationTable(ServerContext context) {
  // Pick a base volume for the new table's default tablet directory.
  VolumeChooserEnvironment chooserEnv =
      new VolumeChooserEnvironment(ReplicationTable.ID, context);
  String baseVolume =
      context.getVolumeManager().choose(chooserEnv, ServerConstants.getBaseUris(context));
  String tabletDir = baseVolume + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
      + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;

  // Seed the single tablet row (null end row) with directory, time, and prev-row columns,
  // all at timestamp 0.
  Mutation seedRow = new Mutation(new Text(TabletsSection.getRow(ReplicationTable.ID, null)));
  seedRow.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0,
      new Value(tabletDir.getBytes(UTF_8)));
  seedRow.put(TIME_COLUMN.getColumnFamily(), TIME_COLUMN.getColumnQualifier(), 0,
      new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
  seedRow.put(PREV_ROW_COLUMN.getColumnFamily(), PREV_ROW_COLUMN.getColumnQualifier(), 0,
      KeyExtent.encodePrevEndRow(null));
  update(context, getMetadataTable(context), null, seedRow);
}
public TabletData(ServerContext context, VolumeManager fs, AccumuloConfiguration conf) throws IOException { directory = VolumeUtil.switchRootTableVolume(context, MetadataTableUtil.getRootTabletDir(context)); logEntries = MetadataTableUtil.getLogEntries(context, RootTable.EXTENT); } catch (Exception ex) { throw new RuntimeException("Unable to read tablet log entries", ex);
bw.addMutation(createDeleteMutation(context, tableId, ref.meta().toString())); bw.addMutation(createDeleteMutation(context, tableId, cell.getValue().toString())); m = new Mutation(key.getRow()); if (lock != null) putLockID(context, lock, m); m = new Mutation(key.getRow()); if (lock != null) putLockID(context, lock, m);
/**
 * Registers the given extents as tablets (each with one data file), then drives a partial
 * split of one of them at {@code mr} and verifies recovery, failing at the given fail point.
 *
 * @param context server context for metadata writes
 * @param failPoint step at which the simulated split is abandoned
 * @param mr row to split at
 * @param extentToSplit index into {@code extents} of the tablet to split
 * @param zl lock proving this process may write metadata
 * @param extents tablets to create before the split
 */
private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr,
    int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
  Text splitRow = new Text(mr);
  SortedMap<FileRef,DataFileValue> filesOfSplitTablet = null;

  // Create each tablet and register a single bulk-loaded data file for it.
  for (int idx = 0; idx < extents.length; idx++) {
    KeyExtent tabletExtent = extents[idx];
    String tabletDir =
        ServerConstants.getTablesDirs()[0] + "/" + tabletExtent.getTableId() + "/dir_" + idx;
    MetadataTableUtil.addTablet(tabletExtent, tabletDir, context, TabletTime.LOGICAL_TIME_ID, zl);

    SortedMap<FileRef,DataFileValue> tabletFiles = new TreeMap<>();
    tabletFiles.put(new FileRef(tabletDir + "/" + RFile.EXTENSION + "_000_000"),
        new DataFileValue(1000017 + idx, 10000 + idx));
    if (idx == extentToSplit) {
      filesOfSplitTablet = tabletFiles;
    }

    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    MetadataTableUtil.updateTabletDataFile(tid, tabletExtent, tabletFiles, "L0", context, zl);
  }

  // Split the chosen tablet into high (above splitRow) and low (below splitRow) halves.
  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), splitRow);
  KeyExtent low = new KeyExtent(extent.getTableId(), splitRow, extent.getPrevEndRow());
  splitPartiallyAndRecover(context, extent, high, low, .4, filesOfSplitTablet, splitRow,
      "localhost:1234", failPoint, zl);
}
/**
 * Deletes this problem report's entry from the metadata table. Reports are stored under a
 * row keyed by "~err_" plus the table id, with the problem type as family and the resource
 * as qualifier.
 *
 * @param context server context providing the metadata table writer
 */
void removeFromMetadataTable(ServerContext context) throws Exception {
  Text errorRow = new Text("~err_" + tableId);
  Mutation deletion = new Mutation(errorRow);
  deletion.putDelete(new Text(problemType.name()), new Text(resource));
  MetadataTableUtil.getMetadataTable(context).update(deletion);
}
// Convenience overload: derives the metadata row from the extent and delegates to the
// row-based finishSplit, which records the final file sizes and removes high-side files.
public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, ServerContext context, ZooLock zooLock) { finishSplit(extent.getMetadataEntry(), datafileSizes, highDatafilesToRemove, context, zooLock); }
@Override public Repo<Master> call(long tid, Master environment) throws Exception { LoggerFactory.getLogger(CloneMetadata.class) .info(String.format("Cloning %s with tableId %s from srcTableId %s", cloneInfo.tableName, cloneInfo.tableId, cloneInfo.srcTableId)); // need to clear out any metadata entries for tableId just in case this // died before and is executing again MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment.getContext(), environment.getMasterLock()); MetadataTableUtil.cloneTable(environment.getContext(), cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem()); return new FinishCloneTable(cloneInfo); }
MetadataTableUtil.addDeleteEntries(tablet.getExtent(), Collections.singleton(absMergeFile), tablet.getContext());
/**
 * Builds iterators over write-ahead log entries from three sources, consumed in order:
 * the root tablet's entries (from ZooKeeper), the root table's entries for the metadata
 * table, and the metadata table's log column family.
 *
 * @param context server context used to read ZooKeeper and scan the metadata table
 * @throws IOException if the metadata scanner cannot be created or configured
 */
LogEntryIterator(ServerContext context) throws IOException, KeeperException, InterruptedException {
  zookeeperEntries = getLogEntries(context, RootTable.EXTENT).iterator();
  rootTableEntries =
      getLogEntries(context, new KeyExtent(MetadataTable.ID, null, null)).iterator();
  try {
    Scanner metaScanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    log.info("Setting range to {}", MetadataSchema.TabletsSection.getRange());
    metaScanner.setRange(MetadataSchema.TabletsSection.getRange());
    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
    metadataEntries = metaScanner.iterator();
  } catch (Exception ex) {
    // Surface any scanner setup failure as an IOException, preserving the cause.
    throw new IOException(ex);
  }
}