/**
 * Returns the range this scanner is configured to read.
 *
 * @throws IllegalStateException if the scanner has been closed (via {@code ensureOpen})
 */
@Override
public synchronized Range getRange() {
  ensureOpen();
  return range;
}
/** * During an upgrade we need to move deletion requests for files under the !METADATA table to the * root tablet. */ public static void moveMetaDeleteMarkers(ServerContext context) { String oldDeletesPrefix = "!!~del"; Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false); // move old delete markers to new location, to standardize table schema between all metadata // tables try (Scanner scanner = new ScannerImpl(context, RootTable.ID, Authorizations.EMPTY)) { scanner.setRange(oldDeletesRange); for (Entry<Key,Value> entry : scanner) { String row = entry.getKey().getRow().toString(); if (row.startsWith(oldDeletesPrefix)) { moveDeleteEntry(context, RootTable.OLD_EXTENT, entry, row, oldDeletesPrefix); } else { break; } } } }
try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) { scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW))); if (!scanner2.iterator().hasNext()) { log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow); MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<>(); try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) { Key rowKey = new Key(metadataEntry);
try (ScannerImpl scanner = new ScannerImpl(context, tableToVerify, Authorizations.EMPTY)) { scanner.setRange(extent.toMetadataRange()); for (Entry<Key,Value> entry : scanner) tkv.put(entry.getKey(), entry.getValue());
/**
 * Creates a new iterator over this scanner's configured range and registers it in the
 * scanner's live-iterator map for tracking.
 */
@Override
public synchronized Iterator<Entry<Key,Value>> iterator() {
  ensureOpen();
  long timeoutSeconds = getTimeout(TimeUnit.SECONDS);
  ScannerIterator scanIter = new ScannerIterator(context, tableId, authorizations, range, size,
      timeoutSeconds, this, isolated, readaheadThreshold, new Reporter());
  // track the iterator, tagging it with a monotonically increasing id
  iters.put(scanIter, iterCount++);
  return scanIter;
}
/**
 * Creates a scanner over the named table, applying the client-configured batch size
 * (if any) from {@code ClientProperty.SCANNER_BATCH_SIZE}.
 *
 * @throws TableNotFoundException if the table name cannot be resolved to a table id
 */
@Override
public Scanner createScanner(String tableName, Authorizations authorizations)
    throws TableNotFoundException {
  checkArgument(tableName != null, "tableName is null");
  checkArgument(authorizations != null, "authorizations is null");
  ensureOpen();
  Scanner result = new ScannerImpl(this, getTableId(tableName), authorizations);
  Integer configuredBatchSize = ClientProperty.SCANNER_BATCH_SIZE.getInteger(getProperties());
  if (configuredBatchSize != null) {
    result.setBatchSize(configuredBatchSize);
  }
  return result;
}
/**
 * Reads the bulk-load markers from a tablet's metadata row and groups the loaded files by the
 * bulk transaction id that loaded them.
 *
 * @param context server context used to reach the metadata tables and the volume manager
 * @param extent tablet whose bulk-file entries should be read
 * @return map from bulk transaction id to the files that transaction loaded
 */
public static Map<Long,? extends Collection<FileRef>> getBulkFilesLoaded(ServerContext context,
    KeyExtent extent) {
  Text metadataRow = extent.getMetadataEntry();
  Map<Long,List<FileRef>> result = new HashMap<>();
  VolumeManager fs = context.getVolumeManager();
  // a tablet of the metadata table has its own metadata stored in the root table
  Table.ID tableToScan = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
  try (Scanner scanner = new ScannerImpl(context, tableToScan, Authorizations.EMPTY)) {
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key,Value> entry : scanner) {
      // the value of each bulk-file entry is the loading transaction's id
      Long tid = Long.parseLong(entry.getValue().toString());
      // computeIfAbsent replaces the manual get/null-check/put sequence
      result.computeIfAbsent(tid, k -> new ArrayList<>()).add(new FileRef(fs, entry.getKey()));
    }
  }
  return result;
}
/** Turns off isolated scanning for subsequently created iterators. */
@Override
public synchronized void disableIsolation() {
  ensureOpen();
  this.isolated = false;
}
/**
 * Reads the datafile entries of a tablet's metadata row and returns each file's size
 * information, sorted by file reference.
 */
public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent,
    ServerContext context) {
  TreeMap<FileRef,DataFileValue> sizes = new TreeMap<>();
  try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    Text metadataRow = extent.getMetadataEntry();
    // restrict the scan to this row's datafile column family
    Key stop =
        new Key(metadataRow, DataFileColumnFamily.NAME, new Text("")).followingKey(PartialKey.ROW_COLFAM);
    scanner.setRange(new Range(new Key(metadataRow), stop));
    for (Entry<Key,Value> entry : scanner) {
      // defensive: stop if the scan strays past this tablet's row
      if (!entry.getKey().getRow().equals(metadataRow)) {
        break;
      }
      DataFileValue dfv = new DataFileValue(entry.getValue().get());
      sizes.put(new FileRef(context.getVolumeManager(), entry.getKey()), dfv);
    }
  }
  return sizes;
}
/** Returns the number of batches fetched before read-ahead kicks in. */
@Override
public synchronized long getReadaheadThreshold() {
  ensureOpen();
  return readaheadThreshold;
}
public static void moveMetaDeleteMarkersFrom14(ServerContext context) { // new KeyExtent is only added to force update to write to the metadata table, not the root // table KeyExtent notMetadata = new KeyExtent(Table.ID.of("anythingNotMetadata"), null, null); // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the // files are for the !METADATA table try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) { scanner.setRange(MetadataSchema.DeletesSection.getRange()); for (Entry<Key,Value> entry : scanner) { String row = entry.getKey().getRow().toString(); if (row.startsWith(MetadataSchema.DeletesSection.getRowPrefix() + "/" + MetadataTable.ID)) { moveDeleteEntry(context, notMetadata, entry, row, MetadataSchema.DeletesSection.getRowPrefix()); } else { break; } } } }
/** Turns on isolated scanning for subsequently created iterators. */
@Override
public synchronized void enableIsolation() {
  ensureOpen();
  this.isolated = true;
}
/**
 * Builds a scanner positioned over the write-ahead log entries of the given tablet's metadata
 * row. The caller is responsible for closing the returned scanner.
 */
private static Scanner getTabletLogScanner(ServerContext context, KeyExtent extent) {
  // metadata-table tablets keep their log entries in the root table
  Table.ID tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
  Scanner scanner = new ScannerImpl(context, tableId, Authorizations.EMPTY);
  scanner.fetchColumnFamily(LogColumnFamily.NAME);
  Text row = extent.getMetadataEntry();
  // bound the range to exactly this row's log column family
  Key stop = new Key(row, LogColumnFamily.NAME).followingKey(PartialKey.ROW_COLFAM);
  scanner.setRange(new Range(new Key(row), stop));
  return scanner;
}
/** Returns the number of key/value pairs fetched per batch. */
@Override
public synchronized int getBatchSize() {
  ensureOpen();
  return size;
}
scanner = new ScannerImpl(client, Table.ID.of(split.getTableId()), authorizations);
/**
 * Returns the authorizations this scanner was created with.
 *
 * NOTE(review): unlike the other accessors this one is not synchronized — presumably
 * {@code authorizations} is immutable/final; confirm before adding synchronization.
 */
@Override
public Authorizations getAuthorizations() {
  ensureOpen();
  return authorizations;
}
authorizations); } else { scanner = new ScannerImpl(client, Table.ID.of(baseSplit.getTableId()), authorizations);
/**
 * Sets the number of key/value pairs fetched per batch.
 *
 * @throws IllegalArgumentException if {@code size} is not positive
 */
@Override
public synchronized void setBatchSize(int size) {
  ensureOpen();
  // reject non-positive sizes up front (guard clause)
  if (size <= 0) {
    throw new IllegalArgumentException("size must be greater than zero");
  }
  this.size = size;
}
public static void deleteTable(Table.ID tableId, boolean insertDeletes, ServerContext context, ZooLock lock) throws AccumuloException { try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY); BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000)
/**
 * Sets how many batches are fetched synchronously before read-ahead begins.
 *
 * @throws IllegalArgumentException if {@code batches} is negative
 */
@Override
public synchronized void setReadaheadThreshold(long batches) {
  ensureOpen();
  if (batches < 0) {
    throw new IllegalArgumentException(
        "Number of batches before read-ahead must be non-negative");
  }
  readaheadThreshold = batches;
}