// ... Table.ID.of(tid));
if (outOfBoundsTablets == null) {
  continue;
}

// Adjust per-table tablet counts for migrations issued on the previous balancing pass.
for (TabletMigration migration : migrationsFromLastPass.values()) {
  TableInfo fromInfo = getTableInfo(currentCopy, serverTableIdCopied,
      migration.tablet.getTableId().toString(), migration.oldServer);
  if (fromInfo != null) {
    fromInfo.setOnlineTablets(fromInfo.getOnlineTablets() - 1);
  }
  TableInfo toInfo = getTableInfo(currentCopy, serverTableIdCopied,
      migration.tablet.getTableId().toString(), migration.newServer);
  if (toInfo != null) {
    toInfo.setOnlineTablets(toInfo.getOnlineTablets() + 1);
  }
}

// Resolve a table id to its name and then to its configured pool name.
Table.ID tableId = Table.ID.of(s);
String tableName = tableIdToTableName.get(tableId);
String regexTableName = getPoolNameForTable(tableName);
public static String getPrintableTableInfoFromName(ClientContext context, String tableName) {
  Table.ID tableId = null;
  try {
    tableId = getTableId(context, tableName);
  } catch (TableNotFoundException e) {
    // handled in the string formatting
  }
  return tableId == null ? String.format("%s(?)", tableName)
      : String.format("%s(ID:%s)", tableName, tableId.canonicalID());
}
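// Behavior sketch derived from the method above (the table names and the id "3b" are made up):
//   getPrintableTableInfoFromName(context, "trades")  -> "trades(ID:3b)"  when the id resolves
//   getPrintableTableInfoFromName(context, "missing") -> "missing(?)"     when TableNotFoundException is thrown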
/**
 * @return true if balancing should occur, based on the current tservers and any outstanding
 *         migrations for this table
 */
protected boolean shouldBalance(SortedMap<TServerInstance,TabletServerStatus> current,
    Set<KeyExtent> migrations) {
  if (current.size() < 2) {
    return false;
  }
  for (KeyExtent keyExtent : migrations) {
    if (keyExtent.getTableId().equals(tableId)) {
      return false;
    }
  }
  return true;
}
// Metadata rows must not sort before the metadata table id; otherwise record violation 5.
if (new Text(row).compareTo(new Text(MetadataTable.ID.getUtf8())) < 0) {
  violations = addViolation(violations, 5);
}
  tableId = Table.ID.of(colq.toString());
} else if (WorkSection.NAME.equals(colf)) {
  ReplicationTarget target = ReplicationTarget.from(colq);
  // ... entry.getValue(), row);
  Mutation orderMutation = OrderSection.createMutation(row.toString(), entry.getValue());
  orderMutation.putDelete(OrderSection.NAME, new Text(entry.getKey().getUtf8()));
  mutations.add(orderMutation);
String tableID = ref.id.toString();
String dir = ref.ref;
// A directory reference without ':' carries no volume scheme, so it is a relative path.
if (!dir.contains(":")) {
long createdTime = setAndGetCreatedTime(new Path(file.toString()), tableId.toString());
stat = Status.newBuilder(stat).setCreatedTime(createdTime).build();
value = ProtobufUtil.toValue(stat);
private void lookupTabletLocation(ClientContext context, Text row, boolean retry,
    LockCheckerSession lcSession)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  // Build the metadata row for this table and row: "<tableId>;<row>"
  Text metadataRow = new Text(tableId.getUtf8());
  metadataRow.append(new byte[] {';'}, 0, 1);
  metadataRow.append(row.getBytes(), 0, row.getLength());
_flush(tableId, start, end, true);

List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.getUtf8()),
    start == null ? EMPTY : TextUtil.getByteBuffer(start),
    end == null ? EMPTY : TextUtil.getByteBuffer(end),
AccumuloClient client = master.getContext();
final Text tableId = new Text(getTableId(master.getContext(), tableName).getUtf8());
@Override
public void clone(String srcTableName, String newTableName, boolean flush,
    Map<String,String> propertiesToSet, Set<String> propertiesToExclude)
    throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
    TableExistsException {

  checkArgument(srcTableName != null, "srcTableName is null");
  checkArgument(newTableName != null, "newTableName is null");

  Table.ID srcTableId = Tables.getTableId(context, srcTableName);

  if (flush)
    _flush(srcTableId, null, null, true);

  if (propertiesToExclude == null)
    propertiesToExclude = Collections.emptySet();

  if (propertiesToSet == null)
    propertiesToSet = Collections.emptyMap();

  List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(srcTableId.getUtf8()),
      ByteBuffer.wrap(newTableName.getBytes(UTF_8)));
  Map<String,String> opts = new HashMap<>();
  for (Entry<String,String> entry : propertiesToSet.entrySet()) {
    if (entry.getKey().startsWith(CLONE_EXCLUDE_PREFIX))
      throw new IllegalArgumentException("Property can not start with " + CLONE_EXCLUDE_PREFIX);
    opts.put(entry.getKey(), entry.getValue());
  }
  for (String prop : propertiesToExclude) {
    opts.put(CLONE_EXCLUDE_PREFIX + prop, "");
  }

  doTableFateOperation(newTableName, AccumuloException.class, FateOperation.TABLE_CLONE, args,
      opts);
}
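// Note on the fate arguments assembled above: properties to set are passed through in opts
// unchanged, while each excluded property is encoded as (CLONE_EXCLUDE_PREFIX + name) -> "".
// A hypothetical client-side call (names and property values are illustrative only):
//   client.tableOperations().clone("src", "copy", true,
//       Collections.singletonMap("table.file.replication", "3"),
//       Collections.singleton("table.split.threshold"));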
@Override
public void online(String tableName, boolean wait)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  checkArgument(tableName != null, "tableName is null");
  Table.ID tableId = Tables.getTableId(context, tableName);

  // ACCUMULO-4574: if the table is already online, return without executing a fate operation.
  TableState expectedState = Tables.getTableState(context, tableId, true);
  if (expectedState == TableState.ONLINE) {
    if (wait)
      waitForTableStateTransition(tableId, TableState.ONLINE);
    return;
  }

  List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.getUtf8()));
  Map<String,String> opts = new HashMap<>();

  try {
    doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_ONLINE,
        args, opts);
  } catch (TableExistsException e) {
    // should not happen
    throw new AssertionError(e);
  }

  if (wait)
    waitForTableStateTransition(tableId, TableState.ONLINE);
}
protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal,
    Status status) throws IOException {
  Set<Integer> tids = new HashSet<>();
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();

  // Read through the records we've already processed in a previous replication attempt.
  // We still need to track the tids defined earlier in the file, because mutations later
  // in the file may reference those tids.
  for (long i = 0; i < status.getBegin(); i++) {
    key.readFields(wal);
    value.readFields(wal);

    switch (key.event) {
      case DEFINE_TABLET:
        if (target.getSourceTableId().equals(key.tablet.getTableId())) {
          tids.add(key.tabletId);
        }
        break;
      default:
        break;
    }
  }

  return tids;
}
private KeyExtent getHighTablet(KeyExtent range) throws AccumuloException {
  try {
    AccumuloClient client = this.master.getContext();
    Scanner scanner = client.createScanner(range.isMeta() ? RootTable.NAME : MetadataTable.NAME,
        Authorizations.EMPTY);
    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
    KeyExtent start = new KeyExtent(range.getTableId(), range.getEndRow(), null);
    scanner.setRange(new Range(start.getMetadataEntry(), null));
    Iterator<Entry<Key,Value>> iterator = scanner.iterator();
    if (!iterator.hasNext()) {
      throw new AccumuloException("No last tablet for a merge " + range);
    }
    Entry<Key,Value> entry = iterator.next();
    KeyExtent highTablet =
        new KeyExtent(entry.getKey().getRow(), KeyExtent.decodePrevEndRow(entry.getValue()));
    if (!highTablet.getTableId().equals(range.getTableId())) {
      throw new AccumuloException("No last tablet for merge " + range + " " + highTablet);
    }
    return highTablet;
  } catch (Exception ex) {
    throw new AccumuloException(
        "Unexpected failure finding the last tablet for a merge " + range, ex);
  }
}
/**
 * Create a status record in the replication table
 */
protected boolean addStatusRecord(Text file, Table.ID tableId, Value v) {
  try {
    Mutation m = new Mutation(file);
    m.put(StatusSection.NAME, new Text(tableId.getUtf8()), v);

    try {
      replicationWriter.addMutation(m);
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  } finally {
    try {
      replicationWriter.flush();
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  }

  return true;
}
@Override
public List<TabletStats> getTabletStats(TInfo tinfo, TCredentials credentials, String tableId) {
  TreeMap<KeyExtent,Tablet> onlineTabletsCopy;
  synchronized (onlineTablets) {
    onlineTabletsCopy = new TreeMap<>(onlineTablets);
  }
  List<TabletStats> result = new ArrayList<>();
  Table.ID text = Table.ID.of(tableId);
  KeyExtent start = new KeyExtent(text, new Text(), null);
  for (Entry<KeyExtent,Tablet> entry : onlineTabletsCopy.tailMap(start).entrySet()) {
    KeyExtent ke = entry.getKey();
    if (ke.getTableId().compareTo(text) == 0) {
      Tablet tablet = entry.getValue();
      TabletStats stats = tablet.getTabletStats();
      stats.extent = ke.toThrift();
      stats.ingestRate = tablet.ingestRate();
      stats.queryRate = tablet.queryRate();
      stats.splitCreationTime = tablet.getSplitCreationTime();
      stats.numEntries = tablet.getNumEntries();
      result.add(stats);
    }
  }
  return result;
}
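// Reading the loop above: the online-tablet map is keyed by KeyExtent, which sorts by table id
// first, so tailMap(start) with an empty end row jumps to the first possible extent of the
// requested table; the table-id comparison then filters out extents belonging to tables that
// sort after it.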
public static synchronized TabletLocator getLocator(ClientContext context, Table.ID tableId) {
  Preconditions.checkState(enabled,
      "The Accumulo singleton that tracks tablet locations is disabled. This is likely caused "
          + "by all AccumuloClients being closed or garbage collected.");
  LocatorKey key = new LocatorKey(context.getInstanceID(), tableId);
  TabletLocator tl = locators.get(key);
  if (tl == null) {
    MetadataLocationObtainer mlo = new MetadataLocationObtainer();

    if (RootTable.ID.equals(tableId)) {
      tl = new RootTabletLocator(new ZookeeperLockChecker(context));
    } else if (MetadataTable.ID.equals(tableId)) {
      tl = new TabletLocatorImpl(MetadataTable.ID, getLocator(context, RootTable.ID), mlo,
          new ZookeeperLockChecker(context));
    } else {
      tl = new TabletLocatorImpl(tableId, getLocator(context, MetadataTable.ID), mlo,
          new ZookeeperLockChecker(context));
    }
    locators.put(key, tl);
  }

  return tl;
}
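// Structure implied by the factory above: locators are cached per (instanceId, tableId), and a
// user table's locator delegates its metadata lookups to the metadata table's locator, which in
// turn delegates to the root table's locator, mirroring the root -> metadata -> user tablet
// hierarchy.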
public static KeyExtent findContainingExtent(KeyExtent extent, SortedSet<KeyExtent> extents) {
  KeyExtent lookupExtent = new KeyExtent(extent);
  lookupExtent.setPrevEndRow(null);

  SortedSet<KeyExtent> tailSet = extents.tailSet(lookupExtent);

  if (tailSet.isEmpty()) {
    return null;
  }

  KeyExtent first = tailSet.first();

  if (first.getTableId().compareTo(extent.getTableId()) != 0) {
    return null;
  }

  if (first.getPrevEndRow() == null) {
    return first;
  }

  if (extent.getPrevEndRow() == null) {
    return null;
  }

  if (extent.getPrevEndRow().compareTo(first.getPrevEndRow()) >= 0)
    return first;

  return null;
}
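// Usage sketch with a made-up table id "2" and rows, assuming the KeyExtent(Table.ID, Text endRow,
// Text prevEndRow) constructor used elsewhere in these snippets: findContainingExtent returns the
// candidate extent that fully covers the probe, or null when none does.
SortedSet<KeyExtent> extents = new TreeSet<>();
extents.add(new KeyExtent(Table.ID.of("2"), new Text("m"), null));               // (-inf, m]
extents.add(new KeyExtent(Table.ID.of("2"), null, new Text("m")));               // (m, +inf)
KeyExtent probe = new KeyExtent(Table.ID.of("2"), new Text("f"), new Text("c")); // (c, f]
KeyExtent container = findContainingExtent(probe, extents);                      // the (-inf, m] extent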
@Override
public void load()
    throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
  Table.ID tableId = Tables.getTableId(context, tableName);

  Map<String,String> props = context.instanceOperations().getSystemConfiguration();
  AccumuloConfiguration conf = new ConfigurationCopy(props);

  FileSystem fs =
      VolumeConfiguration.getVolume(dir, context.getHadoopConf(), conf).getFileSystem();

  Path srcPath = checkPath(fs, dir);

  SortedMap<KeyExtent,Bulk.Files> mappings;
  if (plan == null) {
    mappings = computeMappingFromFiles(fs, tableId, srcPath);
  } else {
    mappings = computeMappingFromPlan(fs, tableId, srcPath);
  }

  BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);

  List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.getUtf8()),
      ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
      ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));

  new TableOperationsImpl(context).doBulkFateOperation(args, tableName);
}
@Override
public Repo<Master> call(long tid, Master env) throws Exception {

  if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
    log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.",
        RootTable.NAME);
  }

  Text start = startRow.length == 0 ? null : new Text(startRow);
  Text end = endRow.length == 0 ? null : new Text(endRow);

  if (start != null && end != null)
    if (start.compareTo(end) >= 0)
      throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
          TableOperation.MERGE, TableOperationExceptionType.BAD_RANGE,
          "start row must be less than end row");

  env.mustBeOnline(tableId);

  MergeInfo info = env.getMergeInfo(tableId);

  if (info.getState() == MergeState.NONE) {
    KeyExtent range = new KeyExtent(tableId, end, start);
    env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
  }

  return new TableRangeOpWait(namespaceId, tableId);
}
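// Note on the merge range built above (illustrative reading, based on how KeyExtent bounds are
// used in these snippets): KeyExtent(tableId, end, start) takes "end" as the inclusive end row
// and "start" as the exclusive previous end row, so the range covers (startRow, endRow], and a
// null bound extends the range to the corresponding edge of the table.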