// Mutator applied to the namespace-name node during a RENAME operation.
// NOTE(review): fragment of an anonymous class — the trailing "});" closes an
// enclosing mutate(...) call that is outside this view.
// NOTE(review): new String(current) / newName.getBytes() use the platform
// charset; elsewhere in this file UTF_8 is passed explicitly — confirm intent.
@Override public byte[] mutate(byte[] current) throws Exception {
  final String currentName = new String(current);
  // Already renamed: assume in this case the operation is running again, so we are done.
  if (currentName.equals(newName)) return null; // assume in this case the operation is running again, so we are done
  // Node changed to something other than the expected old name — a concurrent
  // modification happened; abort rather than clobber it.
  if (!currentName.equals(oldName)) {
    throw new AcceptableThriftTableOperationException(null, oldName, TableOperation.RENAME,
        TableOperationExceptionType.NAMESPACE_NOTFOUND, "Name changed while processing");
  }
  return newName.getBytes();
} });
/**
 * Allocates the next unique id by atomically incrementing the base-36 counter stored in the
 * ZooKeeper tables node, then converts it to the caller's id type.
 *
 * @param name display name used only in error reporting
 * @param context server context providing the ZooKeeper reader/writer
 * @param newIdFunction converts the raw counter string into the typed id
 * @return the newly allocated id
 * @throws AcceptableThriftTableOperationException if the counter update fails for any reason
 */
public static <T extends AbstractId> T getNextId(String name, ServerContext context,
    Function<String,T> newIdFunction) throws AcceptableThriftTableOperationException {
  try {
    final IZooReaderWriter zoo = context.getZooReaderWriter();
    final String ntp = context.getZooKeeperRoot() + Constants.ZTABLES;
    // mutate() retries the increment until it applies atomically.
    byte[] nid = zoo.mutate(ntp, ZERO_BYTE, ZooUtil.PUBLIC, currentValue -> {
      BigInteger nextId =
          new BigInteger(new String(currentValue, UTF_8), Character.MAX_RADIX).add(BigInteger.ONE);
      return nextId.toString(Character.MAX_RADIX).getBytes(UTF_8);
    });
    return newIdFunction.apply(new String(nid, UTF_8));
  } catch (Exception e1) {
    log.error("Failed to assign id to " + name, e1);
    throw new AcceptableThriftTableOperationException(null, name, TableOperation.CREATE,
        TableOperationExceptionType.OTHER, e1.getMessage());
  }
}
/**
 * Verifies that {@code tableName} is not already mapped to a different table id.
 *
 * @throws AcceptableThriftTableOperationException with {@code EXISTS} if another table already
 *         owns the name (a mapping to {@code tableId} itself is allowed)
 */
public static void checkTableDoesNotExist(ServerContext context, String tableName,
    Table.ID tableId, TableOperation operation) throws AcceptableThriftTableOperationException {
  final Table.ID existing = Tables.getNameToIdMap(context).get(tableName);
  if (existing == null || existing.equals(tableId)) {
    return; // name unused, or it already maps to this same table
  }
  throw new AcceptableThriftTableOperationException(null, tableName, operation,
      TableOperationExceptionType.EXISTS, null);
}
// Mutator for the compaction-id node: bumps the flush id and records this
// transaction's compaction config (if any) as "txid=hexConfig".
// NOTE(review): fragment of an anonymous class — the trailing "});" closes an
// enclosing mutate(...) call that is outside this view.
@Override public byte[] mutate(byte[] currentValue) throws Exception {
  // Stored format: "<flushID>[,<txid>=<hexConfig>...]"
  String cvs = new String(currentValue, UTF_8);
  String[] tokens = cvs.split(",");
  long flushID = Long.parseLong(tokens[0]);
  flushID++;
  String txidString = String.format("%016x", tid);
  // Any token from a *different* transaction means another configured
  // compaction is in flight — only one may run at a time.
  for (int i = 1; i < tokens.length; i++) {
    if (tokens[i].startsWith(txidString)) continue; // skip self
    log.debug("txidString : {}", txidString);
    log.debug("tokens[{}] : {}", i, tokens[i]);
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
        TableOperation.COMPACT, TableOperationExceptionType.OTHER,
        "Another compaction with iterators and/or a compaction strategy is running");
  }
  StringBuilder encodedIterators = new StringBuilder();
  if (config != null) {
    // Hex-encode the serialized compaction config so it is safe in the
    // comma-separated node value.
    Hex hex = new Hex();
    encodedIterators.append(",");
    encodedIterators.append(txidString);
    encodedIterators.append("=");
    encodedIterators.append(new String(hex.encode(config), UTF_8));
  }
  return (Long.toString(flushID) + encodedIterators).getBytes(UTF_8);
} });
/**
 * Verifies that {@code namespace} is not already mapped to a different namespace id.
 *
 * @throws AcceptableThriftTableOperationException with {@code NAMESPACE_EXISTS} if another
 *         namespace already owns the name (a mapping to {@code namespaceId} itself is allowed)
 */
public static void checkNamespaceDoesNotExist(ServerContext context, String namespace,
    Namespace.ID namespaceId, TableOperation operation)
    throws AcceptableThriftTableOperationException {
  final Namespace.ID existing = Namespaces.lookupNamespaceId(context, namespace);
  if (existing == null || existing.equals(namespaceId)) {
    return; // name unused, or it already maps to this same namespace
  }
  throw new AcceptableThriftTableOperationException(null, namespace, operation,
      TableOperationExceptionType.NAMESPACE_EXISTS, null);
}
/**
 * Reads the exported table properties from the export file in the configured export directory.
 *
 * @param fs volume manager used to resolve the filesystem owning the export file
 * @return the property map read from the export file
 * @throws AcceptableThriftTableOperationException if the file cannot be read
 */
private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
  final Path exportFilePath = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
  try {
    final FileSystem volumeFs = fs.getVolumeByPath(exportFilePath).getFileSystem();
    return TableOperationsImpl.getExportedProps(volumeFs, exportFilePath);
  } catch (IOException ioe) {
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(),
        tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
        "Error reading table props from " + exportFilePath + " " + ioe.getMessage());
  }
}
public CompactRange(Namespace.ID namespaceId, Table.ID tableId, byte[] startRow, byte[] endRow, List<IteratorSetting> iterators, CompactionStrategyConfig compactionStrategy) throws AcceptableThriftTableOperationException { requireNonNull(namespaceId, "Invalid argument: null namespaceId"); requireNonNull(tableId, "Invalid argument: null tableId"); requireNonNull(iterators, "Invalid argument: null iterator list"); requireNonNull(compactionStrategy, "Invalid argument: null compactionStrategy"); this.tableId = tableId; this.namespaceId = namespaceId; this.startRow = startRow.length == 0 ? null : startRow; this.endRow = endRow.length == 0 ? null : endRow; if (iterators.size() > 0 || !compactionStrategy.equals(CompactionStrategyConfigUtil.DEFAULT_STRATEGY)) { this.config = WritableUtils.toByteArray( new UserCompactionConfig(this.startRow, this.endRow, iterators, compactionStrategy)); } else { log.info("No iterators or compaction strategy"); } if (this.startRow != null && this.endRow != null && new Text(startRow).compareTo(new Text(endRow)) >= 0) throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.BAD_RANGE, "start row must be less than end row"); }
// NOTE(review): mid-method fragment — the enclosing try/if and the loop over
// exported files are outside this view; keep code byte-identical.
// Referenced file from the exported table is missing in the import directory.
throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(),
    tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
    "File referenced by exported table does not exists " + oldFileName);
} catch (IOException ioe) {
  log.warn("{}", ioe.getMessage(), ioe);
  // Rename of imported files failed; surface as a table operation error.
  throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(),
      tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
      "Error renaming files " + ioe.getMessage());
// NOTE(review): mid-method fragment — the two throws below cannot be
// consecutive in the real source; the closing braces and the catch clause
// (presumably "catch (ExecutionException ee)") between them were sliced out
// of this view. Kept byte-identical.
try {
  if (!future.get()) {
    // A file-move task reported failure.
    throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null,
        TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER,
        "Failed to move files from " + bulkInfo.sourceDir);
    // A file-move task threw; propagate its cause's message.
    throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null,
        TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER,
        ee.getCause().getMessage());
// NOTE(review): single-statement fragment — thrown when a tablet merge raced
// this bulk import; the enclosing method is outside this view.
throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT,
    TableOperationExceptionType.OTHER, "Concurrent merge happened"); // TODO need to handle
/**
 * Final export step: writes the export files, then releases every reservation taken by earlier
 * steps of this FATE transaction.
 *
 * @return null — this is the last repo in the chain
 * @throws AcceptableThriftTableOperationException if writing the export files fails
 */
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  try {
    exportTable(master.getFileSystem(), master.getContext(), tableInfo.tableName,
        tableInfo.tableID, tableInfo.exportDir);
  } catch (IOException ioe) {
    throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(),
        tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
        "Failed to create export files " + ioe.getMessage());
  }
  // Release in the same order the resources were reserved: namespace, table, directory.
  Utils.unreserveNamespace(master, tableInfo.namespaceID, tid, false);
  Utils.unreserveTable(master, tableInfo.tableID, tid, false);
  Utils.unreserveHdfsDirectory(master, new Path(tableInfo.exportDir).toString(), tid);
  return null;
}
/**
 * Ensures the table being exported is OFFLINE. The cached table state may be stale, so on a
 * first mismatch the cache is cleared and the state re-checked before failing.
 *
 * @throws AcceptableThriftTableOperationException if the table is still not offline
 */
private void checkOffline(ClientContext context) throws Exception {
  if (Tables.getTableState(context, tableInfo.tableID) == TableState.OFFLINE) {
    return;
  }
  // First read may be stale; refresh the cache and look again.
  Tables.clearCache(context);
  if (Tables.getTableState(context, tableInfo.tableID) == TableState.OFFLINE) {
    return;
  }
  throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(),
      tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
      "Table is not offline");
}
/**
 * Bulk-import step: moves the source files into the bulk directory using the precomputed
 * rename map, then hands off to {@code LoadFiles}.
 *
 * @throws AcceptableThriftTableOperationException with {@code BULK_BAD_INPUT_DIRECTORY} if
 *         reading the rename map or moving the files fails
 */
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  final Path bulkDir = new Path(bulkInfo.bulkDir);
  final Path sourceDir = new Path(bulkInfo.sourceDir);
  log.debug(" tid {} sourceDir {}", tid, sourceDir);

  VolumeManager fs = master.getFileSystem();

  // Online bulk imports are coordinated through the ZooKeeper arbitrator.
  if (bulkInfo.tableState == TableState.ONLINE) {
    ZooArbitrator.start(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
  }

  try {
    Map<String,String> oldToNewNameMap = BulkSerialize.readRenameMap(bulkDir.toString(), fs::open);
    moveFiles(String.format("%016x", tid), sourceDir, bulkDir, master, fs, oldToNewNameMap);
    return new LoadFiles(bulkInfo);
  } catch (Exception ex) {
    throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null,
        TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_INPUT_DIRECTORY,
        bulkInfo.sourceDir + ": " + ex);
  }
}
/**
 * Attempts to take the FATE lock for a table.
 *
 * @return 0 when the lock was acquired (and, if required, the table exists); 100 when the lock
 *         is busy and the caller should retry later
 * @throws AcceptableThriftTableOperationException with {@code NOTFOUND} if
 *         {@code tableMustExist} and the table's ZooKeeper node is absent
 */
public static long reserveTable(Master env, Table.ID tableId, long tid, boolean writeLock,
    boolean tableMustExist, TableOperation op) throws Exception {
  if (!getLock(env.getContext(), tableId, tid, writeLock).tryLock()) {
    return 100; // lock busy — retry later
  }
  if (tableMustExist) {
    IZooReaderWriter zk = env.getContext().getZooReaderWriter();
    String tablePath = env.getContext().getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId;
    if (!zk.exists(tablePath)) {
      throw new AcceptableThriftTableOperationException(tableId.canonicalID(), "", op,
          TableOperationExceptionType.NOTFOUND, "Table does not exist");
    }
  }
  log.info("table {} ({}) locked for {} operation: {}", tableId, Long.toHexString(tid),
      (writeLock ? "write" : "read"), op);
  return 0;
}
/**
 * Attempts to take the FATE lock for a namespace.
 *
 * @return 0 when the lock was acquired (and, if required, the namespace exists); 100 when the
 *         lock is busy and the caller should retry later
 * @throws AcceptableThriftTableOperationException with {@code NAMESPACE_NOTFOUND} if
 *         {@code mustExist} and the namespace's ZooKeeper node is absent
 */
public static long reserveNamespace(Master env, Namespace.ID namespaceId, long id,
    boolean writeLock, boolean mustExist, TableOperation op) throws Exception {
  if (!getLock(env.getContext(), namespaceId, id, writeLock).tryLock()) {
    return 100; // lock busy — retry later
  }
  if (mustExist) {
    IZooReaderWriter zk = env.getContext().getZooReaderWriter();
    String nsPath =
        env.getContext().getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId;
    if (!zk.exists(nsPath)) {
      throw new AcceptableThriftTableOperationException(namespaceId.canonicalID(), "", op,
          TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace does not exist");
    }
  }
  log.info("namespace {} ({}) locked for {} operation: {}", namespaceId, Long.toHexString(id),
      (writeLock ? "write" : "read"), op);
  return 0;
}
@Override public Repo<Master> call(long tid, Master environment) throws Exception { // give all table permissions to the creator for (TablePermission permission : TablePermission.values()) { try { AuditedSecurityOperation.getInstance(environment.getContext()).grantTablePermission( environment.getContext().rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission, cloneInfo.namespaceId); } catch (ThriftSecurityException e) { LoggerFactory.getLogger(ClonePermissions.class).error("{}", e.getMessage(), e); throw e; } } // setup permissions in zookeeper before table info in zookeeper // this way concurrent users will not get a spurious permission denied // error try { return new CloneZookeeper(cloneInfo, environment.getContext()); } catch (NamespaceNotFoundException e) { throw new AcceptableThriftTableOperationException(null, cloneInfo.tableName, TableOperation.CLONE, TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace for target table not found"); } }
/**
 * Readiness check for a bulk import: takes the table read lock, verifies the table is online,
 * then reserves the source and error directories.
 *
 * @return 0 when both directories are reserved; 100 while waiting on the table lock; a non-zero
 *         retry delay while a directory reservation is pending
 * @throws AcceptableThriftTableOperationException with {@code OFFLINE} if the table is not online
 */
@Override
public long isReady(long tid, Master master) throws Exception {
  if (!Utils.getReadLock(master, tableId, tid).tryLock()) {
    return 100;
  }
  // State may be cached; refresh before deciding.
  Tables.clearCache(master.getContext());
  if (Tables.getTableState(master.getContext(), tableId) != TableState.ONLINE) {
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
        TableOperation.BULK_IMPORT, TableOperationExceptionType.OFFLINE, null);
  }
  // Reserve the source dir first; only when that succeeds try the error dir.
  long reserved = Utils.reserveHdfsDirectory(master, sourceDir, tid);
  if (reserved == 0) {
    reserved = Utils.reserveHdfsDirectory(master, errorDir, tid);
  }
  return reserved;
}
/**
 * Starts a merge/delete-range operation over [start, end) for the table, then waits for it via
 * {@code TableRangeOpWait}. Empty row bounds mean "unbounded".
 *
 * @throws AcceptableThriftTableOperationException with {@code BAD_RANGE} if both bounds are set
 *         and start is not strictly less than end
 */
@Override
public Repo<Master> call(long tid, Master env) throws Exception {
  // Merging the root table is a no-op — it cannot be split, so warn and continue.
  if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
    log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.",
        RootTable.NAME);
  }

  final Text start = startRow.length == 0 ? null : new Text(startRow);
  final Text end = endRow.length == 0 ? null : new Text(endRow);
  if (start != null && end != null && start.compareTo(end) >= 0) {
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
        TableOperation.MERGE, TableOperationExceptionType.BAD_RANGE,
        "start row must be less than end row");
  }

  env.mustBeOnline(tableId);

  // Only kick off a new merge when none is already in progress.
  MergeInfo info = env.getMergeInfo(tableId);
  if (info.getState() == MergeState.NONE) {
    KeyExtent range = new KeyExtent(tableId, end, start);
    env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
  }

  return new TableRangeOpWait(namespaceId, tableId);
}
@Override public Repo<Master> call(long tid, Master env) throws Exception { // reserve the table name in zookeeper or fail Utils.getTableNameLock().lock(); try { // write tableName & tableId to zookeeper Utils.checkTableDoesNotExist(env.getContext(), tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE); String namespace = Tables.qualify(tableInfo.tableName).getFirst(); Namespace.ID namespaceId = Namespaces.getNamespaceId(env.getContext(), namespace); env.getTableManager().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE); Tables.clearCache(env.getContext()); } finally { Utils.getTableNameLock().unlock(); } for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet()) if (!TablePropUtil.setTableProperty(env.getContext(), tableInfo.tableId, entry.getKey(), entry.getValue())) { throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Invalid table property " + entry.getKey()); } return new CreateImportDir(tableInfo); }
@Override public long isReady(long tid, Master master) throws Exception { long reserved = Utils.reserveNamespace(master, tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT) + Utils.reserveTable(master, tableInfo.tableID, tid, false, true, TableOperation.EXPORT); if (reserved > 0) return reserved; AccumuloClient client = master.getContext(); checkOffline(master.getContext()); Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY); metaScanner.setRange(new KeyExtent(tableInfo.tableID, null, null).toMetadataRange()); // scan for locations metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME); metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME); if (metaScanner.iterator().hasNext()) { return 500; } // use the same range to check for walogs that we used to check for hosted (or future hosted) // tablets // this is done as a separate scan after we check for locations, because walogs are okay only if // there is no location metaScanner.clearColumns(); metaScanner.fetchColumnFamily(LogColumnFamily.NAME); if (metaScanner.iterator().hasNext()) { throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(), tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER, "Write ahead logs found for table"); } return 0; }