/**
 * Fetches the next batch of directory entries over RPC and resets {@code currentIterator}.
 *
 * <p>Sends a {@link ListStatusCommand} for {@code path} using the current continuation
 * {@code handle}; stores the handle returned by the server ({@code null} once the server
 * reports no further batches) and exposes the converted statuses via {@code currentIterator}.
 *
 * @throws IOException if the RPC times out or fails with an I/O-convertible error
 */
private void nextIterator() throws IOException {
  final ListStatusCommand command = new ListStatusCommand(path, handle, listStatusBatchSize);
  runner.runCommand(command);
  RpcFuture<DFS.ListStatusResponse> future = command.getFuture();
  try {
    DFS.ListStatusResponse response = future.checkedGet(rpcTimeoutMs, TimeUnit.MILLISECONDS);
    // A missing handle signals that the server has no more batches to return.
    handle = response.hasHandle() ? response.getHandle() : null;
    List<DFS.FileStatus> protoStatuses = getListOrEmpty(response.getStatusesList());
    // Expression lambda instead of a block lambda with an explicit return.
    currentIterator = protoStatuses.stream().map(s -> fromProtoFileStatus(s)).iterator();
  } catch (TimeoutException e) {
    // Fixed typo in error message: "occured" -> "occurred".
    throw new IOException("Timeout occurred during I/O request for " + uri, e);
  } catch (RpcException e) {
    // Unwrap to IOException when possible; otherwise rethrow the RpcException itself.
    RpcException.propagateIfPossible(e, IOException.class);
    throw e;
  }
}
@Override public Iterable<RefreshInfo> getRefreshInfos() { if (contextProvider.get().isMaster() || (contextProvider.get().isCoordinator() && contextProvider.get().getDremioConfig().getBoolean(ENABLE_MASTERLESS_BOOL))) { return StreamSupport.stream(reflectionStatusService.get().getRefreshInfos().spliterator(), false) .map(RefreshInfo::fromRefreshInfo).collect(Collectors.toList()); } // need to do RPC call // trying to get master Optional<CoordinationProtos.NodeEndpoint> master = contextProvider.get().getMaster(); if (!master.isPresent()) { throw UserException.connectionError().message("Unable to get master while trying to get Reflection Information") .build(logger); } final ReflectionTunnel reflectionTunnel = reflectionTunnelCreator.getTunnel(master.get()); try { final ReflectionRPC.RefreshInfoResp refreshInfosResp = reflectionTunnel.requestRefreshInfos().get(15, TimeUnit.SECONDS); return refreshInfosResp.getRefreshInfoList().stream() .map(RefreshInfo::fromRefreshInfo).collect(Collectors.toList()); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw UserException.connectionError(e).message("Error while getting Refresh Information") .build(logger); } } }
private void getData() throws IOException { // Free previous resources in.close(); buf.release(); final GetFileDataCommand command = new GetFileDataCommand(path, pos, buffersize); runner.runCommand(command); RpcFuture<DFS.GetFileDataResponse> future = command.getFuture(); try { DFS.GetFileDataResponse response = future.checkedGet(rpcTimeoutMs, TimeUnit.MILLISECONDS); eof = (response.getRead() == -1); buf = future.getBuffer(); if (buf == null) { buf = EMPTY_BUFFER; } in = new ByteBufInputStream(buf); } catch(TimeoutException e) { throw new IOException("Timeout occured during I/O request for " + uri, e); } catch(RpcException e) { RpcException.propagateIfPossible(e, IOException.class); throw e; } } }
/**
 * Deletes the given path via a remote {@link DeleteCommand}.
 *
 * @param f path to delete (resolved to an absolute path first)
 * @param recursive whether to delete directory contents recursively
 * @return the server-reported success flag
 * @throws IOException if the RPC times out or fails with an I/O-convertible error
 */
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);
  final DeleteCommand command = new DeleteCommand(absolutePath.toUri().getPath(), recursive);
  runner.runCommand(command);
  RpcFuture<DFS.DeleteResponse> future = command.getFuture();
  try {
    DFS.DeleteResponse response = future.checkedGet(rpcTimeoutMs, TimeUnit.MILLISECONDS);
    return response.getValue();
  } catch (TimeoutException e) {
    // Fixed typo in error message: "occured" -> "occurred".
    throw new IOException("Timeout occurred during I/O request for " + uri, e);
  } catch (RpcException e) {
    RpcException.propagateIfPossible(e, IOException.class);
    throw e;
  }
}
@Override public Iterable<DependencyInfo> getReflectionDependencies() { if (contextProvider.get().isMaster() || (contextProvider.get().isCoordinator() && contextProvider.get().getDremioConfig().getBoolean(ENABLE_MASTERLESS_BOOL))) { return reflectionService.get().getReflectionDependencies(); } // need to do RPC call // trying to get master Optional<CoordinationProtos.NodeEndpoint> master = contextProvider.get().getMaster(); if (!master.isPresent()) { throw UserException.connectionError().message("Unable to get master while trying to get Reflection Information") .build(logger); } final ReflectionTunnel reflectionTunnel = reflectionTunnelCreator.getTunnel(master.get()); try { final ReflectionRPC.DependencyInfoResp dependencyInfosResp = reflectionTunnel.requestDependencyInfos().get(15, TimeUnit.SECONDS); return dependencyInfosResp.getDependencyInfoList().stream() .map(DependencyInfo::getDependencyInfo).collect(Collectors.toList()); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw UserException.connectionError(e).message("Error while getting Dependency Information") .build(logger); } }
/**
 * Renames {@code src} to {@code dst} via a remote {@link RenameCommand}.
 *
 * @return the server-reported success flag
 * @throws IOException if the RPC times out or fails with an I/O-convertible error
 */
@Override
public boolean rename(Path src, Path dst) throws IOException {
  Path absoluteSrc = toAbsolutePath(src);
  Path absoluteDst = toAbsolutePath(dst);
  checkPath(absoluteSrc);
  checkPath(absoluteDst);
  final RenameCommand command =
      new RenameCommand(absoluteSrc.toUri().getPath(), absoluteDst.toUri().getPath());
  runner.runCommand(command);
  RpcFuture<DFS.RenameResponse> future = command.getFuture();
  try {
    DFS.RenameResponse response = future.checkedGet(rpcTimeoutMs, TimeUnit.MILLISECONDS);
    return response.getValue();
  } catch (TimeoutException e) {
    // Fixed typo in error message: "occured" -> "occurred".
    throw new IOException("Timeout occurred during I/O request for " + uri, e);
  } catch (RpcException e) {
    RpcException.propagateIfPossible(e, IOException.class);
    throw e;
  }
}
@Override public Iterable<ReflectionInfo> getReflections() { if (contextProvider.get().isMaster() || (contextProvider.get().isCoordinator() && contextProvider.get().getDremioConfig().getBoolean(ENABLE_MASTERLESS_BOOL))) { return reflectionStatusService.get().getReflections(); } // need to do RPC call // trying to get master Optional<CoordinationProtos.NodeEndpoint> master = contextProvider.get().getMaster(); if (!master.isPresent()) { throw UserException.connectionError().message("Unable to get master while trying to get Reflection Information") .build(logger); } final ReflectionTunnel reflectionTunnel = reflectionTunnelCreator.getTunnel(master.get()); try { final ReflectionRPC.ReflectionInfoResp reflectionCombinedStatusResp = reflectionTunnel.requestReflectionStatus().get(15, TimeUnit.SECONDS); return reflectionCombinedStatusResp.getReflectionInfoList().stream() .map(ReflectionInfo::getReflectionInfo).collect(Collectors.toList()); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw UserException.connectionError(e).message("Error while getting Reflection Information") .build(logger); } }
/**
 * Retrieves the {@link FileStatus} of a path via a remote {@link GetFileStatusCommand}.
 *
 * @param f path to stat (resolved to an absolute path first)
 * @throws IOException if the RPC times out or fails with an I/O-convertible error
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);
  final GetFileStatusCommand command = new GetFileStatusCommand(absolutePath.toUri().getPath());
  runner.runCommand(command);
  RpcFuture<DFS.GetFileStatusResponse> future = command.getFuture();
  try {
    DFS.GetFileStatusResponse response = future.checkedGet(rpcTimeoutMs, TimeUnit.MILLISECONDS);
    return fromProtoFileStatus(response.getStatus());
  } catch (TimeoutException e) {
    // Fixed typo in error message: "occured" -> "occurred".
    throw new IOException("Timeout occurred during I/O request for " + uri, e);
  } catch (RpcException e) {
    RpcException.propagateIfPossible(e, IOException.class);
    throw e;
  }
}
/**
 * Lazily fetches and caches server metadata over the client RPC channel.
 *
 * <p>Also populates {@code convertSupport} from the metadata's convert-support list.
 *
 * @return the cached or freshly fetched server metadata
 * @throws SQLException if the server reports an error, the call is interrupted,
 *         or execution fails with a non-propagatable cause
 */
private ServerMeta getServerMeta() throws SQLException {
  assert getServerMetaSupported();
  // Double-checked locking. NOTE(review): this is only safe if the serverMeta field
  // is declared volatile — confirm at the field declaration (not visible here).
  if (serverMeta == null) {
    synchronized (this) {
      if (serverMeta == null) {
        DremioConnection connection = (DremioConnection) getConnection();
        try {
          GetServerMetaResp resp = connection.getClient().getServerMeta().get();
          if (resp.getStatus() != RequestStatus.OK) {
            DremioPBError error = resp.getError();
            throw new SQLException("Error when getting server meta: " + error.getMessage());
          }
          serverMeta = resp.getServerMeta();
          convertSupport = SQLConvertSupport.toSQLConvertSupport(serverMeta.getConvertSupportList());
        } catch (InterruptedException e) {
          // Restore the interrupt flag so callers can still observe the interruption.
          Thread.currentThread().interrupt();
          throw new SQLException("Interrupted when getting server meta", e);
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause == null) {
            throw new AssertionError("Something unknown happened", e);
          }
          // Rethrow unchecked causes as-is; wrap everything else in SQLException.
          Throwables.propagateIfPossible(cause);
          throw new SQLException("Error when getting server meta", cause);
        }
      }
    }
  }
  return serverMeta;
}
/**
 * Creates a directory (and any missing parents) via a remote {@link MkdirsCommand}.
 *
 * @param f directory path to create (resolved to an absolute path first)
 * @param permission permissions for the new directory; may be null (server default)
 * @return the server-reported success flag
 * @throws IOException if the RPC times out or fails with an I/O-convertible error
 */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);
  final MkdirsCommand command = new MkdirsCommand(
      absolutePath.toUri().getPath(),
      permission != null ? (int) permission.toExtendedShort() : null);
  runner.runCommand(command);
  RpcFuture<DFS.MkdirsResponse> future = command.getFuture();
  try {
    DFS.MkdirsResponse response = future.checkedGet(rpcTimeoutMs, TimeUnit.MILLISECONDS);
    return response.getValue();
  } catch (TimeoutException e) {
    // Fixed typo in error message: "occured" -> "occurred".
    throw new IOException("Timeout occurred during I/O request for " + uri, e);
  } catch (RpcException e) {
    RpcException.propagateIfPossible(e, IOException.class);
    throw e;
  }
}
resp = respFuture.get(); } catch (InterruptedException e) {
/**
 * Sends the locally buffered bytes to the remote server and resets the buffer.
 *
 * <p>No-ops when the buffer is empty unless {@code evenIfEmpty} is set (an empty
 * write is still sent in that case). On success, advances {@code remoteOffset} by
 * the number of bytes sent and records the server-reported update time.
 *
 * @param evenIfEmpty send the request even when there is nothing buffered
 * @throws IOException if the RPC fails or times out
 */
private void flush(boolean evenIfEmpty) throws IOException{
  if(!buf.isReadable() && !evenIfEmpty){
    return;
  }
  // The request carries the current remote offset and last update time so the
  // server can validate the write position.
  final WriteDataRequest req = WriteDataRequest.newBuilder()
      .setPath(path)
      .setLastOffset(remoteOffset)
      .setLastUpdate(lastUpdate)
      .build();
  WriteDataCommand command = new WriteDataCommand(req, buf);
  runner.runCommand(command);
  try{
    WriteDataResponse response = command.getFuture().checkedGet(WRITE_RESPONSE_IN_SECONDS, TimeUnit.SECONDS);
    // Order matters: readableBytes() must be read before buf.clear() resets it.
    remoteOffset += buf.readableBytes();
    lastUpdate = response.getUpdateTime();
    buf.clear();
  } catch (RpcException | TimeoutException e) {
    throw new IOException(e);
  }
}
@Test @Ignore public void schemas() throws Exception { // test("SELECT * FROM INFORMATION_SCHEMA.SCHEMATA"); // SQL equivalent GetSchemasResp resp = client.getSchemas(null, null).get(); assertEquals(RequestStatus.OK, resp.getStatus()); List<SchemaMetadata> schemas = resp.getSchemasList(); assertEquals(9, schemas.size()); Set<String> expectedSchemaNames = new HashSet<>(); expectedSchemaNames.add("INFORMATION_SCHEMA"); expectedSchemaNames.add("cp.default"); expectedSchemaNames.add("dfs.default"); expectedSchemaNames.add("dfs.root"); expectedSchemaNames.add("dfs.tmp"); expectedSchemaNames.add("dfs_test.default"); expectedSchemaNames.add("dfs_test.home"); expectedSchemaNames.add("dfs_test.tmp"); expectedSchemaNames.add("sys"); Iterator<SchemaMetadata> iterator = schemas.iterator(); while (iterator.hasNext()) { String schemeName = iterator.next().getSchemaName(); assertTrue(expectedSchemaNames.contains(schemeName)); expectedSchemaNames.remove(schemeName); } assertEquals(expectedSchemaNames.size(), 0); }
MetaResultSet getMeta(RpcFuture<Response> future) throws SQLException { final Response response; try { response = future.checkedGet(); } catch (RpcException e) { throw new SQLException("Failure getting metadata", e);
@Test public void tablesWithTableFilter() throws Exception { // test("SELECT * FROM INFORMATION_SCHEMA.\"TABLES\" WHERE TABLE_TYPE IN ('TABLE')"); // SQL equivalent GetTablesResp resp = client.getTables(null, null, null, Arrays.asList("TABLE")).get(); assertEquals(RequestStatus.OK, resp.getStatus()); List<TableMetadata> tables = resp.getTablesList(); assertEquals(1, tables.size()); Iterator<TableMetadata> iterator = tables.iterator(); verifyTable("cp", "tpch/customer.parquet", iterator.next()); }
/**
 * Attempts to cancel a running job, first on this node and then on the node
 * that originally started it.
 *
 * @param username requesting user (currently unused in this method body)
 * @param jobId id of the job to cancel
 * @param reason human-readable cancellation reason, forwarded to the foreman
 * @throws JobException if the job cannot be cancelled (already complete, or the
 *         remote cancellation request failed)
 */
@Override
public void cancel(String username, JobId jobId, String reason) throws JobException {
  final ForemenTool tool = this.foremenTool.get();
  final ExternalId id = ExternalIdHelper.toExternal(QueryIdHelper.getQueryIdFromString(jobId.getId()));
  // Fast path: the job is running on this node.
  if(tool.cancel(id, reason)){
    logger.debug("Job cancellation requested on current node.");
    return;
  }
  // now remote...
  final Job job = getJob(jobId);
  NodeEndpoint endpoint = job.getJobAttempt().getEndpoint();
  // The job claims to run here but the local cancel above failed — it must have
  // already finished.
  if(endpoint.equals(identity)){
    throw new JobWarningException(jobId, "Unable to cancel job started on current node. It may have completed before cancellation was requested.");
  }
  try{
    // Ask the originating coordinator to cancel, waiting up to 15s for its ack.
    final CoordTunnel tunnel = coordTunnelCreator.get().getTunnel(JobsServiceUtil.toPB(endpoint));
    Ack ack = tunnel.requestCancelQuery(id, reason).checkedGet(15, TimeUnit.SECONDS);
    if(ack.getOk()){
      logger.debug("Job cancellation requested on {}.", endpoint.getAddress());
      return;
    } else {
      throw new JobWarningException(jobId, String.format("Unable to cancel job started on %s. It may have completed before cancellation was requested.", endpoint.getAddress()));
    }
  }catch(TimeoutException | RpcException | RuntimeException e){
    logger.info("Unable to cancel remote job for external id: {}", ExternalIdHelper.toString(id), e);
    throw new JobWarningException(jobId, String.format("Unable to cancel job on node %s.", endpoint.getAddress()));
  }
}
@Test public void columns() throws Exception { // test("SELECT * FROM INFORMATION_SCHEMA.COLUMNS"); // SQL equivalent final GetColumnsResp resp1 = client.getColumns(null, null, null, null).get(); assertEquals(RequestStatus.OK, resp1.getStatus()); final List<ColumnMetadata> columns1 = resp1.getColumnsList(); assertEquals(163, columns1.size()); assertTrue("incremental update column shouldn't be returned", columns1.stream().noneMatch(input -> input.getColumnName().equals(IncrementalUpdateUtils.UPDATE_COLUMN))); }
/**
 * Returns the query profile for the given job attempt.
 *
 * <p>Resolution order: the profile store (for completed jobs or earlier attempts),
 * the local foremen tool when the job runs on this node, and finally an RPC to the
 * originating coordinator with a 15 second timeout.
 *
 * @param jobId id of the job
 * @param attempt attempt number within the job
 * @return the profile, or {@code null} when it cannot be retrieved
 * @throws JobNotFoundException if the job id is unknown
 */
@Override
public QueryProfile getProfile(JobId jobId, int attempt) throws JobNotFoundException {
  Job job = getJob(jobId);
  final AttemptId attemptId = new AttemptId(JobsServiceUtil.getJobIdAsExternalId(jobId), attempt);
  if(jobIsDone(job.getJobAttempt())){
    return profileStore.get(attemptId);
  }
  // Check if the profile for given attempt already exists. Even if the job is not done, it is possible that
  // profile exists for previous attempts
  final QueryProfile queryProfile = profileStore.get(attemptId);
  if (queryProfile != null) {
    return queryProfile;
  }
  final NodeEndpoint endpoint = job.getJobAttempt().getEndpoint();
  // Running on this node: ask the local foremen tool directly.
  if(endpoint.equals(identity)){
    final ForemenTool tool = this.foremenTool.get();
    Optional<QueryProfile> profile = tool.getProfile(attemptId.getExternalId());
    return profile.orNull();
  }
  try{
    // Running remotely: fetch the live profile from the originating coordinator.
    CoordTunnel tunnel = coordTunnelCreator.get().getTunnel(JobsServiceUtil.toPB(endpoint));
    return tunnel.requestQueryProfile(attemptId.getExternalId()).checkedGet(15, TimeUnit.SECONDS);
  }catch(TimeoutException | RpcException | RuntimeException e){
    // Best effort: a missing remote profile is reported as null, not an error.
    logger.info("Unable to retrieve remote query profile for external id: {}",
        ExternalIdHelper.toString(attemptId.getExternalId()), e);
    return null;
  }
}
@Test public void tablesWithSystemTableFilter() throws Exception { // test("SELECT * FROM INFORMATION_SCHEMA.\"TABLES\" WHERE TABLE_TYPE IN ('SYSTEM_TABLE')"); // SQL equivalent GetTablesResp resp = client.getTables(null, null, null, Arrays.asList("SYSTEM_TABLE")).get(); assertEquals(RequestStatus.OK, resp.getStatus()); List<TableMetadata> tables = resp.getTablesList(); assertEquals(18, tables.size()); Iterator<TableMetadata> iterator = tables.iterator(); verifyTable("INFORMATION_SCHEMA", "CATALOGS", iterator.next()); verifyTable("INFORMATION_SCHEMA", "COLUMNS", iterator.next()); verifyTable("INFORMATION_SCHEMA", "SCHEMATA", iterator.next()); verifyTable("INFORMATION_SCHEMA", "TABLES", iterator.next()); verifyTable("INFORMATION_SCHEMA", "VIEWS", iterator.next()); verifyTable("sys", "boot", iterator.next()); verifyTable("sys", "dependencies", iterator.next()); verifyTable("sys", "fragments", iterator.next()); verifyTable("sys", "materializations", iterator.next()); verifyTable("sys", "memory", iterator.next()); verifyTable("sys", "nodes", iterator.next()); verifyTable("sys", "options", iterator.next()); verifyTable("sys", "queries", iterator.next()); verifyTable("sys", "reflections", iterator.next()); verifyTable("sys", "refreshes", iterator.next()); verifyTable("sys", "slicing_threads", iterator.next()); verifyTable("sys", "threads", iterator.next()); verifyTable("sys", "version", iterator.next()); }
@Test public void firstDisconnectRecovery() throws Exception { CountDownLatch closeLatch = new CountDownLatch(1); FabricRunnerFactory factory = getFabric().registerProtocol(new Protocol(closeLatch)); FabricCommandRunner runner = factory.getCommandRunner(getFabric().getAddress(), getFabric().getPort()); // send a message, establishing the connection. { SimpleMessage m = new SimpleMessage(1); runner.runCommand(m); m.getFuture().checkedGet(1000, TimeUnit.MILLISECONDS); } closeLatch.countDown(); // wait for the local connection to be closed. Thread.sleep(1000); // ensure we can send message again. { SimpleMessage m = new SimpleMessage(1); runner.runCommand(m); m.getFuture().checkedGet(1000, TimeUnit.MILLISECONDS); } }