/** * generate a context message * @return string containing all context information concatenated */ String generateContextMessage(boolean includeErrorIdAndIdentity) { StringBuilder sb = new StringBuilder(); for (String context : contextList) { sb.append(context).append("\n"); } if (includeErrorIdAndIdentity) { // add identification infos sb.append("\n[Error Id: "); sb.append(errorId).append(" "); if (endpoint != null) { sb.append("on ") .append(endpoint.getAddress()) .append(":") .append(endpoint.getUserPort()); } sb.append("]"); } return sb.toString(); }
private void writeConnection(XMLStreamWriter xmlStreamWriter, DatasetConfig datasetConfig, String hostname) throws XMLStreamException { DatasetPath dataset = new DatasetPath(datasetConfig.getFullPathList()); xmlStreamWriter.writeStartElement("connection"); xmlStreamWriter.writeAttribute("class", "genericodbc"); xmlStreamWriter.writeAttribute("dbname", InfoSchemaConstants.IS_CATALOG_NAME); xmlStreamWriter.writeAttribute("odbc-connect-string-extras", format("AUTHENTICATIONTYPE=Basic Authentication;CONNECTIONTYPE=Direct;HOST=%s", hostname)); // It has to match what is returned by the driver/Tableau xmlStreamWriter.writeAttribute("odbc-dbms-name", "Dremio"); xmlStreamWriter.writeAttribute("odbc-driver", "Dremio Connector"); xmlStreamWriter.writeAttribute("odbc-dsn", ""); xmlStreamWriter.writeAttribute("odbc-suppress-connection-pooling", ""); xmlStreamWriter.writeAttribute("odbc-use-connection-pooling", ""); xmlStreamWriter.writeAttribute("schema", dataset.toParentPath()); xmlStreamWriter.writeAttribute("port", String.valueOf(endpoint.getUserPort())); xmlStreamWriter.writeAttribute("server", ""); xmlStreamWriter.writeAttribute("username", ""); writeRelation(xmlStreamWriter, datasetConfig); if (customizationEnabled) { writeConnectionCustomization(xmlStreamWriter); } xmlStreamWriter.writeEndElement(); }
private void writeNativeDrillConnection(XMLStreamWriter xmlStreamWriter, DatasetConfig datasetConfig, String hostname) throws XMLStreamException { DatasetPath dataset = new DatasetPath(datasetConfig.getFullPathList()); xmlStreamWriter.writeStartElement("connection"); xmlStreamWriter.writeAttribute("class", "drill"); xmlStreamWriter.writeAttribute("connection-type", "Direct"); xmlStreamWriter.writeAttribute("authentication", "Basic Authentication"); xmlStreamWriter.writeAttribute("odbc-connect-string-extras", ""); // It has to match what is returned by the driver/Tableau xmlStreamWriter.writeAttribute("schema", dataset.toParentPath()); xmlStreamWriter.writeAttribute("port", String.valueOf(endpoint.getUserPort())); xmlStreamWriter.writeAttribute("server", hostname); writeRelation(xmlStreamWriter, datasetConfig); if (customizationEnabled) { writeConnectionCustomization(xmlStreamWriter); } /* DX-7447 - When using the drill driver, Tableau will show an misleading error message because it could not connect to Dremio since it doesn't have the username/password (and doesn't prompt for them first). To work around this, we generate metadata for the Dataset and add it to the .tds file. This prefills the schema in Tableau and when the user starts working with the schema it will prompt the user to authenticate with Dremio. */ writeTableauMetadata(xmlStreamWriter, datasetConfig); xmlStreamWriter.writeEndElement(); }
/**
 * Runs the query in {@code fileName} in single mode with an out-of-memory exception
 * injected into ScanBatch, and verifies the failure surfaces as a RESOURCE error
 * with a memory-related message.
 *
 * @param fileName resource file containing the query to execute
 * @throws Exception on test-harness failure
 */
private void testSingleMode(String fileName) throws Exception {
  test(SINGLE_MODE);

  final CoordinationProtos.NodeEndpoint endpoint = nodes[0].getContext().getEndpoint();
  final String controlsString = "{\"injections\":[{"
      + "\"address\":\"" + endpoint.getAddress() + "\","
      + "\"port\":\"" + endpoint.getUserPort() + "\","
      + "\"type\":\"exception\","
      + "\"siteClass\":\"" + "com.dremio.exec.physical.impl.ScanBatch" + "\","
      + "\"desc\":\"" + "next-allocate" + "\","
      + "\"nSkip\":0,"
      + "\"nFire\":1,"
      + "\"exceptionClass\":\"" + OutOfMemoryException.class.getName() + "\""
      + "}]}";
  ControlsInjectionUtil.setControls(client, controlsString);

  final String query = getFile(fileName);
  try {
    test(query);
    // BUGFIX: previously the test silently passed when the injection never fired.
    // Mirror the sibling cancel test, which fails explicitly on unexpected success.
    Assert.fail("Expected the query to fail with an injected out-of-memory error");
  } catch (UserException uex) {
    final DremioPBError error = uex.getOrCreatePBError(false);
    Assert.assertEquals(DremioPBError.ErrorType.RESOURCE, error.getErrorType());
    Assert.assertTrue("Error message isn't related to memory error",
        uex.getMessage().contains(UserException.MEMORY_ERROR_MSG));
  }
}
@Override public Object next() { if (!beforeFirst) { throw new IllegalStateException(); } beforeFirst = false; final NodeEndpoint ep = dbContext.getEndpoint(); NodeInstance i = new NodeInstance(); i.current = false; // disable current field for now. i.hostname = ep.getAddress(); try { i.ip_address = InetAddress.getLocalHost().getHostAddress(); } catch (UnknownHostException e) { // no op } i.user_port = ep.getUserPort(); i.fabric_port = ep.getFabricPort(); final WorkStats stats = dbContext.getWorkStatsProvider().get(); i.cluster_load = stats.getClusterLoad(); i.configured_max_width = (int)dbContext.getClusterResourceInformation().getAverageExecutorCores(dbContext.getOptionManager()); i.actual_max_width = (int) Math.max(1, i.configured_max_width * stats.getMaxWidthFactor()); return i; }
@Test public void testCancelNonRunningFragments() throws Exception{ test("alter session set \"planner.slice_target\" = 10"); // Inject an out of memory exception in the ScanBatch CoordinationProtos.NodeEndpoint endpoint = nodes[0].getContext().getEndpoint(); String controlsString = "{\"injections\":[{" + "\"address\":\"" + endpoint.getAddress() + "\"," + "\"port\":\"" + endpoint.getUserPort() + "\"," + "\"type\":\"exception\"," + "\"siteClass\":\"" + "com.dremio.exec.physical.impl.ScanBatch" + "\"," + "\"desc\":\"" + "next-allocate" + "\"," + "\"nSkip\":0," + "\"nFire\":1," + "\"exceptionClass\":\"" + OutOfMemoryException.class.getName() + "\"" + "}]}"; ControlsInjectionUtil.setControls(client, controlsString); String query = getFile("queries/tpch/04.sql"); try { test(query); fail("The query should have failed!!!"); } catch(UserException uex) { // The query should fail } } }
/**
 * Prepares this fragment for execution: allocates the output allocator, builds the
 * operator pipeline from the serialized fragment plan, registers the node-crash
 * listener, and transitions to the running state.
 *
 * NOTE(review): statement order matters here — the pipeline must be fully set up
 * before the status listener is added and the state transition happens.
 *
 * @throws Exception if plan deserialization or pipeline setup fails
 */
private void setupExecution() throws Exception{
  logger.debug("Starting fragment {}:{} on {}:{}",
      fragment.getHandle().getMajorFragmentId(),
      fragment.getHandle().getMinorFragmentId(),
      fragment.getAssignment().getAddress(),
      fragment.getAssignment().getUserPort());

  // Child allocator dedicated to this fragment's output batches; reservation comes
  // from options, with no upper limit beyond the parent ticket's.
  outputAllocator = ticket.newChildAllocator("output-frag:" + QueryIdHelper.getFragmentId(fragment.getHandle()),
      fragmentOptions.getOption(ExecConstants.OUTPUT_ALLOCATOR_RESERVATION),
      Long.MAX_VALUE);
  contextCreator.setFragmentOutputAllocator(outputAllocator);

  // Deserialize the physical plan for this fragment.
  final PhysicalOperator rootOperator = reader.readFragmentOperator(fragment.getFragmentJson(),
      fragment.getFragmentCodec(), SharedDataMap.create(sharedData, fragment.getHandle()));

  // Operators run as the query's user, not the process user.
  final OperatorCreator operatorCreator = new UserDelegatingOperatorCreator(contextInfo.getQueryUser(), opCreator);
  pipeline = PipelineCreator.get(
      new FragmentExecutionContext(fragment.getForeman(), updater, sources, cancelled),
      buffers, operatorCreator, contextCreator, functionLookupContext, rootOperator,
      tunnelProvider, new SharedResourcesContextImpl(sharedResources)
      );
  pipeline.setup();

  // Watch for coordinator failures so the fragment can react if its foreman dies.
  clusterCoordinator.getServiceSet(ClusterCoordinator.Role.COORDINATOR).addNodeStatusListener(crashListener);

  transitionToRunning();
  isSetup = true;
}
// Protostuff serialization support for NodeEndpoint (generated-style code).
// Each field is written only when present; field numbers must stay in sync with
// the CoordinationProtos.NodeEndpoint protobuf definition. Note the gaps in the
// numbering (4 is skipped, provisioning/runtime fields start at 101) — these are
// part of the wire format and must not be renumbered.
public void writeTo(io.protostuff.Output output, com.dremio.exec.proto.CoordinationProtos.NodeEndpoint message) throws java.io.IOException
{
  if(message.hasAddress())
    output.writeString(1, message.getAddress(), false);
  if(message.hasUserPort())
    output.writeInt32(2, message.getUserPort(), false);
  if(message.hasFabricPort())
    output.writeInt32(3, message.getFabricPort(), false);
  if(message.hasRoles())
    // Nested message delegated to the Roles schema writer.
    output.writeObject(5, message.getRoles(), com.dremio.exec.proto.SchemaCoordinationProtos.Roles.WRITE, false);
  if(message.hasStartTime())
    output.writeInt64(101, message.getStartTime(), false);
  if(message.hasProvisionId())
    output.writeString(102, message.getProvisionId(), false);
  if(message.hasMaxDirectMemory())
    output.writeInt64(103, message.getMaxDirectMemory(), false);
  if(message.hasAvailableCores())
    output.writeInt32(104, message.getAvailableCores(), false);
}
// NOTE(review): the body of isInitialized continues beyond this excerpt.
public boolean isInitialized(com.dremio.exec.proto.CoordinationProtos.NodeEndpoint message)
/**
 * Opens a client connection to the given node, sending a handshake that
 * advertises this client's RPC version, capabilities, and credentials.
 *
 * @param handler     callback notified of connection success/failure
 * @param endpoint    node to connect to (user port)
 * @param props       optional session properties; skipped when null
 * @param credentials user identity sent with the handshake
 */
public void connect(RpcConnectionHandler<ServerConnection> handler, NodeEndpoint endpoint, UserProperties props, UserBitShared.UserCredentials credentials) {
  final UserToBitHandshake.Builder handshake = UserToBitHandshake.newBuilder()
      .setRpcVersion(UserRpcConfig.RPC_VERSION)
      .setSupportListening(true)
      .setSupportComplexTypes(supportComplexTypes)
      .setSupportTimeout(true)
      .setCredentials(credentials)
      .setRecordBatchType(RecordBatchType.DREMIO)
      .addSupportedRecordBatchFormats(RecordBatchFormat.DREMIO_1_4)
      .addSupportedRecordBatchFormats(RecordBatchFormat.DREMIO_0_9)
      .setClientInfos(UserRpcUtils.getRpcEndpointInfos(clientName));

  if (props != null) {
    handshake.setProperties(props);
  }

  this.connectAsClient(queryResultHandler.getWrappedConnectionHandler(handler),
      handshake.build(), endpoint.getAddress(), endpoint.getUserPort());
}
/**
 * Broadcasts a source configuration change to every coordinator and executor in
 * the cluster except this node, then waits a bounded time for acknowledgements.
 * Failures to deliver are logged but not propagated (best effort).
 *
 * @param config  the changed source configuration to broadcast
 * @param rpcType the RPC message type to send the change as
 */
private void communicateChange(SourceConfig config, RpcType rpcType) {
  final Set<NodeEndpoint> endpoints = new HashSet<>();
  // Seed the set with the local endpoint so the add() below fails for ourselves
  // and for any node listed as both coordinator and executor.
  endpoints.add(context.get().getEndpoint());

  final List<RpcFuture<Ack>> futures = new ArrayList<>();
  final SourceWrapper wrapper = SourceWrapper.newBuilder().setBytes(ByteString.copyFrom(
      ProtobufIOUtil.toByteArray(config, SourceConfig.getSchema(), LinkedBuffer.allocate()))).build();

  for (NodeEndpoint e : Iterables.concat(this.context.get().getCoordinators(), this.context.get().getExecutors())) {
    if (!endpoints.add(e)) {
      continue; // already notified (or the local node)
    }
    final SendSource send = new SendSource(wrapper, rpcType);
    // Commands travel over the fabric port; the user port below is only logged.
    // (BUGFIX: removed a stray empty statement after this call.)
    tunnelFactory.getCommandRunner(e.getAddress(), e.getFabricPort()).runCommand(send);
    logger.trace("Sending [{}] to {}:{}", config.getName(), e.getAddress(), e.getUserPort());
    futures.add(send.getFuture());
  }

  try {
    Futures.successfulAsList(futures).get(CHANGE_COMMUNICATION_WAIT, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e1) {
    // BUGFIX: restore the interrupt flag so callers can observe the interruption.
    Thread.currentThread().interrupt();
    logger.warn("Failure while communicating source change [{}].", config.getName(), e1);
  } catch (ExecutionException | TimeoutException e1) {
    logger.warn("Failure while communicating source change [{}].", config.getName(), e1);
  }
}
/**
 * Registers this node with the cluster coordinator, one registration per role
 * the node's endpoint advertises (master, coordinator, executor). Registration
 * handles are retained so they can be released on shutdown.
 *
 * @throws Exception if any registration fails
 */
@Override
public void start() throws Exception {
  final NodeEndpoint endpoint = context.get().getEndpoint();
  endpointName = endpoint.getAddress() + ":" + endpoint.getUserPort();
  logger.info("Starting NodeRegistration for {}", endpointName);

  final Roles roles = endpoint.getRoles();
  if (roles.getMaster()) {
    registrationHandles.add(coord.get().getServiceSet(ClusterCoordinator.Role.MASTER).register(endpoint));
  }
  if (roles.getSqlQuery()) {
    registrationHandles.add(coord.get().getServiceSet(ClusterCoordinator.Role.COORDINATOR).register(endpoint));
  }
  if (roles.getJavaExecutor()) {
    registrationHandles.add(coord.get().getServiceSet(ClusterCoordinator.Role.EXECUTOR).register(endpoint));
  }

  logger.info("NodeRegistration is up for {}", endpointName);
}
/**
 * Create a single exception injection on a specific bit. Note this format is not directly accepted by the injection
 * mechanism. Use the {@link Controls} to build exceptions.
 */
public static String createExceptionOnNode(final Class<?> siteClass, final String desc,
    final int nSkip, final int nFire, final Class<? extends Throwable> exceptionClass,
    final NodeEndpoint endpoint) {
  return String.format(
      "{ \"type\":\"exception\",\"siteClass\":\"%s\",\"desc\":\"%s\",\"nSkip\":%d,\"nFire\":%d,"
          + "\"exceptionClass\":\"%s\",\"address\":\"%s\",\"port\":\"%d\"}",
      siteClass.getName(), desc, nSkip, nFire,
      exceptionClass.getName(), endpoint.getAddress(), endpoint.getUserPort());
}
/**
 * Returns the "address:userPort" of this context's endpoint, or null when the
 * endpoint is unknown.
 */
public String getErrorLocation() {
  final NodeEndpoint endpoint = context.getEndpoint();
  return endpoint == null ? null : endpoint.getAddress() + ":" + endpoint.getUserPort();
}
/**
 * Returns a human-readable error identifier, suffixed with the endpoint's
 * "address:userPort" when the endpoint is known.
 */
public String getErrorIdWithIdentity() {
  final NodeEndpoint endpoint = context.getEndpoint();
  return endpoint == null
      ? "ErrorId: " + context.getErrorId()
      : "ErrorId: " + context.getErrorId() + " on " + endpoint.getAddress() + ":" + endpoint.getUserPort();
}
/**
 * Reports whether this injection applies to the given node. A null address
 * matches every node; otherwise both address and user port must match.
 */
public final boolean isValidForBit(final NodeEndpoint endpoint) {
  if (address == null) {
    return true; // no address filter: valid on any bit
  }
  return address.equals(endpoint.getAddress()) && port == endpoint.getUserPort();
}
}
/**
 * Builds a direct JDBC URL pointing at the first available coordinator,
 * or returns null when no coordinator is registered.
 */
public static String getJDBCURL() {
  final Collection<NodeEndpoint> coordinators = clusterCoordinator
      .getServiceSet(ClusterCoordinator.Role.COORDINATOR)
      .getAvailableEndpoints();
  if (coordinators.isEmpty()) {
    return null;
  }
  final NodeEndpoint coordinator = coordinators.iterator().next();
  return format("jdbc:dremio:direct=%s:%d", coordinator.getAddress(), coordinator.getUserPort());
}
/**
 * Connects to the given node's user port and blocks until the connection is
 * established or 30 seconds elapse.
 *
 * @param endpoint node to connect to
 * @throws RpcException if the connection fails or times out
 */
private void connect(NodeEndpoint endpoint) throws RpcException {
  final FutureHandler f = new FutureHandler();
  client.connect(f, endpoint, props, getUserCredentials());
  try {
    f.checkedGet(30, TimeUnit.SECONDS);
  } catch (TimeoutException e) {
    // BUGFIX: chain the TimeoutException as the cause instead of discarding it,
    // so the original timeout stack trace survives in the rethrown exception.
    throw new RpcException("Timed out after 30s waiting to connect to "
        + endpoint.getAddress() + ":" + endpoint.getUserPort(), e);
  }
}
/**
 * Create a pause injection on a specific bit. Note this format is not directly accepted by the injection
 * mechanism. Use the {@link Controls} to build exceptions.
 */
public static String createPauseOnNode(final Class<?> siteClass, final String desc,
    final int nSkip, final NodeEndpoint endpoint) {
  return String.format(
      "{ \"type\" : \"pause\",\"siteClass\" : \"%s\",\"desc\" : \"%s\",\"nSkip\" : %d,"
          + "\"address\":\"%s\",\"port\":\"%d\"}",
      siteClass.getName(), desc, nSkip, endpoint.getAddress(), endpoint.getUserPort());
}
/**
 * Returns a localhost JDBC connection string targeting this node's user port.
 */
public String getJDBCConnectionString() {
  final NodeEndpoint endpoint = node.getContext().getEndpoint();
  return String.format("jdbc:dremio:direct=localhost:%d", endpoint.getUserPort());
}
/**
 * Logs an RPC failure that occurred while signalling a fragment on a remote node.
 *
 * @param ex the failure reported by the RPC layer
 */
@Override
public void failed(final RpcException ex) {
  final String endpointIdentity = endpoint != null
      ? endpoint.getAddress() + ":" + endpoint.getUserPort()
      : "<null>";
  // BUGFIX: the message previously had a '{}' placeholder for the exception, which
  // made SLF4J format it as a plain argument and suppress its stack trace. With the
  // throwable as the extra trailing argument, the full stack trace is logged.
  logger.error("Failure while attempting to {} fragment {} on endpoint {}.",
      signal, QueryIdHelper.getQueryIdentifier(value), endpointIdentity, ex);
}