@Override public Object next() { if (!beforeFirst) { throw new IllegalStateException(); } beforeFirst = false; final NodeEndpoint ep = dbContext.getEndpoint(); NodeInstance i = new NodeInstance(); i.current = false; // disable current field for now. i.hostname = ep.getAddress(); try { i.ip_address = InetAddress.getLocalHost().getHostAddress(); } catch (UnknownHostException e) { // no op } i.user_port = ep.getUserPort(); i.fabric_port = ep.getFabricPort(); final WorkStats stats = dbContext.getWorkStatsProvider().get(); i.cluster_load = stats.getClusterLoad(); i.configured_max_width = (int)dbContext.getClusterResourceInformation().getAverageExecutorCores(dbContext.getOptionManager()); i.actual_max_width = (int) Math.max(1, i.configured_max_width * stats.getMaxWidthFactor()); return i; }
@BeforeClass
public static void setup() {
  endpoints = Lists.newArrayList();
  // Register endpoints "node2" .. "node31".
  for (int nodeId = 2; nodeId < 32; nodeId++) {
    endpoints.add(NodeEndpoint.newBuilder().setAddress(String.format("node%d", nodeId)).build());
  }
}
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  // null is never an instance of EndpointAffinity, so this also rejects obj == null.
  if (!(obj instanceof EndpointAffinity)) {
    return false;
  }
  final EndpointAffinity that = (EndpointAffinity) obj;
  // Bit-level comparison so NaN == NaN and +0.0 != -0.0, matching Double.equals semantics.
  if (Double.doubleToLongBits(affinity) != Double.doubleToLongBits(that.affinity)) {
    return false;
  }
  if (endpoint == null ? that.endpoint != null : !endpoint.equals(that.endpoint)) {
    return false;
  }
  return mandatory == that.mandatory;
}
coordinator.start(); CoordinationProtos.NodeEndpoint nodeEndpoint1 = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host1") .setFabricPort(1234) .build(); CoordinationProtos.NodeEndpoint nodeEndpoint2 = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host2") .setFabricPort(1235) .build(); CoordinationProtos.NodeEndpoint nodeEndpoint3 = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host3") .setFabricPort(1236) assertTrue(!leader.getCurrentEndPoint().equals(secondLeader.getCurrentEndPoint())); assertTrue(!leader.getCurrentEndPoint().equals(thirdLeader.getCurrentEndPoint())); assertTrue(!secondLeader.getCurrentEndPoint().equals(thirdLeader.getCurrentEndPoint()));
coordinator.start(); CoordinationProtos.NodeEndpoint nodeEndpoint1 = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host1") .setFabricPort(1234) .build(); CoordinationProtos.NodeEndpoint nodeEndpoint2 = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host2") .setFabricPort(1235) CoordinationProtos.NodeEndpoint taskEndPpoint = leader.getCurrentEndPoint(); while (leadEndPoint.equals(taskEndPpoint)) { Thread.sleep(100); leadEndPoint = leader.getTaskLeader();
result = result && (hasAddress() == other.hasAddress()); if (hasAddress()) { result = result && getAddress() .equals(other.getAddress()); result = result && (hasUserPort() == other.hasUserPort()); if (hasUserPort()) { result = result && (getUserPort() == other.getUserPort()); result = result && (hasFabricPort() == other.hasFabricPort()); if (hasFabricPort()) { result = result && (getFabricPort() == other.getFabricPort()); result = result && (hasRoles() == other.hasRoles()); if (hasRoles()) { result = result && getRoles() .equals(other.getRoles()); result = result && (hasStartTime() == other.hasStartTime()); if (hasStartTime()) { result = result && (getStartTime() == other.getStartTime()); result = result && (hasProvisionId() == other.hasProvisionId()); if (hasProvisionId()) { result = result && getProvisionId() .equals(other.getProvisionId());
hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasAddress()) { hash = (37 * hash) + ADDRESS_FIELD_NUMBER; hash = (53 * hash) + getAddress().hashCode(); if (hasUserPort()) { hash = (37 * hash) + USER_PORT_FIELD_NUMBER; hash = (53 * hash) + getUserPort(); if (hasFabricPort()) { hash = (37 * hash) + FABRIC_PORT_FIELD_NUMBER; hash = (53 * hash) + getFabricPort(); if (hasRoles()) { hash = (37 * hash) + ROLES_FIELD_NUMBER; hash = (53 * hash) + getRoles().hashCode(); if (hasStartTime()) { hash = (37 * hash) + STARTTIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStartTime()); if (hasProvisionId()) { hash = (37 * hash) + PROVISION_ID_FIELD_NUMBER; hash = (53 * hash) + getProvisionId().hashCode(); if (hasMaxDirectMemory()) { hash = (37 * hash) + MAX_DIRECT_MEMORY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMaxDirectMemory());
if (hasEndpoint()) { result = result && getEndpoint() .equals(other.getEndpoint());
if (hasEndpoint()) { result = result && getEndpoint() .equals(other.getEndpoint());
public Builder mergeFrom(com.dremio.exec.proto.CoordinationProtos.NodeEndpoint other) { if (other == com.dremio.exec.proto.CoordinationProtos.NodeEndpoint.getDefaultInstance()) return this; if (other.hasAddress()) { bitField0_ |= 0x00000001; address_ = other.address_; onChanged(); if (other.hasUserPort()) { setUserPort(other.getUserPort()); if (other.hasFabricPort()) { setFabricPort(other.getFabricPort()); if (other.hasRoles()) { mergeRoles(other.getRoles()); if (other.hasStartTime()) { setStartTime(other.getStartTime()); if (other.hasProvisionId()) { bitField0_ |= 0x00000020; provisionId_ = other.provisionId_; onChanged(); if (other.hasMaxDirectMemory()) { setMaxDirectMemory(other.getMaxDirectMemory()); if (other.hasAvailableCores()) { setAvailableCores(other.getAvailableCores());
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.dremio.exec.proto.CoordExecRPC.IncomingMinorFragment)) {
    return super.equals(obj);
  }
  final com.dremio.exec.proto.CoordExecRPC.IncomingMinorFragment other =
      (com.dremio.exec.proto.CoordExecRPC.IncomingMinorFragment) obj;
  // Fields compare equal when their has-bits match and, if set, their values match.
  if (hasMinorFragment() != other.hasMinorFragment()) {
    return false;
  }
  if (hasMinorFragment() && getMinorFragment() != other.getMinorFragment()) {
    return false;
  }
  if (hasEndpoint() != other.hasEndpoint()) {
    return false;
  }
  if (hasEndpoint() && !getEndpoint().equals(other.getEndpoint())) {
    return false;
  }
  return getUnknownFields().equals(other.getUnknownFields());
}
@Test public void testCancelNonRunningFragments() throws Exception{ test("alter session set \"planner.slice_target\" = 10"); // Inject an out of memory exception in the ScanBatch CoordinationProtos.NodeEndpoint endpoint = nodes[0].getContext().getEndpoint(); String controlsString = "{\"injections\":[{" + "\"address\":\"" + endpoint.getAddress() + "\"," + "\"port\":\"" + endpoint.getUserPort() + "\"," + "\"type\":\"exception\"," + "\"siteClass\":\"" + "com.dremio.exec.physical.impl.ScanBatch" + "\"," + "\"desc\":\"" + "next-allocate" + "\"," + "\"nSkip\":0," + "\"nFire\":1," + "\"exceptionClass\":\"" + OutOfMemoryException.class.getName() + "\"" + "}]}"; ControlsInjectionUtil.setControls(client, controlsString); String query = getFile("queries/tpch/04.sql"); try { test(query); fail("The query should have failed!!!"); } catch(UserException uex) { // The query should fail } } }
/**
 * Runs the given query file in single mode with an OutOfMemoryException injected
 * into {@code ScanBatch}'s next-allocate site, and verifies the failure surfaces
 * as a RESOURCE error carrying the standard memory-error message.
 *
 * @param fileName resource path of the query to execute
 */
private void testSingleMode(String fileName) throws Exception {
  test(SINGLE_MODE);

  // Inject an out of memory exception in the ScanBatch on this node.
  CoordinationProtos.NodeEndpoint endpoint = nodes[0].getContext().getEndpoint();
  String controlsString = "{\"injections\":[{"
      + "\"address\":\"" + endpoint.getAddress() + "\","
      + "\"port\":\"" + endpoint.getUserPort() + "\","
      + "\"type\":\"exception\","
      + "\"siteClass\":\"" + "com.dremio.exec.physical.impl.ScanBatch" + "\","
      + "\"desc\":\"" + "next-allocate" + "\","
      + "\"nSkip\":0,"
      + "\"nFire\":1,"
      + "\"exceptionClass\":\"" + OutOfMemoryException.class.getName() + "\""
      + "}]}";
  ControlsInjectionUtil.setControls(client, controlsString);

  String query = getFile(fileName);
  try {
    test(query);
    // Without this the method passes silently when the injection never fires,
    // so the error-type assertions below would never be exercised.
    fail("The query should have failed with a memory error");
  } catch (UserException uex) {
    DremioPBError error = uex.getOrCreatePBError(false);
    Assert.assertEquals(DremioPBError.ErrorType.RESOURCE, error.getErrorType());
    Assert.assertTrue("Error message isn't related to memory error",
        uex.getMessage().contains(UserException.MEMORY_ERROR_MSG));
  }
}
private void writeNativeDrillConnection(XMLStreamWriter xmlStreamWriter, DatasetConfig datasetConfig, String hostname) throws XMLStreamException { DatasetPath dataset = new DatasetPath(datasetConfig.getFullPathList()); xmlStreamWriter.writeStartElement("connection"); xmlStreamWriter.writeAttribute("class", "drill"); xmlStreamWriter.writeAttribute("connection-type", "Direct"); xmlStreamWriter.writeAttribute("authentication", "Basic Authentication"); xmlStreamWriter.writeAttribute("odbc-connect-string-extras", ""); // It has to match what is returned by the driver/Tableau xmlStreamWriter.writeAttribute("schema", dataset.toParentPath()); xmlStreamWriter.writeAttribute("port", String.valueOf(endpoint.getUserPort())); xmlStreamWriter.writeAttribute("server", hostname); writeRelation(xmlStreamWriter, datasetConfig); if (customizationEnabled) { writeConnectionCustomization(xmlStreamWriter); } /* DX-7447 - When using the drill driver, Tableau will show an misleading error message because it could not connect to Dremio since it doesn't have the username/password (and doesn't prompt for them first). To work around this, we generate metadata for the Dataset and add it to the .tds file. This prefills the schema in Tableau and when the user starts working with the schema it will prompt the user to authenticate with Dremio. */ writeTableauMetadata(xmlStreamWriter, datasetConfig); xmlStreamWriter.writeEndElement(); }
private void writeConnection(XMLStreamWriter xmlStreamWriter, DatasetConfig datasetConfig, String hostname) throws XMLStreamException { DatasetPath dataset = new DatasetPath(datasetConfig.getFullPathList()); xmlStreamWriter.writeStartElement("connection"); xmlStreamWriter.writeAttribute("class", "genericodbc"); xmlStreamWriter.writeAttribute("dbname", InfoSchemaConstants.IS_CATALOG_NAME); xmlStreamWriter.writeAttribute("odbc-connect-string-extras", format("AUTHENTICATIONTYPE=Basic Authentication;CONNECTIONTYPE=Direct;HOST=%s", hostname)); // It has to match what is returned by the driver/Tableau xmlStreamWriter.writeAttribute("odbc-dbms-name", "Dremio"); xmlStreamWriter.writeAttribute("odbc-driver", "Dremio Connector"); xmlStreamWriter.writeAttribute("odbc-dsn", ""); xmlStreamWriter.writeAttribute("odbc-suppress-connection-pooling", ""); xmlStreamWriter.writeAttribute("odbc-use-connection-pooling", ""); xmlStreamWriter.writeAttribute("schema", dataset.toParentPath()); xmlStreamWriter.writeAttribute("port", String.valueOf(endpoint.getUserPort())); xmlStreamWriter.writeAttribute("server", ""); xmlStreamWriter.writeAttribute("username", ""); writeRelation(xmlStreamWriter, datasetConfig); if (customizationEnabled) { writeConnectionCustomization(xmlStreamWriter); } xmlStreamWriter.writeEndElement(); }
/** * generate a context message * @return string containing all context information concatenated */ String generateContextMessage(boolean includeErrorIdAndIdentity) { StringBuilder sb = new StringBuilder(); for (String context : contextList) { sb.append(context).append("\n"); } if (includeErrorIdAndIdentity) { // add identification infos sb.append("\n[Error Id: "); sb.append(errorId).append(" "); if (endpoint != null) { sb.append("on ") .append(endpoint.getAddress()) .append(":") .append(endpoint.getUserPort()); } sb.append("]"); } return sb.toString(); }
/**
 * Builds everything this fragment needs before it can run: the output allocator,
 * the operator pipeline created from the serialized plan, and the coordinator
 * crash listener; ends by transitioning the fragment to the RUNNING state.
 * The statement order is significant: the pipeline must be fully set up before
 * the listener registration and state transition.
 *
 * @throws Exception if plan deserialization or pipeline setup fails
 */
private void setupExecution() throws Exception{
  logger.debug("Starting fragment {}:{} on {}:{}", fragment.getHandle().getMajorFragmentId(), fragment.getHandle().getMinorFragmentId(), fragment.getAssignment().getAddress(), fragment.getAssignment().getUserPort());
  // Child allocator dedicated to this fragment's output batches; reservation comes
  // from the OUTPUT_ALLOCATOR_RESERVATION option, with no upper limit.
  outputAllocator = ticket.newChildAllocator("output-frag:" + QueryIdHelper.getFragmentId(fragment.getHandle()), fragmentOptions.getOption(ExecConstants.OUTPUT_ALLOCATOR_RESERVATION), Long.MAX_VALUE);
  contextCreator.setFragmentOutputAllocator(outputAllocator);
  // Deserialize the physical plan for this fragment from its JSON form.
  final PhysicalOperator rootOperator = reader.readFragmentOperator(fragment.getFragmentJson(), fragment.getFragmentCodec(), SharedDataMap.create(sharedData, fragment.getHandle()));
  // Operators are created under the identity of the query user.
  final OperatorCreator operatorCreator = new UserDelegatingOperatorCreator(contextInfo.getQueryUser(), opCreator);
  pipeline = PipelineCreator.get(
      new FragmentExecutionContext(fragment.getForeman(), updater, sources, cancelled),
      buffers, operatorCreator, contextCreator, functionLookupContext, rootOperator,
      tunnelProvider, new SharedResourcesContextImpl(sharedResources)
      );
  pipeline.setup();
  // Watch coordinator membership so the fragment can react if its foreman dies.
  clusterCoordinator.getServiceSet(ClusterCoordinator.Role.COORDINATOR).addNodeStatusListener(crashListener);
  transitionToRunning();
  isSetup = true;
}
@Override
public void nodesRegistered(Set<CoordinationProtos.NodeEndpoint> registeredNodes) {
  final Iterator<CoordinationProtos.NodeEndpoint> it = registeredNodes.iterator();
  if (!it.hasNext()) {
    logger.warn("Received empty node registration");
    return;
  }
  // Only the first registered endpoint is considered the leader.
  final CoordinationProtos.NodeEndpoint endpoint = it.next();
  synchronized (taskLeaderLock) {
    if (taskLeaderNode == null || taskLeaderNode.equals(endpoint)) {
      logger.info("New TaskLeader node for {} {}:{} registered itself.",
          taskName, endpoint.getAddress(), endpoint.getFabricPort());
    } else {
      logger.info("TaskLeader node for {} changed. Previous was {}:{}, new is {}:{}",
          taskName, taskLeaderNode.getAddress(), taskLeaderNode.getFabricPort(),
          endpoint.getAddress(), endpoint.getFabricPort());
    }
    taskLeaderNode = endpoint;
    taskLeaderUp = true;
    // Wake any threads blocked waiting for a leader to appear.
    taskLeaderLock.notifyAll();
  }
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.dremio.exec.proto.CoordExecRPC.FragmentAssignment)) {
    return super.equals(obj);
  }
  final com.dremio.exec.proto.CoordExecRPC.FragmentAssignment other =
      (com.dremio.exec.proto.CoordExecRPC.FragmentAssignment) obj;
  // Optional field: has-bits must match and, when set, the values must match.
  if (hasAssignment() != other.hasAssignment()) {
    return false;
  }
  if (hasAssignment() && !getAssignment().equals(other.getAssignment())) {
    return false;
  }
  if (!getMinorFragmentIdList().equals(other.getMinorFragmentIdList())) {
    return false;
  }
  return getUnknownFields().equals(other.getUnknownFields());
}