this.client = new DrillClient(dConfig, set.getCoordinator()); } else if(config.isDirect()) { final DrillConfig dConfig = DrillConfig.forClient(); this.allocator = RootAllocatorFactory.newRoot(dConfig); this.client = new DrillClient(dConfig, true); // Get a direct connection connect = config.getZookeeperConnectionString(); } else { this.client = new DrillClient(); connect = config.getZookeeperConnectionString(); this.client.setClientName("Apache Drill JDBC Driver"); this.client.connect(connect, info); } catch (OutOfMemoryException e) { throw new SQLNonTransientConnectionException("Failure creating root allocator", e);
/**
 * Closes the class-scoped client (if one was opened) and restores the
 * client that was in place before this test class ran.
 */
@AfterClass
public static void closeMyClient() throws IOException {
  final DrillClient myClient = client;
  if (myClient != null) {
    myClient.close();
  }
  client = parent_client;
}
DrillRpcFuture<CreatePreparedStatementResp> respFuture = connection.getClient().createPreparedStatement(signature.sql);
/**
 * Opens a class-scoped client (with complex-type support disabled),
 * stashing the previously active client for restoration in teardown.
 */
@BeforeClass
public static void openMyClient() throws Exception {
  // Remember the inherited client so teardown can restore it.
  parent_client = client;
  client = new DrillClient(config, serviceSet.getCoordinator());
  client.setSupportComplexTypes(false);
  client.connect();
}
/**
 * Creates a cursor bound to the given connection and statement.
 *
 * @param connection open Drill connection supplying the client
 * @param statement statement whose query this cursor iterates
 * @param signature prepared signature of the query
 * @throws SQLException if the statement's query timeout cannot be read
 */
DrillCursor(DrillConnectionImpl connection, AvaticaStatement statement, Signature signature) throws SQLException {
  this.connection = connection;
  this.statement = statement;
  this.signature = signature;
  final DrillClient drillClient = connection.getClient();
  // Threshold (read from client config) past which incoming batches are throttled.
  final int throttlingThreshold =
      drillClient.getConfig().getInt(ExecConstants.JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD);
  resultsListener = new ResultsListener(this, throttlingThreshold);
  currentBatchHolder = new RecordBatchLoader(drillClient.getAllocator());
  setTimeout(this.statement.getQueryTimeout());
}
connection.getClient().executePreparedStatement(preparedStatement.getServerHandle(), resultsListener); connection.getClient().runQuery(QueryType.SQL, signature.sql, resultsListener);
drillbits[i].run(); client = new DrillClient(config, serviceSet.getCoordinator()); } else { ZKClusterCoordinator clusterCoordinator = new ZKClusterCoordinator(config, zkQuorum); clusterCoordinator.start(10000); client = new DrillClient(config, clusterCoordinator); client.connect(); } finally { if (client != null) { client.close();
/**
 * Starts a connection from client to server.
 *
 * @param props non-null {@link Properties} filled with connection url parameters
 * @throws RpcException if the underlying connection attempt fails
 */
public void connect(Properties props) throws RpcException {
  connect(null, props);
}
endpoints.addAll(parseAndVerifyEndpoints(properties.getProperty(DrillProperties.DRILLBIT_CONNECTION), config.getString(ExecConstants.INITIAL_USER_PORT))); } else { eventLoopGroup = createEventLoop(config.getInt(ExecConstants.CLIENT_RPC_THREADS), "Client-"); executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), connect(endpoint); connected = true; logger.info("Successfully connected to server {}:{}", endpoint.getAddress(), endpoint.getUserPort());
for (String query : queries) { AwaitableUserResultsListener listener = new AwaitableUserResultsListener(new LoggingResultsListener(client.getConfig(), outputFormat, width)); watch.start(); client.runQuery(queryType, query, listener); int rows = listener.await(); System.out.println(String.format("%d record%s selected (%f seconds)", rows, rows > 1 ? "s" : "", (float) watch.elapsed(TimeUnit.MILLISECONDS) / (float) 1000));
/**
 * Switches the session's default schema by issuing a {@code use} statement.
 *
 * @param schema schema name to make current
 * @throws SQLException if the connection is closed or the query fails
 */
@Override
public void setSchema(String schema) throws SQLException {
  checkOpen();
  final String useQuery = String.format("use %s", schema);
  try {
    client.runQuery(UserBitShared.QueryType.SQL, useQuery);
  } catch (RpcException e) {
    throw new SQLException("Error when setting schema", e);
  }
}
/** Cancels any query still in flight, then releases listener and batch resources. */
synchronized void cleanup() {
  final boolean queryStillRunning =
      resultsListener.getQueryId() != null && !resultsListener.completed;
  if (queryStillRunning) {
    connection.getClient().cancelQuery(resultsListener.getQueryId());
  }
  resultsListener.close();
  currentBatchHolder.clear();
}
/**
 * Connects the client to a Drillbit server using default (empty) connection
 * properties.
 *
 * @throws RpcException if the connection attempt fails
 */
public void connect() throws RpcException {
  final Properties defaultProps = new Properties();
  connect(null, defaultProps);
}
/**
 * Creates a cursor over the results of the given statement.
 *
 * @param connection open Drill connection; supplies the client used for fetching
 * @param statement statement whose query this cursor iterates
 * @param signature prepared signature of the query
 * @throws SQLException if the statement's query timeout cannot be read
 */
DrillCursor(DrillConnectionImpl connection, AvaticaStatement statement, Signature signature) throws SQLException {
  this.connection = connection;
  this.statement = statement;
  this.signature = signature;
  DrillClient client = connection.getClient();
  // Threshold (from client config) at which the listener throttles queued batches.
  final int batchQueueThrottlingThreshold = client.getConfig().getInt(
      ExecConstants.JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD );
  resultsListener = new ResultsListener(this, batchQueueThrottlingThreshold);
  currentBatchHolder = new RecordBatchLoader(client.getAllocator());
  // Honor any timeout already configured on the statement.
  setTimeout(this.statement.getQueryTimeout());
}
connection.getClient().executePreparedStatement(preparedStatement.getServerHandle(), resultsListener); connection.getClient().runQuery(QueryType.SQL, signature.sql, resultsListener);
/**
 * Cancels the running query (if any) and releases cursor resources.
 */
synchronized void cleanup() {
  // Only cancel if the server assigned a query id and the query has not completed.
  if (resultsListener.getQueryId() != null && ! resultsListener.completed) {
    connection.getClient().cancelQuery(resultsListener.getQueryId());
  }
  resultsListener.close();
  currentBatchHolder.clear();
}
this.client = new DrillClient(dConfig, set.getCoordinator()); } else if(config.isDirect()) { final DrillConfig dConfig = DrillConfig.forClient(); this.allocator = RootAllocatorFactory.newRoot(dConfig); this.client = new DrillClient(dConfig, true); // Get a direct connection connect = config.getZookeeperConnectionString(); } else { this.client = new DrillClient(); connect = config.getZookeeperConnectionString(); this.client.setClientName("Apache Drill JDBC Driver"); this.client.connect(connect, info); } catch (OutOfMemoryException e) { throw new SQLNonTransientConnectionException("Failure creating root allocator", e);
public synchronized boolean reconnect() { if (client.isActive()) { return true; } int retry = reconnectTimes; while (retry > 0) { retry--; try { Thread.sleep(this.reconnectDelay); // Gets the drillbit endpoints that are ONLINE and excludes the drillbits that are // in QUIESCENT state. This avoids the clients connecting to drillbits that are // shutting down thereby reducing the chances of query failures. final ArrayList<DrillbitEndpoint> endpoints = new ArrayList<>(clusterCoordinator.getOnlineEndPoints()); if (endpoints.isEmpty()) { continue; } client.close(); Collections.shuffle(endpoints); connect(endpoints.iterator().next()); return true; } catch (Exception e) { } } return false; }
/** Verifies column-length metadata reported for Hive string-like column types. */
@Test
public void testStringColumnsMetadata() throws Exception {
  final String query = "select varchar_field, char_field, string_field from hive.readtest";
  final Map<String, Integer> expectedLengths = new HashMap<>();
  expectedLengths.put("varchar_field", 50);
  expectedLengths.put("char_field", 10);
  expectedLengths.put("string_field", HiveVarchar.MAX_VARCHAR_LENGTH);
  verifyColumnsMetadata(
      client.createPreparedStatement(query).get().getPreparedStatement().getColumnsList(),
      expectedLengths);
  try {
    test("alter session set `%s` = true", ExecConstants.EARLY_LIMIT0_OPT_KEY);
    final String limit0Query = String.format("select * from (%s) t limit 0", query);
    verifyColumnsMetadata(
        client.createPreparedStatement(limit0Query).get().getPreparedStatement().getColumnsList(),
        expectedLengths);
  } finally {
    test("alter session reset `%s`", ExecConstants.EARLY_LIMIT0_OPT_KEY);
  }
}
DrillRpcFuture<CreatePreparedStatementResp> respFuture = connection.getClient().createPreparedStatement(signature.sql);