/**
 * Measures the latency of {@code listPartitions()} against a table populated
 * with {@code howMany} partitions.
 *
 * @param bench   micro-benchmark driver collecting timing samples
 * @param data    shared benchmark context (client, database and table names)
 * @param howMany number of partitions to create before measuring
 * @return timing statistics, or empty statistics if partition setup failed
 */
static DescriptiveStatistics benchmarkListManyPartitions(@NotNull MicroBenchmark bench,
                                                         @NotNull BenchData data,
                                                         int howMany) {
    final HMSClient client = data.getClient();
    String dbName = data.dbName;
    String tableName = data.tableName;
    createPartitionedTable(client, dbName, tableName);
    try {
        addManyPartitions(client, dbName, tableName, null, Collections.singletonList("d"), howMany);
        LOG.debug("Created {} partitions", howMany);
        LOG.debug("started benchmark... ");
        return bench.measure(() ->
            throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName)));
    } catch (TException e) {
        // Use the class logger (already used above) instead of printStackTrace()
        // so the failure is visible in benchmark output; return empty stats as before.
        LOG.error("failed to set up partitions for listPartitions benchmark", e);
        return new DescriptiveStatistics();
    } finally {
        // Always drop the table so repeated benchmark runs start clean.
        throwingSupplierWrapper(() -> client.dropTable(dbName, tableName));
    }
}
/**
 * Measures the latency of {@code getPartitions()} against a table populated
 * with {@code howMany} partitions.
 *
 * @param bench   micro-benchmark driver collecting timing samples
 * @param data    shared benchmark context (client, database and table names)
 * @param howMany number of partitions to create before measuring
 * @return timing statistics, or empty statistics if partition setup failed
 */
static DescriptiveStatistics benchmarkGetPartitions(@NotNull MicroBenchmark bench,
                                                    @NotNull BenchData data,
                                                    int howMany) {
    final HMSClient client = data.getClient();
    String dbName = data.dbName;
    String tableName = data.tableName;
    createPartitionedTable(client, dbName, tableName);
    try {
        addManyPartitions(client, dbName, tableName, null, Collections.singletonList("d"), howMany);
        LOG.debug("Created {} partitions", howMany);
        LOG.debug("started benchmark... ");
        return bench.measure(() ->
            throwingSupplierWrapper(() -> client.getPartitions(dbName, tableName)));
    } catch (TException e) {
        // Use the class logger (already used above) instead of printStackTrace()
        // so the failure is visible in benchmark output; return empty stats as before.
        LOG.error("failed to set up partitions for getPartitions benchmark", e);
        return new DescriptiveStatistics();
    } finally {
        // Always drop the table so repeated benchmark runs start clean.
        throwingSupplierWrapper(() -> client.dropTable(dbName, tableName));
    }
}
public String toBinaryString() throws IOException { org.apache.hadoop.hive.ql.plan.api.Query q = getQueryPlan(); TMemoryBuffer tmb = new TMemoryBuffer(q.toString().length() * 5); TBinaryProtocol oprot = new TBinaryProtocol(tmb); try { q.write(oprot); } catch (TException e) { // TODO Auto-generated catch block e.printStackTrace(); return q.toString(); } byte[] buf = new byte[tmb.length()]; tmb.read(buf, 0, tmb.length()); return new String(buf); // return getQueryPlan().toString(); }
public String toBinaryString() throws IOException { org.apache.hadoop.hive.ql.plan.api.Query q = getQueryPlan(); TMemoryBuffer tmb = new TMemoryBuffer(q.toString().length() * 5); TBinaryProtocol oprot = new TBinaryProtocol(tmb); try { q.write(oprot); } catch (TException e) { // TODO Auto-generated catch block e.printStackTrace(); return q.toString(); } byte[] buf = new byte[tmb.length()]; tmb.read(buf, 0, tmb.length()); return new String(buf); // return getQueryPlan().toString(); }
public String toThriftJSONString() throws IOException { org.apache.hadoop.hive.ql.plan.api.Query q = getQueryPlan(); TMemoryBuffer tmb = new TMemoryBuffer(q.toString().length() * 5); TJSONProtocol oprot = new TJSONProtocol(tmb); try { q.write(oprot); } catch (TException e) { // TODO Auto-generated catch block e.printStackTrace(); return q.toString(); } return tmb.toString("UTF-8"); }
/**
 * Measures {@code listPartitions()} latency against a table that holds a
 * single partition.
 *
 * @param bench micro-benchmark driver collecting timing samples
 * @param data  shared benchmark context (client, database and table names)
 * @return timing statistics, or empty statistics if partition setup failed
 */
static DescriptiveStatistics benchmarkListPartition(@NotNull MicroBenchmark bench, @NotNull BenchData data) {
    final HMSClient client = data.getClient();
    final String db = data.dbName;
    final String table = data.tableName;
    createPartitionedTable(client, db, table);
    try {
        addManyPartitions(client, db, table, null, Collections.singletonList("d"), 1);
        return bench.measure(
            () -> throwingSupplierWrapper(() -> client.listPartitions(db, table)));
    } catch (TException e) {
        e.printStackTrace();
        return new DescriptiveStatistics();
    } finally {
        // Clean up the table regardless of benchmark outcome.
        throwingSupplierWrapper(() -> client.dropTable(db, table));
    }
}
public String toThriftJSONString() throws IOException { org.apache.hadoop.hive.ql.plan.api.Query q = getQueryPlan(); TMemoryBuffer tmb = new TMemoryBuffer(q.toString().length() * 5); TJSONProtocol oprot = new TJSONProtocol(tmb); try { q.write(oprot); } catch (TException e) { // TODO Auto-generated catch block e.printStackTrace(); return q.toString(); } return tmb.toString("UTF-8"); }
// Test fixture constructor: builds the shared Database object used by the tests.
// NOTE(review): a TException from the builder is only printed, leaving the
// db field unset — consider failing fast so later tests get a clear error.
public TestMetaStoreServerUtils() { try { db = new DatabaseBuilder().setName(DB_NAME).build(null); } catch (TException e) { e.printStackTrace(); } }
// Measures the cost of addPartition() for a single partition.
// measure(pre, op, post): no pre-step (null), the timed operation is
// addPartition, and the post-step drops the partition so every iteration
// starts from the same state.  On setup failure prints the error and returns
// empty statistics; the table is always dropped in the finally block.
static DescriptiveStatistics benchmarkCreatePartition(@NotNull MicroBenchmark bench, @NotNull BenchData data) { final HMSClient client = data.getClient(); String dbName = data.dbName; String tableName = data.tableName; createPartitionedTable(client, dbName, tableName); final List<String> values = Collections.singletonList("d1"); try { Table t = client.getTable(dbName, tableName); Partition partition = new Util.PartitionBuilder(t) .withValues(values) .build(); return bench.measure(null, () -> throwingSupplierWrapper(() -> client.addPartition(partition)), () -> throwingSupplierWrapper(() -> client.dropPartition(dbName, tableName, values))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); } }
// Measures the cost of dropPartition() for a single partition.
// measure(pre, op, post): the pre-step adds the partition each iteration,
// the timed operation is dropPartition, and there is no post-step (null) —
// the mirror image of benchmarkCreatePartition.  On setup failure prints the
// error and returns empty statistics; the table is always dropped in finally.
static DescriptiveStatistics benchmarkDropPartition(@NotNull MicroBenchmark bench, @NotNull BenchData data) { final HMSClient client = data.getClient(); String dbName = data.dbName; String tableName = data.tableName; createPartitionedTable(client, dbName, tableName); final List<String> values = Collections.singletonList("d1"); try { Table t = client.getTable(dbName, tableName); Partition partition = new Util.PartitionBuilder(t) .withValues(values) .build(); return bench.measure( () -> throwingSupplierWrapper(() -> client.addPartition(partition)), () -> throwingSupplierWrapper(() -> client.dropPartition(dbName, tableName, values)), null); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); } }
} catch (TException e) { System.err.println("Error on reading last transformed record"); e.printStackTrace(System.err); srcDlsn = DLSN.InitialDLSN;
// Measures the cost of renaming a table that has 'count' partitions.
// Each timed sample performs TWO alterTable calls (rename away and rename
// back) so the benchmark is idempotent across iterations; reported numbers
// therefore cover two renames, not one.  The SD location is cleared before
// the copy — presumably so the rename does not attempt a data move; verify
// against alterTable semantics.  On setup failure prints the error and
// returns empty statistics; the table is always dropped in finally.
static DescriptiveStatistics benchmarkRenameTable(@NotNull MicroBenchmark bench, @NotNull BenchData data, int count) { final HMSClient client = data.getClient(); String dbName = data.dbName; String tableName = data.tableName; createPartitionedTable(client, dbName, tableName); try { addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); Table oldTable = client.getTable(dbName, tableName); oldTable.getSd().setLocation(""); Table newTable = oldTable.deepCopy(); newTable.setTableName(tableName + "_renamed"); return bench.measure( () -> { // Measuring 2 renames, so the tests are idempotent throwingSupplierWrapper(() -> client.alterTable(oldTable.getDbName(), oldTable.getTableName(), newTable)); throwingSupplierWrapper(() -> client.alterTable(newTable.getDbName(), newTable.getTableName(), oldTable)); } ); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); } }
@Test public void testGetConfigValue() { String val = "value"; if (!isThriftClient) { try { assertEquals(client.getConfigValue("hive.key1", val), "value1"); assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com"); assertEquals(client.getConfigValue("hive.key3", val), ""); assertEquals(client.getConfigValue("hive.key4", val), "0"); assertEquals(client.getConfigValue("hive.key5", val), val); assertEquals(client.getConfigValue(null, val), val); } catch (TException e) { e.printStackTrace(); fail(); } } boolean threwException = false; try { // Attempting to get the password should throw an exception client.getConfigValue("javax.jdo.option.ConnectionPassword", "password"); } catch (ConfigValSecurityException e) { threwException = true; } catch (TException e) { e.printStackTrace(); fail(); } assert (threwException); }
/**
 * Connects to a Suggestor Thrift service on localhost:2000, requests
 * completions for the first command-line argument, and prints them.
 *
 * Fixes a resource leak: the transport is now closed in a finally block, so
 * it is released even when complete() throws.  The separate
 * TTransportException catch was redundant — it is a subclass of TException
 * and received identical handling.
 */
public static void main(String[] args) {
    TTransport transport = null;
    try {
        transport = new TSocket("localhost", 2000);
        TProtocol protocol = new TBinaryProtocol(transport);
        Suggestor.Client client = new Suggestor.Client(protocol);
        transport.open();
        List<String> suggestions = client.complete(args[0], "text");
        System.out.println(suggestions);
    } catch (TException e) {
        e.printStackTrace();
    } finally {
        if (transport != null && transport.isOpen()) {
            transport.close();
        }
    }
}
e.printStackTrace(); } catch (TException e) { e.printStackTrace();
// Registers non-XSEDE compute resources; currently only the UMass GHPCC LSF
// cluster (SSH keys, port 22, mpiexec job launcher, empty queue list) and
// stores the resulting id in lsfResourceId.
// NOTE(review): a TException is only printed, leaving lsfResourceId unset —
// callers should tolerate that or this should rethrow.
public void registerNonXSEDEHosts() { try { System.out.println("\n #### Registering Non-XSEDE Computational Resources #### \n"); //Register LSF resource List<BatchQueue> lsfQueues = new ArrayList<BatchQueue>(); lsfResourceId = registerComputeHost("ghpcc06.umassrc.org", "LSF Cluster", ResourceJobManagerType.LSF, "push", "source /etc/bashrc;/lsf/9.1/linux2.6-glibc2.3-x86_64/bin", SecurityProtocol.SSH_KEYS, 22, "mpiexec", lsfQueues); System.out.println("LSF Resource Id is " + lsfResourceId); } catch (TException e) { e.printStackTrace(); } }
// Registers the sample Echo application deployment (/bin/echo, serial) on the
// localhost compute resource and prints the resulting deployment id.
// NOTE(review): a TException is only printed and the deployment silently not
// registered — consider surfacing the failure to the caller.
public void registerLocalApps (){ try { System.out.println("#### Registering Application Deployments on Localhost #### \n"); //Register Echo String echoAppDeployId = airavataClient.registerApplicationDeployment(new AuthzToken(""), DEFAULT_GATEWAY, RegisterSampleApplicationsUtils.createApplicationDeployment(echoModuleId, localhostId, "/bin/echo", ApplicationParallelismType.SERIAL, echoDescription, null, null, null)); System.out.println("Echo on localhost Id " + echoAppDeployId); }catch (TException e) { e.printStackTrace(); } }
/**
 * Closes local resources via super.close(), then — in remote mode — asks the
 * server to clean up and closes the Thrift transport.
 *
 * The original wrapped everything in one try: an IOException from
 * super.close() skipped the remote cleanup entirely, leaking server-side
 * state and the transport.  The steps are now independent; exceptions are
 * still only printed, preserving the method's best-effort contract.
 */
public void close() {
    try {
        super.close();
    } catch (IOException ioe) {
        ioe.printStackTrace();
    }
    // Attempt remote cleanup even if the local close failed.
    if (remoteMode) {
        try {
            client.clean();
            transport.close();
        } catch (TException e) {
            e.printStackTrace();
        }
    }
}
public String toThriftJSONString() throws IOException { org.apache.hadoop.hive.ql.plan.api.Query q = getQueryPlan(); TMemoryBuffer tmb = new TMemoryBuffer(q.toString().length() * 5); TJSONProtocol oprot = new TJSONProtocol(tmb); try { q.write(oprot); } catch (TException e) { // TODO Auto-generated catch block e.printStackTrace(); return q.toString(); } return tmb.toString("UTF-8"); }
/**
 * Writes a Thrift EXCEPTION reply for {@code methodName} (sequence {@code id})
 * carrying an INTERNAL_ERROR TApplicationException with {@code errMsg}.
 *
 * Per standard Thrift framing (as produced by TBaseProcessor), the exception
 * body must be written BETWEEN writeMessageBegin and writeMessageEnd; the
 * original wrote it after writeMessageEnd, producing a frame that standard
 * clients cannot decode.  A serialization failure is still only printed,
 * preserving the original best-effort behavior.
 */
public static void createErrorTMessage(TProtocol oprot, String methodName, int id, String errMsg) throws Exception {
    TMessage tmessage = new TMessage(methodName, TMessageType.EXCEPTION, id);
    TApplicationException ex = new TApplicationException(TApplicationException.INTERNAL_ERROR, errMsg);
    oprot.writeMessageBegin(tmessage);
    try {
        ex.write(oprot);
    } catch (TException e) {
        e.printStackTrace();
    }
    oprot.writeMessageEnd();
} }