/**
 * Opens a new {@link Transaction} that wraps a fresh metadata instance
 * obtained from {@code metadataFactory}.
 */
protected Transaction newTransaction()
{
    return new HiveTransaction(transactionManager, metadataFactory.get());
}
// NOTE(review): fragment — the HiveMetadataFactory(config, metastoreClient, ...) argument
// list continues beyond this chunk; confirm the full constructor call at the original site.
locationService = new HiveLocationService(hdfsEnvironment); JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( config, metastoreClient,
/**
 * Builds a fresh {@link ConnectorMetadata} instance via {@code metadataFactory}.
 */
protected ConnectorMetadata newMetadata()
{
    return metadataFactory.create();
}
// NOTE(review): fragment — the HiveMetadataFactory(metastoreClient, hdfsEnvironment, ...)
// argument list continues beyond this chunk; confirm the full constructor call at the original site.
locationService = new HiveLocationService(hdfsEnvironment); JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( metastoreClient, hdfsEnvironment,
/**
 * Starts a connector transaction: validates the requested isolation level,
 * registers a new transaction handle, and associates it with a freshly
 * created metadata instance.
 */
@Override
public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly)
{
    // Only READ_UNCOMMITTED is supported; reject stronger isolation requests.
    checkConnectorSupports(READ_UNCOMMITTED, isolationLevel);
    ConnectorTransactionHandle transactionHandle = new HiveTransactionHandle();
    // Metadata creation is done with the connector classloader pinned to the
    // current thread; the resource variable is intentionally unused.
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        transactions.put(transactionHandle, metadataFactory.create());
    }
    return transactionHandle;
}
// NOTE(review): fragment — the HiveMetadataFactory(connectorId, hiveClientConfig, ...)
// argument list continues beyond this chunk; confirm the full constructor call at the original site.
TypeRegistry typeManager = new TypeRegistry(); JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( connectorId, hiveClientConfig,
/**
 * Reads every split of the S3-backed test table through the page source
 * provider and checks that the {@code t_bigint} column sums to the expected
 * total (78300).
 */
@Test
public void testGetRecordsS3()
        throws Exception
{
    HiveTransactionHandle transaction = new HiveTransactionHandle();
    HiveMetadata metadata = metadataFactory.create();
    ConnectorTableHandle tableHandle = getTableHandle(metadata, tableS3);

    List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    // Resolve the (single) table layout with an unconstrained predicate.
    List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(SESSION, tableHandle, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
    assertEquals(layoutHandle.getPartitions().get().size(), 1);

    // Scan every split and accumulate the t_bigint column.
    ConnectorSplitSource splitSource = splitManager.getSplits(transaction, SESSION, layoutHandle);
    long total = 0;
    for (ConnectorSplit split : getAllSplits(splitSource)) {
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction, SESSION, split, columnHandles)) {
            MaterializedResult result = materializeSourceDataStream(SESSION, pageSource, getTypes(columnHandles));
            for (MaterializedRow row : result) {
                total += (Long) row.getField(columnIndex.get("t_bigint"));
            }
        }
    }
    assertEquals(total, 78300);
}
/**
 * Opens a new {@link Transaction} that wraps a fresh metadata instance
 * obtained from {@code metadataFactory}.
 */
protected Transaction newTransaction()
{
    return new HiveTransaction(transactionManager, metadataFactory.get());
}
// NOTE(review): fragment — the HiveMetadataFactory(config, metastoreClient, ...) argument
// list continues beyond this chunk; confirm the full constructor call at the original site.
locationService = new HiveLocationService(hdfsEnvironment); JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( config, metastoreClient,
// NOTE(review): isolated statement — builds a fresh HiveMetadata; the enclosing
// method is not visible in this chunk.
HiveMetadata metadata = metadataFactory.create();
/**
 * Opens a new {@link Transaction} that wraps a fresh metadata instance
 * obtained from {@code metadataFactory}.
 */
protected Transaction newTransaction()
{
    return new HiveTransaction(transactionManager, metadataFactory.get());
}
// NOTE(review): fragment — the HiveMetadataFactory(connectorId, metastoreClient, ...)
// argument list continues beyond this chunk; confirm the full constructor call at the original site.
TypeManager typeManager = new TypeRegistry(); JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( connectorId, metastoreClient,
/**
 * Opens a new {@link Transaction} that wraps a fresh metadata instance
 * obtained from {@code metadataFactory}.
 */
protected Transaction newTransaction()
{
    return new HiveTransaction(transactionManager, metadataFactory.get());
}
// NOTE(review): fragment — the HiveMetadataFactory(metastoreClient, hdfsEnvironment, ...)
// argument list continues beyond this chunk; confirm the full constructor call at the original site.
locationService = new HiveLocationService(hdfsEnvironment); JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( metastoreClient, hdfsEnvironment,