void noMoreSplits() { if (setIf(stateReference, State.noMoreSplits(), state -> state.getKind() == INITIAL)) { // Stop the split loader before finishing the queue. // Once the queue is finished, it will always return a completed future to avoid blocking any caller. // This could lead to a short period of busy loop in splitLoader (although unlikely in general setup). splitLoader.stop(); queues.finish(); } }
@Override public void close() { if (setIf(stateReference, State.closed(), state -> state.getKind() == INITIAL || state.getKind() == NO_MORE_SPLITS)) { // Stop the split loader before finishing the queue. // Once the queue is finished, it will always return a completed future to avoid blocking any caller. // This could lead to a short period of busy loop in splitLoader (although unlikely in general setup). splitLoader.stop(); queues.finish(); } }
// Validates that every non-partition column is representable in this storage format.
// Only the AVRO format performs any checking; all other formats accept everything.
public void validateColumns(List<HiveColumnHandle> handles)
{
    if (this != AVRO) {
        return;
    }
    handles.stream()
            .filter(handle -> !handle.isPartitionKey())
            .forEach(handle -> validateAvroType(handle.getHiveType().getTypeInfo(), handle.getName()));
}
// Renames a column through the metastore. Rejected when the table's columns are
// governed by an explicit Avro schema, since the metastore columns would diverge.
@Override
public void renameColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle source, String target)
{
    HiveTableHandle table = (HiveTableHandle) tableHandle;
    failIfAvroSchemaIsSet(table);
    HiveColumnHandle column = (HiveColumnHandle) source;
    metastore.renameColumn(table.getSchemaName(), table.getTableName(), column.getName(), target);
}
// Returns the staging directory root for an insert. Only meaningful when the
// write mode stages data and moves it to the target directory afterwards.
protected Path getStagingPathRoot(ConnectorInsertTableHandle insertTableHandle)
{
    HiveInsertTableHandle hiveHandle = (HiveInsertTableHandle) insertTableHandle;
    WriteInfo writeInfo = getLocationService().getQueryWriteInfo(hiveHandle.getLocationHandle());
    if (writeInfo.getWriteMode() == STAGE_AND_MOVE_TO_TARGET_DIRECTORY) {
        return writeInfo.getWritePath();
    }
    throw new AssertionError("writeMode is not STAGE_AND_MOVE_TO_TARGET_DIRECTORY");
}
// Builds a test session whose partition-statistics sample size is set to the given value.
private ConnectorSession sampleSize(int sampleSize)
{
    HiveClientConfig config = getHiveClientConfig().setPartitionStatisticsSampleSize(sampleSize);
    HiveSessionProperties sessionProperties = new HiveSessionProperties(
            config,
            new OrcFileWriterConfig(),
            new ParquetFileWriterConfig());
    return new TestingConnectorSession(sessionProperties.getSessionProperties());
}
// Returns the write (staging) path root for a CREATE TABLE AS output handle.
protected Path getStagingPathRoot(ConnectorOutputTableHandle outputTableHandle)
{
    HiveOutputTableHandle hiveHandle = (HiveOutputTableHandle) outputTableHandle;
    WriteInfo writeInfo = getLocationService().getQueryWriteInfo(hiveHandle.getLocationHandle());
    return writeInfo.getWritePath();
}
// Returns the final target path root for an insert handle.
protected Path getTargetPathRoot(ConnectorInsertTableHandle insertTableHandle)
{
    HiveInsertTableHandle hiveHandle = (HiveInsertTableHandle) insertTableHandle;
    WriteInfo writeInfo = getLocationService().getQueryWriteInfo(hiveHandle.getLocationHandle());
    return writeInfo.getTargetPath();
}
// Packages the location handle's target path, write path, and write mode into a WriteInfo.
@Override
public WriteInfo getQueryWriteInfo(LocationHandle locationHandle)
{
    Path targetPath = locationHandle.getTargetPath();
    Path writePath = locationHandle.getWritePath();
    return new WriteInfo(targetPath, writePath, locationHandle.getWriteMode());
}
// Creates an ORC file writer factory backed by a test HDFS environment and default configs.
public static OrcFileWriterFactory getDefaultOrcFileWriterFactory(HiveClientConfig hiveClientConfig)
{
    return new OrcFileWriterFactory(
            createTestHdfsEnvironment(hiveClientConfig),
            TYPE_MANAGER,
            new NodeVersion("test_version"),
            hiveClientConfig,
            new FileFormatDataSourceStats(),
            new OrcFileWriterConfig());
}
// Resolves the decimal type, if any, from a HiveType by delegating to the
// string-based overload on the type's full Hive type name.
public static Optional<DecimalType> getDecimalType(HiveType hiveType)
{
    String hiveTypeName = hiveType.getHiveTypeName().toString();
    return getDecimalType(hiveTypeName);
}
void fail(Throwable e) { // The error must be recorded before setting the finish marker to make sure // isFinished will observe failure instead of successful completion. // Only record the first error message. if (setIf(stateReference, State.failed(e), state -> state.getKind() == INITIAL)) { // Stop the split loader before finishing the queue. // Once the queue is finished, it will always return a completed future to avoid blocking any caller. // This could lead to a short period of busy loop in splitLoader (although unlikely in general setup). splitLoader.stop(); queues.finish(); } }
// Default constructor used by the test framework. Delegates to the parameterized
// constructor with a query-runner supplier loading the ORDERS and CUSTOMER tables,
// a bucketed session, the standard Hive catalog, and the Hive type translator.
@SuppressWarnings("unused") public TestHiveIntegrationSmokeTest() { this(() -> createQueryRunner(ORDERS, CUSTOMER), createBucketedSession(), HIVE_CATALOG, new HiveTypeTranslator()); }
// Starts a file-format assertion named after the given storage format,
// pre-configured to use that format.
private FileFormatAssertion assertThatFileFormat(HiveStorageFormat hiveStorageFormat)
{
    FileFormatAssertion assertion = new FileFormatAssertion(hiveStorageFormat.name());
    return assertion.withStorageFormat(hiveStorageFormat);
}
// Discards all temporary files produced so far, then rolls back the underlying writer.
@Override
public void rollback()
{
    tempFiles.forEach(tempFile -> cleanupFile(tempFile.getPath()));
    outputWriter.rollback();
}
// Fetches the partitions matching the constraint and materializes them as a list
// via the HivePartitionResult overload.
private List<HivePartition> getPartitionsAsList(ConnectorTableHandle tableHandle, Constraint<ColumnHandle> constraint)
{
    return getPartitionsAsList(partitionManager.getPartitions(metastore, tableHandle, constraint));
}
private boolean shouldUseTemporaryDirectory(ConnectorSession session, HdfsContext context, Path path) { return isTemporaryStagingDirectoryEnabled(session) // skip using temporary directory for S3 && !isS3FileSystem(context, hdfsEnvironment, path); }
// Builds a Hive bucket function over the Hive types carried by the partitioning handle.
@Override
public BucketFunction getBucketFunction(
        ConnectorTransactionHandle transactionHandle,
        ConnectorSession session,
        ConnectorPartitioningHandle partitioningHandle,
        List<Type> partitionChannelTypes,
        int bucketCount)
{
    HivePartitioningHandle hivePartitioningHandle = (HivePartitioningHandle) partitioningHandle;
    return new HiveBucketFunction(bucketCount, hivePartitioningHandle.getHiveTypes());
}
// Opens a fresh Hive transaction against a newly created metadata instance.
protected Transaction newTransaction()
{
    return new HiveTransaction(
            transactionManager,
            metadataFactory.get());
}
// Assembles the table-level WriteInfo from the location handle's
// target path, write path, and write mode.
@Override
public WriteInfo getTableWriteInfo(LocationHandle locationHandle)
{
    return new WriteInfo(
            locationHandle.getTargetPath(),
            locationHandle.getWritePath(),
            locationHandle.getWriteMode());
}