/**
 * Creates an HdfsContext scoped to a specific schema and table, capturing the
 * identity, source, and query id from the supplied session.
 *
 * @throws NullPointerException if any argument, or the session's identity or source, is null
 */
public HdfsContext(ConnectorSession session, String schemaName, String tableName)
{
    requireNonNull(session, "session is null");
    requireNonNull(schemaName, "schemaName is null");
    requireNonNull(tableName, "tableName is null");
    this.identity = requireNonNull(session.getIdentity(), "session.getIdentity() is null");
    // Fix: message previously read just "session.getSource()", inconsistent with the other checks
    this.source = requireNonNull(session.getSource(), "session.getSource() is null");
    this.queryId = Optional.of(session.getQueryId());
    this.schemaName = Optional.of(schemaName);
    this.tableName = Optional.of(tableName);
}
/**
 * Creates an HdfsContext scoped to a schema only (no table), capturing the
 * identity, source, and query id from the supplied session.
 *
 * @throws NullPointerException if any argument, or the session's identity or source, is null
 */
public HdfsContext(ConnectorSession session, String schemaName)
{
    requireNonNull(session, "session is null");
    requireNonNull(schemaName, "schemaName is null");
    this.identity = requireNonNull(session.getIdentity(), "session.getIdentity() is null");
    // Fix: message previously read just "session.getSource()", inconsistent with the other checks
    this.source = requireNonNull(session.getSource(), "session.getSource() is null");
    this.queryId = Optional.of(session.getQueryId());
    this.schemaName = Optional.of(schemaName);
    this.tableName = Optional.empty();
}
/**
 * Creates the split source for a single table scan.
 *
 * <p>All reference arguments are null-checked and stored as-is; the only derived values
 * are the outstanding-splits byte budget (converted from {@code maxOutstandingSplitsSize})
 * and the split-size limits read from session properties.
 *
 * @throws NullPointerException if any reference argument is null
 * @throws ArithmeticException if {@code maxOutstandingSplitsSize} exceeds Integer.MAX_VALUE bytes
 *         (via {@code toIntExact})
 */
private HiveSplitSource(
        ConnectorSession session,
        String databaseName,
        String tableName,
        TupleDomain<? extends ColumnHandle> compactEffectivePredicate,
        PerBucket queues,
        int maxInitialSplits,
        DataSize maxOutstandingSplitsSize,
        HiveSplitLoader splitLoader,
        AtomicReference<State> stateReference,
        CounterStat highMemorySplitSourceCounter)
{
    requireNonNull(session, "session is null");
    // Only the query id is retained from the session object itself
    this.queryId = session.getQueryId();
    this.databaseName = requireNonNull(databaseName, "databaseName is null");
    this.tableName = requireNonNull(tableName, "tableName is null");
    this.compactEffectivePredicate = requireNonNull(compactEffectivePredicate, "compactEffectivePredicate is null");
    this.queues = requireNonNull(queues, "queues is null");
    // Budget is tracked in bytes as an int; toIntExact throws rather than silently truncating
    this.maxOutstandingSplitsBytes = toIntExact(maxOutstandingSplitsSize.toBytes());
    this.splitLoader = requireNonNull(splitLoader, "splitLoader is null");
    this.stateReference = requireNonNull(stateReference, "stateReference is null");
    this.highMemorySplitSourceCounter = requireNonNull(highMemorySplitSourceCounter, "highMemorySplitSourceCounter is null");
    // Split sizing comes from session properties, not constructor arguments
    this.maxSplitSize = getMaxSplitSize(session);
    this.maxInitialSplitSize = getMaxInitialSplitSize(session);
    // Mutable countdown: decremented as initial (smaller) splits are handed out
    this.remainingInitialSplits = new AtomicInteger(maxInitialSplits);
}
/**
 * Commits a completed insert: parses the shard fragments produced by the writers and
 * registers them against the target table via the shard manager, then clears the
 * pending rollback action.
 *
 * @return {@link Optional#empty()} — this connector produces no output metadata
 */
@Override
public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics)
{
    RaptorInsertTableHandle handle = (RaptorInsertTableHandle) insertHandle;
    long transactionId = handle.getTransactionId();
    long tableId = handle.getTableId();
    Optional<String> externalBatchId = handle.getExternalBatchId();
    List<ColumnInfo> columns = handle.getColumnHandles().stream()
            .map(ColumnInfo::fromHandle)
            .collect(toList());
    long updateTime = session.getStartTime();

    Collection<ShardInfo> shards = parseFragments(fragments);
    // Fix: use the already-extracted tableId local instead of re-reading handle.getTableId(),
    // consistent with the commitShards call below
    log.info("Committing insert into tableId %s (queryId: %s, shards: %s, columns: %s)", tableId, session.getQueryId(), shards.size(), columns.size());
    shardManager.commitShards(transactionId, tableId, columns, shards, externalBatchId, updateTime);

    clearRollback();

    return Optional.empty();
}
ImmutableMap.<String, String>builder() .put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString()) .put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId()) .build(), validationInputFactory));
tableName.getTableName(), handles, session.getQueryId(), metastore.generatePageSinkMetadata(tableName), locationHandle,
.put(PRESTO_VIEW_FLAG, "true") .put(PRESTO_VERSION_NAME, prestoVersion) .put(PRESTO_QUERY_ID_NAME, session.getQueryId()) .build();
/**
 * Builds the metastore {@link Partition} object for a newly written partition.
 *
 * <p>Column layout is copied from the table; partition values are extracted from the
 * partition update's name. Storage format follows the table when the
 * "respect table format" session property is set, otherwise the session's configured
 * Hive storage format is used. The Presto version and query id are recorded as
 * partition parameters.
 */
private Partition buildPartitionObject(ConnectorSession session, Table table, PartitionUpdate partitionUpdate)
{
    ImmutableMap<String, String> partitionParameters = ImmutableMap.<String, String>builder()
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .build();

    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(extractPartitionValues(partitionUpdate.getName()))
            .setParameters(partitionParameters)
            .withStorage(storageBuilder -> storageBuilder
                    .setStorageFormat(isRespectTableFormat(session)
                            ? table.getStorage().getStorageFormat()
                            : fromHiveStorageFormat(HiveSessionProperties.getHiveStorageFormat(session)))
                    .setLocation(partitionUpdate.getTargetPath().toString())
                    .setBucketProperty(table.getStorage().getBucketProperty())
                    .setSerdeParameters(table.getStorage().getSerdeParameters()))
            .build();
}
session.getQueryId(), schemaName, tableName,
tableName, columnHandles, session.getQueryId(), metastore.generatePageSinkMetadata(schemaTableName), locationHandle,
// NOTE(review): fragment of a larger (not visible) test method — enclosing scope declares
// queryId and transaction. Presumably the query id is captured for later assertions; confirm
// against the enclosing test.
ConnectorSession session = newSession();
ConnectorMetadata metadata = transaction.getMetadata();
queryId = session.getQueryId();
// NOTE(review): fragment of a larger (not visible) test method — enclosing scope declares
// queryId and transaction. Presumably the query id is captured for later assertions; confirm
// against the enclosing test.
ConnectorSession session = newSession();
ConnectorMetadata metadata = transaction.getMetadata();
queryId = session.getQueryId();
/**
 * Reconstructs an engine-level {@link Session} from a connector transaction handle and
 * connector session.
 *
 * <p>Identity, query id, time zone, locale, and start time are copied from the given
 * connector session; catalog ("catalog"), schema ("schema"), and path ("path") are
 * fixed placeholder values.
 */
public static Session toSession(ConnectorTransactionHandle transactionHandle, ConnectorSession session)
{
    GlobalSystemTransactionHandle handle = (GlobalSystemTransactionHandle) transactionHandle;
    TransactionId transactionId = handle.getTransactionId();
    ConnectorIdentity connectorIdentity = session.getIdentity();
    Identity identity = new Identity(connectorIdentity.getUser(), connectorIdentity.getPrincipal());
    SessionPropertyManager sessionPropertyManager = new SessionPropertyManager(SYSTEM_SESSION_PROPERTIES);
    return Session.builder(sessionPropertyManager)
            .setQueryId(new QueryId(session.getQueryId()))
            .setTransactionId(transactionId)
            .setCatalog("catalog")
            .setSchema("schema")
            .setPath(new SqlPath(Optional.of("path")))
            .setIdentity(identity)
            .setTimeZoneKey(session.getTimeZoneKey())
            .setLocale(session.getLocale())
            .setStartTime(session.getStartTime())
            .build();
}
}
/**
 * Reconstructs an engine-level {@link Session} from a connector transaction handle and
 * connector session.
 *
 * <p>Identity, query id, time zone, locale, and start time are copied from the given
 * connector session; catalog ("catalog"), schema ("schema"), and path ("path") are
 * fixed placeholder values.
 */
public static Session toSession(ConnectorTransactionHandle transactionHandle, ConnectorSession session)
{
    GlobalSystemTransactionHandle handle = (GlobalSystemTransactionHandle) transactionHandle;
    TransactionId transactionId = handle.getTransactionId();
    ConnectorIdentity connectorIdentity = session.getIdentity();
    Identity identity = new Identity(connectorIdentity.getUser(), connectorIdentity.getPrincipal());
    SessionPropertyManager sessionPropertyManager = new SessionPropertyManager(SYSTEM_SESSION_PROPERTIES);
    return Session.builder(sessionPropertyManager)
            .setQueryId(new QueryId(session.getQueryId()))
            .setTransactionId(transactionId)
            .setCatalog("catalog")
            .setSchema("schema")
            .setPath(new SqlPath(Optional.of("path")))
            .setIdentity(identity)
            .setTimeZoneKey(session.getTimeZoneKey())
            .setLocale(session.getLocale())
            .setStartTime(session.getStartTime())
            .build();
}
}
// NOTE(review): fragment of a larger (not visible) test method — enclosing scope declares
// queryId and transaction. Presumably the query id is captured for later assertions; confirm
// against the enclosing test.
ConnectorSession session = newSession();
ConnectorMetadata metadata = transaction.getMetadata();
queryId = session.getQueryId();
// NOTE(review): fragment of a larger (not visible) test method — enclosing scope declares
// queryId and transaction. Presumably the query id is captured for later assertions; confirm
// against the enclosing test.
ConnectorSession session = newSession();
ConnectorMetadata metadata = transaction.getMetadata();
queryId = session.getQueryId();
// NOTE(review): fragment of a larger (not visible) test method — enclosing scope declares
// queryId, writePath, targetPath, metadata, session, and tableName.
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
// Capture the query id plus the staging and target path roots of the in-flight insert;
// presumably asserted on later in the enclosing test — confirm there.
queryId = session.getQueryId();
writePath = getStagingPathRoot(insertTableHandle);
targetPath = getTargetPathRoot(insertTableHandle);
// NOTE(review): fragment of a larger (not visible) test method — enclosing scope declares
// queryId, writePath, targetPath, metadata, session, and tableName.
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
// Capture the query id plus the staging and target path roots of the in-flight insert;
// presumably asserted on later in the enclosing test — confirm there.
queryId = session.getQueryId();
writePath = getStagingPathRoot(insertTableHandle);
targetPath = getTargetPathRoot(insertTableHandle);
.setParameters(ImmutableMap.of( PRESTO_VERSION_NAME, TEST_SERVER_VERSION, PRESTO_QUERY_ID_NAME, session.getQueryId())) .setDataColumns(columns) .setPartitionColumns(partitionColumns);
.setParameters(ImmutableMap.of( PRESTO_VERSION_NAME, TEST_SERVER_VERSION, PRESTO_QUERY_ID_NAME, session.getQueryId())) .setDataColumns(columns) .setPartitionColumns(partitionColumns);