@Test
public void testJsonRoundTrip()
{
    // Serialize the handle to JSON and read it back; the decoded copy must
    // compare equal to the original, proving the codec loses no state.
    JsonCodec<ExampleTableHandle> handleCodec = jsonCodec(ExampleTableHandle.class);
    String serialized = handleCodec.toJson(tableHandle);
    ExampleTableHandle deserialized = handleCodec.fromJson(serialized);
    assertEquals(deserialized, tableHandle);
}
.filter(HiveColumnHandle::isPartitionKey) .map(HiveColumnHandle::getName) .collect(toImmutableList()); if (!Objects.equals(partitionColumnNames, actualPartitionColumnNames)) { throw new PrestoException(INVALID_PROCEDURE_ARGUMENT, "input partition column names doesn't match actual partition column names"); .collect(toImmutableList()); if (metastore.getPartition(schema, table, partitionStringValues).isPresent()) { throw new PrestoException(ALREADY_EXISTS, "Partition already exists"); WriteInfo writeInfo = locationService.getPartitionWriteInfo(hiveInsertTableHandle.getLocationHandle(), Optional.empty(), partitionName); Slice serializedPartitionUpdate = Slices.wrappedBuffer( partitionUpdateJsonCodec.toJsonBytes( new PartitionUpdate( partitionName, writeInfo.getWritePath(), writeInfo.getTargetPath(), ImmutableList.of(), 0, 0,
@Test
public void testGetFailedNodes()
{
    // Hit the failed-nodes endpoint; this freshly started server has no
    // failed nodes, so the response list must be empty.
    List<Stats> failedNodes = client.execute(
            prepareGet()
                    .setUri(server.resolve("/v1/node/failed"))
                    .build(),
            createJsonResponseHandler(listJsonCodec(Stats.class)));
    assertTrue(failedNodes.isEmpty());
}
}
/**
 * Reads benchmark suites from a JSON file whose top-level object maps
 * suite names to their options.
 *
 * @param file readable JSON file describing the suites; must not be null
 * @return an immutable list with one {@link Suite} per top-level JSON key
 * @throws IOException if the file contents cannot be read
 */
public static List<Suite> readSuites(File file)
        throws IOException
{
    requireNonNull(file, "file is null");
    checkArgument(file.canRead(), "Cannot read file: %s", file);

    byte[] contents = Files.readAllBytes(file.toPath());
    Map<String, OptionsJson> optionsByName = mapJsonCodec(String.class, OptionsJson.class).fromJson(contents);

    // Each named options entry becomes one suite.
    ImmutableList.Builder<Suite> suites = ImmutableList.builder();
    optionsByName.forEach((name, options) -> suites.add(options.toSuite(name)));
    return suites.build();
}
/**
 * Builds an expression that rethrows {@code exception} when evaluated at query
 * execution time. The failure is serialized to JSON and fed through
 * {@code json_parse} into the {@code fail} function; the result is cast to
 * {@code type} so the expression can stand in for the one that failed.
 *
 * @param exception failure to defer until execution; must not be null
 * @param type the type the resulting expression must report
 * @return a cast {@code fail(json_parse(...))} call expression
 */
private static Expression createFailureFunction(RuntimeException exception, Type type)
{
    // Message follows the codebase convention of naming the parameter
    // (was "Exception is null").
    requireNonNull(exception, "exception is null");

    String failureInfo = JsonCodec.jsonCodec(FailureInfo.class).toJson(Failures.toFailure(exception).toFailureInfo());
    FunctionCall jsonParse = new FunctionCall(QualifiedName.of("json_parse"), ImmutableList.of(new StringLiteral(failureInfo)));
    FunctionCall failureFunction = new FunctionCall(QualifiedName.of("fail"), ImmutableList.of(jsonParse));

    return new Cast(failureFunction, type.getTypeSignature().toString());
}
static class HiveTransaction implements Transaction { private final HiveTransactionManager transactionManager; private final ConnectorTransactionHandle transactionHandle; private boolean closed; public HiveTransaction(HiveTransactionManager transactionManager, HiveMetadata hiveMetadata) { this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); this.transactionHandle = new HiveTransactionHandle(); transactionManager.put(transactionHandle, hiveMetadata); getMetastore().testOnlyThrowOnCleanupFailures(); } @Override public ConnectorMetadata getMetadata() { return transactionManager.get(transactionHandle); } @Override public SemiTransactionalHiveMetastore getMetastore(String schema) { return getMetastore(); } private SemiTransactionalHiveMetastore getMetastore() { return ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore();
/**
 * Converts an execution failure into the event-reporting form, enriched with
 * the identity and host of the failed task when one can be located in the
 * output stage tree.
 *
 * @param failureInfo the failure, or null when the query did not fail
 * @param outputStage root stage to search for the failed task
 * @return the failure description, or empty when {@code failureInfo} is null
 */
private Optional<QueryFailureInfo> createQueryFailureInfo(ExecutionFailureInfo failureInfo, Optional<StageInfo> outputStage)
{
    if (failureInfo == null) {
        return Optional.empty();
    }

    // Locate the task that failed (if any) to attribute the failure to a host.
    Optional<TaskInfo> failedTask = outputStage.flatMap(QueryMonitor::findFailedTask);
    Optional<String> failedTaskId = failedTask.map(task -> task.getTaskStatus().getTaskId().toString());
    Optional<String> failedHost = failedTask.map(task -> task.getTaskStatus().getSelf().getHost());

    return Optional.of(new QueryFailureInfo(
            failureInfo.getErrorCode(),
            Optional.ofNullable(failureInfo.getType()),
            Optional.ofNullable(failureInfo.getMessage()),
            failedTaskId,
            failedHost,
            executionFailureInfoCodec.toJson(failureInfo)));
}
@Test
public void testMatch()
{
    ResourceGroupId insertGroup = new ResourceGroupId(ImmutableList.of("global", "test", "user", "insert"));
    ResourceGroupId selectGroup = new ResourceGroupId(ImmutableList.of("global", "test", "user", "select"));

    // Register one selector per query type for the same source.
    dao.insertExactMatchSelector("test", "@test@test_pipeline", INSERT.name(), CODEC.toJson(insertGroup));
    dao.insertExactMatchSelector("test", "@test@test_pipeline", SELECT.name(), CODEC.toJson(selectGroup));

    DbSourceExactMatchSelector selector = new DbSourceExactMatchSelector("test", dao);

    // No query type supplied: nothing matches.
    assertEquals(
            selector.match(pipelineCriteria("@test@test_pipeline", ImmutableSet.of("tag"), Optional.empty())),
            Optional.empty());
    // Matching query types resolve to their respective groups.
    assertEquals(
            selector.match(pipelineCriteria("@test@test_pipeline", ImmutableSet.of("tag"), Optional.of(INSERT.name()))).map(SelectionContext::getResourceGroupId),
            Optional.of(insertGroup));
    assertEquals(
            selector.match(pipelineCriteria("@test@test_pipeline", ImmutableSet.of("tag"), Optional.of(SELECT.name()))).map(SelectionContext::getResourceGroupId),
            Optional.of(selectGroup));
    // Unregistered query type: no match.
    assertEquals(
            selector.match(pipelineCriteria("@test@test_pipeline", ImmutableSet.of("tag"), Optional.of(DELETE.name()))),
            Optional.empty());
    // Unregistered source: no match even for a registered query type.
    assertEquals(
            selector.match(pipelineCriteria("@test@test_new", ImmutableSet.of(), Optional.of(INSERT.name()))),
            Optional.empty());
}

// Builds the selection criteria shared by all cases above: authenticated
// "testuser" with the given source, tags, and optional query type.
private static SelectionCriteria pipelineCriteria(String source, ImmutableSet<String> tags, Optional<String> queryType)
{
    return new SelectionCriteria(true, "testuser", Optional.of(source), tags, EMPTY_RESOURCE_ESTIMATES, queryType);
}
}
/**
 * Loads every {@code *.json} table description found in the configured
 * directory and indexes it by schema-qualified table name.
 *
 * @param config supplies the description directory and the default schema
 * @param codec decodes each file into a table description
 * @return immutable map of table name to description; empty when the
 *         directory yields no files
 */
private Map<SchemaTableName, ElasticsearchTableDescription> createTableDescriptions(ElasticsearchConnectorConfig config, JsonCodec<ElasticsearchTableDescription> codec)
{
    Optional<File[]> descriptionFiles = listFiles(config.getTableDescriptionDirectory());
    if (!descriptionFiles.isPresent()) {
        return ImmutableMap.of();
    }

    ImmutableMap.Builder<SchemaTableName, ElasticsearchTableDescription> tables = ImmutableMap.builder();
    for (File candidate : descriptionFiles.get()) {
        // Only regular files with a .json extension hold table definitions.
        if (candidate.isFile() && candidate.getName().endsWith(".json")) {
            ElasticsearchTableDescription description;
            try {
                description = codec.fromJson(readAllBytes(candidate.toPath()));
            }
            catch (IOException e) {
                throw new UncheckedIOException(e);
            }
            // Descriptions without an explicit schema land in the default one.
            String schema = firstNonNull(description.getSchemaName(), config.getDefaultSchema());
            tables.put(new SchemaTableName(schema, description.getTableName()), description);
        }
    }
    return tables.build();
}
// Updates selector 2 in the database, mirrors the change in the expected
// map, and verifies the DAO reads back exactly the expected state.
private static void testSelectorUpdate(H2ResourceGroupsDao dao, Map<Long, SelectorRecord> map)
{
    dao.updateSelector(2, "ping.*", "ping_source", LIST_STRING_CODEC.toJson(ImmutableList.of("tag1")), "ping_user", ".*", null);

    SelectorRecord expected = new SelectorRecord(
            2,
            1L,
            Optional.of(Pattern.compile("ping.*")),
            Optional.of(Pattern.compile("ping_source")),
            Optional.empty(),
            Optional.of(ImmutableList.of("tag1")),
            Optional.empty());
    map.put(2L, expected);

    compareSelectors(map, dao.getSelectors(ENVIRONMENT));
}
Optional<PlanFragment> fragment = sendPlan.get() ? Optional.of(planFragment) : Optional.empty(); TaskUpdateRequest updateRequest = new TaskUpdateRequest( session.toSessionRepresentation(), outputBuffers.get(), totalPartitions); byte[] taskUpdateRequestJson = taskUpdateRequestCodec.toJsonBytes(updateRequest); if (fragment.isPresent()) { stats.updateWithPlanBytes(taskUpdateRequestJson.length); Request request = preparePost() .setUri(uriBuilder.build()) .setHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8.toString()) .setBodyGenerator(createStaticBodyGenerator(taskUpdateRequestJson)) .build(); ListenableFuture<JsonResponse<TaskInfo>> future = httpClient.executeAsync(request, createFullJsonResponseHandler(taskInfoCodec)); currentRequest = future; currentRequestStartNanos = System.nanoTime();
@Test
public void testJsonRoundTrip()
{
    // Exercise the three range shapes: unbounded, inclusive, and exclusive.
    List<WrappedRange> ranges = ImmutableList.of(
                    new Range(),
                    new Range("bar", "foo"),
                    new Range("bar", false, "baz", false))
            .stream()
            .map(WrappedRange::new)
            .collect(Collectors.toList());

    AccumuloSplit expected = new AccumuloSplit(
            "accumulo",
            "schema",
            "table",
            "id",
            LexicoderRowSerializer.class.getCanonicalName(),
            ranges,
            ImmutableList.of(
                    new AccumuloColumnConstraint("id", "fam1", "qual1", Optional.empty(), true),
                    new AccumuloColumnConstraint("bar", "fam2", "qual2", Optional.empty(), true)),
            Optional.of("foo,bar"),
            Optional.of("localhost:9000"));

    // Round-trip through JSON and confirm the decoded split matches.
    String json = codec.toJson(expected);
    AccumuloSplit actual = codec.fromJson(json);
    assertSplit(actual, expected);
}
@Test
public void testToJsonWithLengthLimitComplex()
{
    JsonCodec<List<ImmutablePerson>> codec = listJsonCodec(jsonCodec(ImmutablePerson.class));

    // Ten identical people, each with a 1000-character name.
    ImmutablePerson template = new ImmutablePerson(Strings.repeat("a", 1000), false);
    List<ImmutablePerson> roster = Collections.nCopies(10, template);

    // Limits below the serialized size yield empty; the asserts below pin the
    // serialized length at exactly 10382 characters.
    assertFalse(codec.toJsonWithLengthLimit(roster, 0).isPresent());
    assertFalse(codec.toJsonWithLengthLimit(roster, 5000).isPresent());
    assertFalse(codec.toJsonWithLengthLimit(roster, 10381).isPresent());
    assertTrue(codec.toJsonWithLengthLimit(roster, 10382).isPresent());
}
}
/**
 * Executes a CREATE VIEW statement: checks access, analyzes the underlying
 * query, serializes the view definition to JSON, and registers it with the
 * metadata manager.
 */
@Override
public ListenableFuture<?> execute(CreateView statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters)
{
    Session session = stateMachine.getSession();
    QualifiedObjectName viewName = createQualifiedObjectName(session, statement, statement.getName());

    accessControl.checkCanCreateView(session.getRequiredTransactionId(), session.getIdentity(), viewName);

    // Canonicalize the view query text before storing it.
    String viewSql = getFormattedSql(statement.getQuery(), sqlParser, Optional.of(parameters));

    Analysis analysis = analyzeStatement(statement, session, metadata, accessControl, parameters, stateMachine.getWarningCollector());

    // Only visible output fields become view columns.
    List<ViewColumn> viewColumns = analysis.getOutputDescriptor(statement.getQuery())
            .getVisibleFields().stream()
            .map(field -> new ViewColumn(field.getName().get(), field.getType()))
            .collect(toImmutableList());

    String definition = codec.toJson(new ViewDefinition(viewSql, session.getCatalog(), session.getSchema(), viewColumns, Optional.of(session.getUser())));
    metadata.createView(session, viewName, definition, statement.isReplace());

    return immediateFuture(null);
}
/**
 * Reads and decodes a JSON metadata file from the metadata file system.
 *
 * @param type human-readable description used in the error message
 * @param path location of the file to read
 * @param codec decoder for the file contents
 * @return the decoded value, or empty when no regular file exists at the path
 * @throws PrestoException wrapping any failure while reading or decoding
 */
private <T> Optional<T> readFile(String type, Path path, JsonCodec<T> codec)
{
    try {
        if (!metadataFileSystem.isFile(path)) {
            return Optional.empty();
        }
        try (FSDataInputStream inputStream = metadataFileSystem.open(path)) {
            return Optional.of(codec.fromJson(ByteStreams.toByteArray(inputStream)));
        }
    }
    catch (Exception e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, "Could not read " + type, e);
    }
}
@Test public void testLegacyViewWithoutOwner() { // very old view before owner was added ViewDefinition view = CODEC.fromJson("{" + BASE_JSON + "}"); assertBaseView(view); assertFalse(view.getOwner().isPresent()); }
@Test
public void testIOExplain()
{
    MaterializedResult result = computeActual("EXPLAIN (TYPE IO, FORMAT JSON) " + "SELECT * FROM orders");

    // Expect the plan to report the orders table as input, with the
    // orderstatus column constrained to the three discrete values F, O, P.
    TableColumnInfo expectedInput = new TableColumnInfo(
            new CatalogSchemaTableName("local", "sf0.01", "orders"),
            ImmutableSet.of(
                    new ColumnConstraint(
                            "orderstatus",
                            createVarcharType(1).getTypeSignature(),
                            new FormattedDomain(
                                    false,
                                    ImmutableSet.of(
                                            exactValueRange("F"),
                                            exactValueRange("O"),
                                            exactValueRange("P"))))));

    assertEquals(
            jsonCodec(IOPlan.class).fromJson((String) getOnlyElement(result.getOnlyColumnAsSet())),
            new IOPlan(ImmutableSet.of(expectedInput), Optional.empty()));
}

// A degenerate range matching exactly one value: both markers are the value
// itself with EXACTLY bounds.
private static FormattedRange exactValueRange(String value)
{
    return new FormattedRange(
            new FormattedMarker(Optional.of(value), EXACTLY),
            new FormattedMarker(Optional.of(value), EXACTLY));
}
}
@Test
public void testBackwardsCompatible()
{
    // JSON from older servers carries only nodeVersion, environment, and
    // coordinator; decoding it must equal a ServerInfo built with the
    // current constructor's remaining arguments at their default values.
    ServerInfo current = new ServerInfo(UNKNOWN, "test", true, false, Optional.empty());
    ServerInfo legacy = SERVER_INFO_CODEC.fromJson("{\"nodeVersion\":{\"version\":\"<unknown>\"},\"environment\":\"test\",\"coordinator\":true}");
    assertEquals(current, legacy);
}
private ListenableFuture<Collection<Slice>> doFinish() ImmutableList.Builder<Slice> partitionUpdates = ImmutableList.builder(); List<Callable<Object>> verificationTasks = new ArrayList<>(); for (HiveWriter writer : writers) { writer.commit(); PartitionUpdate partitionUpdate = writer.getPartitionUpdate(); partitionUpdates.add(wrappedBuffer(partitionUpdateCodec.toJsonBytes(partitionUpdate))); writer.getVerificationTask() .map(Executors::callable) .ifPresent(verificationTasks::add);
// Verifies that a selector can be updated with all-null match fields and
// then deleted with null arguments, keeping the DAO view in sync with the
// expected map at each step.
private static void testSelectorDeleteNull(H2ResourceGroupsDao dao, Map<Long, SelectorRecord> map)
{
    dao.updateSelector(3, null, null, null, "admin_user", ".*", LIST_STRING_CODEC.toJson(ImmutableList.of("tag1", "tag2")));
    SelectorRecord allEmpty = new SelectorRecord(3L, 2L, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
    map.put(3L, allEmpty);
    compareSelectors(map, dao.getSelectors(ENVIRONMENT));

    // Deleting with null match fields removes the record entirely.
    dao.deleteSelector(3, null, null, null);
    map.remove(3L);
    compareSelectors(map, dao.getSelectors(ENVIRONMENT));
}