// NOTE(review): truncated fragment — the enclosing method's signature and the
// closing braces for this loop and these ifs are outside the visible chunk, so
// the code is left byte-identical and only annotated.
// Re-resolves each parent schema id from the graph library and merges them,
// then compares the merged result (via its string form) against the schema the
// library already holds; a mismatch is reported as a validation error.
Schema.Builder idFromLibrary = new Schema.Builder();
for (final String parentSchemaId : parentSchemaIds) {
    Schema tempSchema = graphLibrary.getSchema(parentSchemaId);
    if (null != tempSchema) {
        idFromLibrary.merge(tempSchema);
        // String comparison appears to stand in for structural equality —
        // presumably Schema lacks a usable equals(); TODO confirm.
        // NOTE(review): the comparison runs inside the loop, so it checks a
        // partially merged builder on every iteration — verify this is intended
        // rather than a compare-after-loop.
        if (!fromLibrary.toString().equals(idFromLibrary.build().toString())) {
            result.addError(String.format(GRAPH_S_ALREADY_EXISTS_SO_YOU_CANNOT_USE_A_DIFFERENT_S_DO_NOT_SET_THE_S_FIELD, graphId, SCHEMA_STRING, PARENT_SCHEMA_IDS));
// NOTE(review): this chunk stitches together fragments of (at least) two
// different if/else merge blocks plus a schema-deserialisation tail; the
// opening conditions and matching closing braces are not visible here
// (two `} else {` at the same apparent level cannot both belong to one if),
// so only comments are added.
mergedParentSchema = parentSchema;
} else {
    // Accumulate parent schemas pairwise through a fresh builder; merge order
    // is existing-accumulation first, new parent second.
    mergedParentSchema = new Schema.Builder()
            .merge(mergedParentSchema)
            .merge(parentSchema)
            .build();
    schema = mergedParentSchema;
} else {
    // Overlay the explicitly supplied schema on top of the merged parents.
    schema = new Schema.Builder()
            .merge(mergedParentSchema)
            .merge(schema)
            .build();
    // Deserialise a schema from the collected JSON byte chunks and register it.
    final Schema newSchema = new Schema.Builder()
            .json(schemaClass, schemaBytesList.toArray(new byte[schemaBytesList.size()][]))
            .build();
    addSchema(newSchema);
// Assemble the export graph's schema: one int-to-int edge with aggregation
// disabled, plus the "int" and "true" type definitions it relies on.
final SchemaEdgeDefinition plainEdge = new SchemaEdgeDefinition.Builder()
        .source("int")
        .destination("int")
        .aggregate(false)
        .build();
final TypeDefinition mustBeTrue = new TypeDefinition.Builder()
        .clazz(Boolean.class)
        .validateFunctions(new IsTrue())
        .build();
final Schema exportSchema = new Schema.Builder()
        .id("exportSchemaId")
        .edge("edge", plainEdge)
        .type("int", Integer.class)
        .type("true", mustBeTrue)
        .build();
// Register the schema under its id so later operations can resolve it by name.
graphLibrary.addSchema("exportSchemaId", exportSchema);
// Build the export schema: a single "edge" between "int" vertices with
// aggregation switched off, backed by the two type definitions it uses.
final SchemaEdgeDefinition intToIntEdge = new SchemaEdgeDefinition.Builder()
        .source("int")
        .destination("int")
        .aggregate(false)
        .build();
final TypeDefinition trueType = new TypeDefinition.Builder()
        .clazz(Boolean.class)
        .validateFunctions(new IsTrue())
        .build();
final Schema exportSchema = new Schema.Builder()
        .id("exportSchemaId")
        .edge("edge", intToIntEdge)
        .type("int", Integer.class)
        .type("true", trueType)
        .build();
// NOTE(review): truncated fragment — the catch/brace pairing is mangled in this
// chunk (the compact and non-compact merge paths and two distinct error
// messages are interleaved without their closing braces), so the code is left
// byte-identical and only annotated.
final Builder schemaBuilder = new Builder();
try {
    if (operation.isCompact()) {
        // Compact path: execute GetSchema against each graph so the merged
        // result reflects each graph's own (compacted) view of its schema.
        graphs.forEach(g -> {
            try {
                schemaBuilder.merge(g.execute(getSchema, context));
            } catch (final OperationException e) {
                // Wrap as unchecked so it can escape the lambda; the cause is preserved.
                throw new RuntimeException("Unable to fetch schema from graph " + g.getGraphId(), e);
                // Non-compact path (from elsewhere in the original method):
                // merge the statically held schemas directly.
                graphs.forEach(g -> schemaBuilder.merge(g.getSchema()));
                throw new SchemaException("Unable to merge the schemas for all of your federated graphs: " + resultGraphIds + ". You can limit which graphs to query for using the operation option: " + KEY_OPERATION_OPTIONS_GRAPH_IDS, e);
                return schemaBuilder.build();
public Schema getSchema(final Map<String, String> config, final User user) { if (null == user) { // no user then return an empty schema return new Schema(); } final List<String> graphIds = FederatedStoreUtil.getGraphIds(config); final Stream<Graph> graphs = getStream(user, graphIds); final Builder schemaBuilder = new Builder(); try { graphs.forEach(g -> schemaBuilder.merge(g.getSchema())); } catch (final SchemaException e) { final List<String> resultGraphIds = getStream(user, graphIds).map(Graph::getGraphId).collect(Collectors.toList()); throw new SchemaException(String.format(UNABLE_TO_MERGE_THE_SCHEMAS_FOR_ALL_OF_YOUR_FEDERATED_GRAPHS, resultGraphIds, KEY_OPERATION_OPTIONS_GRAPH_IDS), e); } return schemaBuilder.build(); }
/**
 * Merges the supplied schema fragment into the schema held by this builder.
 * The first non-null fragment is adopted as-is; later fragments are merged on
 * top of it. A {@code null} fragment is silently ignored.
 *
 * @param schemaModule the schema fragment to add; may be {@code null}
 * @return this builder, for chaining
 */
public Builder addSchema(final Schema schemaModule) {
    if (null == schemaModule) {
        // Nothing to add; leave the current schema untouched.
        return this;
    }
    if (null == schema) {
        // First fragment: adopt it directly, no merge needed.
        schema = schemaModule;
    } else {
        // Later fragments: merge on top of what we already hold.
        schema = new Schema.Builder()
                .merge(schema)
                .merge(schemaModule)
                .build();
    }
    return this;
}
/**
 * Creates the Gaffer result-cache schema, loaded from the bundled
 * {@code gafferResultCache/schema} JSON resources.
 *
 * @param timeToLive how long (presumably milliseconds — TODO confirm against
 *                   {@code AgeOff}) cached results remain valid; {@code null}
 *                   means results never age off
 * @return the result-cache schema, with an age-off validator on the
 *         {@code timestamp} type when a time-to-live was supplied
 */
public static Schema createSchema(final Long timeToLive) {
    final Schema.Builder builder = new Schema.Builder()
            .json(StreamUtil.openStreams(GafferResultCacheUtil.class, "gafferResultCache/schema"));
    if (null == timeToLive) {
        // No expiry requested — use the base schema as-is.
        return builder.build();
    }
    // Overlay an AgeOff validator on the timestamp type so cached results
    // expire once they are older than the time-to-live.
    final TypeDefinition agedTimestamp = new TypeDefinition.Builder()
            .validateFunctions(new AgeOff(timeToLive))
            .build();
    builder.merge(new Schema.Builder()
            .type("timestamp", agedTimestamp)
            .build());
    return builder.build();
}
}