public Schema getSchema(final Map<String, String> config, final User user) { if (null == user) { // no user then return an empty schema return new Schema(); } final List<String> graphIds = FederatedStoreUtil.getGraphIds(config); final Stream<Graph> graphs = getStream(user, graphIds); final Builder schemaBuilder = new Builder(); try { graphs.forEach(g -> schemaBuilder.merge(g.getSchema())); } catch (final SchemaException e) { final List<String> resultGraphIds = getStream(user, graphIds).map(Graph::getGraphId).collect(Collectors.toList()); throw new SchemaException(String.format(UNABLE_TO_MERGE_THE_SCHEMAS_FOR_ALL_OF_YOUR_FEDERATED_GRAPHS, resultGraphIds, KEY_OPERATION_OPTIONS_GRAPH_IDS), e); } return schemaBuilder.build(); }
if (!fromLibrary.toString().equals(idFromLibrary.build().toString())) { result.addError(String.format(GRAPH_S_ALREADY_EXISTS_SO_YOU_CANNOT_USE_A_DIFFERENT_S_DO_NOT_SET_THE_S_FIELD, graphId, SCHEMA_STRING, PARENT_SCHEMA_IDS));
throw new SchemaException("Unable to merge the schemas for all of your federated graphs: " + resultGraphIds + ". You can limit which graphs to query for using the operation option: " + KEY_OPERATION_OPTIONS_GRAPH_IDS, e); return schemaBuilder.build();
.validateFunctions(new IsTrue()) .build()) .build(); graphLibrary.addSchema("exportSchemaId", exportSchema);
.validateFunctions(new IsTrue()) .build()) .build();
public static Schema createSchema(final Long timeToLive) {
    // Start from the base result-cache schema bundled as a classpath resource.
    final Schema.Builder cacheSchema = new Schema.Builder()
            .json(StreamUtil.openStreams(GafferResultCacheUtil.class, "gafferResultCache/schema"));

    // A non-null TTL overlays an AgeOff validator on the timestamp type so
    // cached entries expire (TTL unit presumably milliseconds — per AgeOff's contract).
    if (timeToLive != null) {
        final Schema ageOffOverlay = new Schema.Builder()
                .type("timestamp", new TypeDefinition.Builder()
                        .validateFunctions(new AgeOff(timeToLive))
                        .build())
                .build();
        cacheSchema.merge(ageOffOverlay);
    }

    return cacheSchema.build();
}
}
public Builder addSchema(final Schema schemaModule) {
    // Null modules are silently ignored so callers can chain optional schemas.
    if (schemaModule == null) {
        return this;
    }
    if (schema == null) {
        // First schema supplied — adopt it directly; nothing to merge yet.
        schema = schemaModule;
    } else {
        // Fold the new module into the schema accumulated so far.
        schema = new Schema.Builder()
                .merge(schema)
                .merge(schemaModule)
                .build();
    }
    return this;
}