private static void assertStorageExists(Backend backend, boolean exists) throws Exception {
    // Pick the assertion message fragment matching the expected state.
    final String expectation = exists ? "should exist before clearing" : "should not exist after clearing";
    // The graph store and every configured index backend must agree on existence.
    assertTrue("graph " + expectation, backend.getStoreManager().exists() == exists);
    for (final IndexInformation indexInfo : backend.getIndexInformation().values()) {
        assertTrue("index " + expectation, ((IndexProvider) indexInfo).exists() == exists);
    }
}
public Backend getBackend() {
    // Build and initialize a fresh backend from this configuration,
    // caching its store features on this instance as a side effect.
    final Backend newBackend = new Backend(configuration);
    newBackend.initialize(configuration);
    storeFeatures = newBackend.getStoreFeatures();
    return newBackend;
}
public static void clearGraph(WriteConfiguration config) throws BackendException {
    // Obtain an initialized backend for this configuration and wipe its storage.
    final Backend backend = getBackend(config, true);
    backend.clearStorage();
}
public VertexIDAssigner getIDAssigner(Backend backend) {
    // Wire the ID assigner to the backend's ID authority and store capabilities.
    return new VertexIDAssigner(
            configuration,
            backend.getIDAuthority(),
            backend.getStoreFeatures());
}
graph.getBackend().getStoreManagerClass(); if (CASSANDRA_STORE_MANAGER_CLASSES.contains(storeManagerClass)) { inputFormat = CassandraBinaryInputFormat.class; ((AbstractCassandraStoreManager)graph.getBackend().getStoreManager()).getCassandraPartitioner(); hadoopConf.set("cassandra.input.partitioner.class", part.getClass().getName()); } else if (HBASE_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
public Backend(Configuration configuration) { this.configuration = configuration; KeyColumnValueStoreManager manager = getStorageManager(configuration); if (configuration.get(BASIC_METRICS)) { storeManager = new MetricInstrumentedStoreManager(manager,METRICS_STOREMANAGER_NAME,configuration.get(METRICS_MERGE_STORES),METRICS_MERGED_STORE); storeManager = manager; indexes = getIndexes(configuration); storeFeatures = storeManager.getFeatures(); managementLogManager = getKCVSLogManager(MANAGEMENT_LOG); txLogManager = getKCVSLogManager(TRANSACTION_LOG); userLogManager = getLogManager(USER_LOG);
StoreFeatures storeFeatures = backend.getStoreFeatures(); this.indexSerializer = new IndexSerializer(configuration.getConfiguration(), this.serializer, this.backend.getIndexInformation(), storeFeatures.isDistributed() && storeFeatures.isKeyOrdered()); this.edgeSerializer = new EdgeSerializer(this.serializer); this.vertexExistenceQuery = edgeSerializer.getQuery(BaseKey.VertexExists, Direction.OUT, new EdgeSerializer.TypedInterval[0]).setLimit(1); Log managementLog = backend.getSystemMgmtLog(); managementLogger = new ManagementLogger(this, managementLog, schemaCache, this.times); managementLog.registerReader(ReadMarker.fromNow(), managementLogger);
@Override
public Features features() {
    // Derive the TinkerPop feature set from the underlying store's capabilities.
    return JanusGraphFeatures.getFeatures(this, backend.getStoreFeatures());
}
public static Backend getBackend(WriteConfiguration config, boolean initialize) throws BackendException {
    // Work on a copy so the caller's configuration is left untouched; relax
    // restrictions and pin instance-specific settings used for testing.
    final ModifiableConfiguration adjusted = new ModifiableConfiguration(
            GraphDatabaseConfiguration.ROOT_NS, config.copy(), BasicConfiguration.Restriction.NONE);
    adjusted.set(GraphDatabaseConfiguration.LOCK_LOCAL_MEDIATOR_GROUP, "tmp");
    adjusted.set(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID, "inst");
    final Backend result = new Backend(adjusted);
    if (initialize) {
        result.initialize(adjusted);
    }
    return result;
}
private static Map<String, IndexProvider> getIndexes(Configuration config) {
    // Instantiate one IndexProvider per index namespace found in the configuration.
    final ImmutableMap.Builder<String, IndexProvider> providers = ImmutableMap.builder();
    for (final String indexName : config.getContainedNamespaces(INDEX_NS)) {
        Preconditions.checkArgument(StringUtils.isNotBlank(indexName), "Invalid index name [%s]", indexName);
        log.info("Configuring index [{}]", indexName);
        final IndexProvider provider = getImplementationClass(
                config.restrictTo(indexName),
                config.get(INDEX_BACKEND, indexName),
                StandardIndexProvider.getAllProviderClasses());
        Preconditions.checkNotNull(provider);
        providers.put(indexName, provider);
    }
    return providers.build();
}
final boolean hasTxIsolation = backend.getStoreFeatures().hasTxIsolation(); final boolean logTransaction = config.hasLogTransactions() && !tx.getConfiguration().hasEnabledBatchLoading(); final KCVSLog txLog = logTransaction?backend.getSystemTxLog():null; final TransactionLogHeader txLogHeader = new TransactionLogHeader(transactionId,txTimestamp, times); ModificationSummary commitSummary; try { userlogSuccess = false; final Log userLog = backend.getUserLog(logTxIdentifier); Future<Message> env = userLog.add(txLogHeader.serializeModifications(serializer, LogTxStatus.USER_LOG, tx, addedRelations, deletedRelations)); if (env.isDone()) {
/**
 * Ensure clearing storage actually removes the underlying database,
 * including every configured index backend — not just the graph store.
 *
 * @throws Exception if backend access fails
 */
@Test
public void testClearStorage() throws Exception {
    // Shut down the graph opened in setUp so we can manipulate storage directly.
    tearDown();
    config.set(ConfigElement.getPath(GraphDatabaseConfiguration.DROP_ON_CLEAR), true);
    final Backend backend = getBackend(config, false);
    // Use the shared helper so index backends are verified alongside the graph store.
    assertStorageExists(backend, true);
    clearGraph(config);
    try {
        backend.close();
    } catch (final Exception e) {
        // Ignored: the backend's connections may already be invalid after the
        // underlying storage was dropped; closing is best-effort here.
    }
    // The original backend handle is stale after clearing; open a fresh one
    // to verify that storage is really gone.
    assertStorageExists(getBackend(config, false), false);
}
break; case REINDEX: builder = graph.getBackend().buildEdgeScanJob(); builder.setFinishJob(indexId.getIndexJobFinisher(graph, SchemaAction.ENABLE_INDEX)); builder.setJobId(indexId); case REMOVE_INDEX: if (index instanceof RelationTypeIndex) { builder = graph.getBackend().buildEdgeScanJob(); } else { JanusGraphIndex graphIndex = (JanusGraphIndex) index; if (graphIndex.isMixedIndex()) throw new UnsupportedOperationException("External mixed indexes must be removed in the indexing system directly."); builder = graph.getBackend().buildGraphIndexScanJob();
ModifiableConfiguration overwrite = new ModifiableConfiguration(ROOT_NS,new CommonsConfiguration(), BasicConfiguration.Restriction.NONE); final KeyColumnValueStoreManager storeManager = Backend.getStorageManager(localBasicConfiguration); KCVSConfiguration keyColumnValueStoreConfiguration=Backend.getStandaloneGlobalConfiguration(storeManager,localBasicConfiguration); final ReadConfiguration globalConfig;
try { b = graph.getConfiguration().getBackend(); if (b.getStoreFeatures().hasTxIsolation()) { log.info("Skipping " + getClass().getSimpleName() + "." + methodName.getMethodName()); return; b.close();
private ScanMetrics executeScanJob(ScanJob job) throws Exception {
    // Run the given job over the edge store (two worker threads, small work
    // blocks) and block until the scan finishes, returning its metrics.
    return graph.getBackend().buildEdgeScanJob()
            .setJob(job)
            .setNumProcessingThreads(2)
            .setWorkBlockSize(100)
            .execute()
            .get();
}
/**
 * Opens (or reuses) a log manager identified by {@code logManagerName} and
 * returns the log named {@code logName} from it. The storage manager and log
 * managers are lazily created and cached across calls.
 *
 * @throws JanusGraphException if the backend cannot open the log
 */
private Log openLog(String logManagerName, String logName) {
    try {
        // Build a reader-specific configuration on a copy of the test config.
        final ModifiableConfiguration configuration = new ModifiableConfiguration(GraphDatabaseConfiguration.ROOT_NS,config.copy(), BasicConfiguration.Restriction.NONE);
        configuration.set(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID, "reader");
        // Poll the log frequently so tests observe messages quickly.
        configuration.set(GraphDatabaseConfiguration.LOG_READ_INTERVAL, Duration.ofMillis(500L), logManagerName);
        // Lazily create the shared storage manager on first use.
        if (logStoreManager==null) {
            logStoreManager = Backend.getStorageManager(configuration);
        }
        final StoreFeatures f = logStoreManager.getFeatures();
        // Distributed, key-ordered stores get partitioned logs; cap partitions at 8.
        final boolean part = f.isDistributed() && f.isKeyOrdered();
        if (part) {
            for (final String partitionedLogName : new String[]{USER_LOG,TRANSACTION_LOG,MANAGEMENT_LOG})
                configuration.set(KCVSLogManager.LOG_MAX_PARTITIONS,8,partitionedLogName);
        }
        assert logStoreManager!=null;
        // Lazily create and cache the log manager for this name.
        if (!logManagers.containsKey(logManagerName)) {
            //Open log manager - only supports KCVSLog
            final Configuration logConfig = configuration.restrictTo(logManagerName);
            // Only the default (KCVS) log backend is supported by this helper.
            Preconditions.checkArgument(logConfig.get(LOG_BACKEND).equals(LOG_BACKEND.getDefaultValue()));
            logManagers.put(logManagerName,new KCVSLogManager(logStoreManager,logConfig));
        }
        assert logManagers.containsKey(logManagerName);
        return logManagers.get(logManagerName).openLog(logName);
    } catch (final BackendException e) {
        throw new JanusGraphException("Could not open log: "+ logName,e);
    }
}
graph.getBackend().getStoreManagerClass(); if (CASSANDRA_STORE_MANAGER_CLASSES.contains(storeManagerClass)) { inputFormat = CassandraBinaryInputFormat.class; ((AbstractCassandraStoreManager)graph.getBackend().getStoreManager()).getCassandraPartitioner(); hadoopConf.set("cassandra.input.partitioner.class", part.getClass().getName()); } else if (HBASE_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
/**
 * Sets time-to-live for those schema types that support it.
 *
 * @param type     the schema type to modify; must be a static vertex label,
 *                 an edge label, or a property key
 * @param duration the TTL; only 'seconds' granularity is supported, must not
 *                 be negative, and a zero duration clears the TTL
 */
@Override
public void setTTL(final JanusGraphSchemaType type, final Duration duration) {
    if (!graph.getBackend().getStoreFeatures().hasCellTTL())
        throw new UnsupportedOperationException("The storage engine does not support TTL");
    if (type instanceof VertexLabelVertex) {
        Preconditions.checkArgument(((VertexLabelVertex) type).isStatic(),
                "must define vertex label as static to allow setting TTL");
    } else {
        Preconditions.checkArgument(type instanceof EdgeLabelVertex || type instanceof PropertyKeyVertex,
                "TTL is not supported for type " + type.getClass().getSimpleName());
    }
    Preconditions.checkArgument(type instanceof JanusGraphSchemaVertex);
    Preconditions.checkArgument(!duration.isNegative(), "TTL duration must not be negative");
    // Math.toIntExact throws on overflow instead of silently truncating the
    // seconds value, as the previous raw (int) cast did.
    final Integer ttlSeconds = duration.isZero() ? null : Math.toIntExact(duration.getSeconds());
    setTypeModifier(type, ModifierType.TTL, ttlSeconds);
}
public static KeyColumnValueStoreManager getStorageManager(Configuration storageConfig) {
    // Instantiate the store manager implementation named in the configuration.
    StoreManager manager = getImplementationClass(
            storageConfig,
            storageConfig.get(STORAGE_BACKEND),
            StandardStoreManager.getAllManagerClasses());
    if (manager instanceof OrderedKeyValueStoreManager) {
        // Adapt ordered key-value backends to the key-column-value interface,
        // fixing an 8-byte key length for the edge, lock, and ID stores.
        manager = new OrderedKeyValueStoreManagerAdapter(
                (OrderedKeyValueStoreManager) manager,
                ImmutableMap.of(
                        EDGESTORE_NAME, 8,
                        EDGESTORE_NAME + LOCK_STORE_SUFFIX, 8,
                        storageConfig.get(IDS_STORE_NAME), 8));
    }
    Preconditions.checkArgument(manager instanceof KeyColumnValueStoreManager,
            "Invalid storage manager: %s", manager.getClass());
    return (KeyColumnValueStoreManager) manager;
}