protected void validateDatasetState(MetadataProvider metadataProvider, Dataset dataset, SourceLocation sourceLoc)
        throws Exception {
    validateIfResourceIsActiveInFeed(metadataProvider.getApplicationContext(), dataset, sourceLoc);
}
@SuppressWarnings("squid:S1181") protected void cancelJobSafely(MetadataProvider metadataProvider, Throwable e) { try { metadataProvider.getApplicationContext().getHcc().cancelJob(jobId); } catch (Throwable th) { LOGGER.warn("Failed to cancel active job", th); e.addSuppressed(th); } }
private static IStatementExecutor getSQLPPTranslator(MetadataProvider metadataProvider,
        SessionOutput sessionOutput) {
    List<Statement> stmts = new ArrayList<>();
    DefaultStatementExecutorFactory qtFactory = new DefaultStatementExecutorFactory();
    return qtFactory.create(metadataProvider.getApplicationContext(), stmts, sessionOutput,
            new SqlppCompilationProvider(), new StorageComponentProvider());
}
private static int getSortNumFrames(MetadataProvider metadataProvider, SourceLocation sourceLoc)
        throws AlgebricksException {
    return OptimizationConfUtil.getSortNumFrames(metadataProvider.getApplicationContext().getCompilerProperties(),
            metadataProvider.getConfig(), sourceLoc);
}
public TestUserActor(String name, MetadataProvider metadataProvider, TestClusterControllerActor clusterController) {
    super(name, metadataProvider);
    this.clusterController = clusterController;
    this.lockManager = metadataProvider.getApplicationContext().getMetadataLockManager();
}
public static JobSpecification dropDatasetJobSpec(Dataset dataset, MetadataProvider metadataProvider)
        throws AlgebricksException, ACIDException {
    LOGGER.info("DROP DATASET: " + dataset);
    if (dataset.getDatasetType() == DatasetType.EXTERNAL) {
        return RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    }
    JobSpecification specPrimary = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
            metadataProvider.getSplitProviderAndConstraints(dataset);
    IIndexDataflowHelperFactory indexHelperFactory = new IndexDataflowHelperFactory(
            metadataProvider.getStorageComponentProvider().getStorageManager(), splitsAndConstraint.first);
    IndexDropOperatorDescriptor primaryBtreeDrop = new IndexDropOperatorDescriptor(specPrimary, indexHelperFactory);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(specPrimary, primaryBtreeDrop,
            splitsAndConstraint.second);
    specPrimary.addRoot(primaryBtreeDrop);
    return specPrimary;
}
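// A minimal usage sketch, not from the source: a spec built by dropDatasetJobSpec is
// typically submitted through the CC's Hyracks client connection, the same connection that
// cancelJobSafely above uses for cancellation. The "dataset" variable is an assumption.
//
//     JobSpecification spec = dropDatasetJobSpec(dataset, metadataProvider);
//     IHyracksClientConnection hcc = metadataProvider.getApplicationContext().getHcc();
//     JobId jobId = hcc.startJob(spec);
//     hcc.waitForCompletion(jobId);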
private static Pair<JobSpecification, IAdapterFactory> buildFeedIntakeJobSpec(Feed feed,
        MetadataProvider metadataProvider, FeedPolicyAccessor policyAccessor) throws Exception {
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    spec.setFrameSize(metadataProvider.getApplicationContext().getCompilerProperties().getFrameSize());
    Triple<IOperatorDescriptor, AlgebricksPartitionConstraint, IAdapterFactory> t =
            metadataProvider.buildFeedIntakeRuntime(spec, feed, policyAccessor);
    IOperatorDescriptor feedIngestor = t.first;
    AlgebricksPartitionConstraint ingesterPc = t.second;
    IAdapterFactory adapterFactory = t.third;
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, feedIngestor, ingesterPc);
    NullSinkOperatorDescriptor nullSink = new NullSinkOperatorDescriptor(spec);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, nullSink, ingesterPc);
    spec.connect(new OneToOneConnectorDescriptor(spec), feedIngestor, 0, nullSink, 0);
    spec.addRoot(nullSink);
    return Pair.of(spec, adapterFactory);
}
private static void tryDropDatasetNodegroup(Dataset source, MetadataProvider metadataProvider) throws Exception {
    ICcApplicationContext appCtx = metadataProvider.getApplicationContext();
    String sourceNodeGroup = source.getNodeGroupName();
    appCtx.getMetadataLockManager().acquireNodeGroupWriteLock(metadataProvider.getLocks(), sourceNodeGroup);
    MetadataManager.INSTANCE.dropNodegroup(metadataProvider.getMetadataTxnContext(), sourceNodeGroup, true);
}
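// Note (an assumption, not confirmed by this snippet): the trailing "true" passed to
// dropNodegroup is read here as a fail-silently flag, making the drop a no-op rather than
// an error when the node group is still referenced by other datasets, which would explain
// the "try" in the method name.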
public static JobSpecification dropDataverseJobSpec(Dataverse dataverse, MetadataProvider metadata) {
    JobSpecification jobSpec = RuntimeUtils.createJobSpecification(metadata.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
            metadata.splitAndConstraints(dataverse.getDataverseName());
    FileRemoveOperatorDescriptor frod =
            new FileRemoveOperatorDescriptor(jobSpec, splitsAndConstraint.first, false);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(jobSpec, frod, splitsAndConstraint.second);
    jobSpec.addRoot(frod);
    return jobSpec;
}
private void printPlanAsResult(MetadataProvider metadataProvider, SessionOutput output) throws AlgebricksException {
    final SessionConfig conf = output.config();
    boolean quoteResult = conf.getPlanFormat() == SessionConfig.PlanFormat.STRING;
    conf.set(SessionConfig.FORMAT_QUOTE_RECORD, quoteResult);
    try {
        ResultUtil.printResults(metadataProvider.getApplicationContext(), executionPlans.getOptimizedLogicalPlan(),
                output, new Stats(), null);
    } catch (HyracksDataException e) {
        throw new AlgebricksException(e);
    }
}
public static JobSpecification buildDropFilesIndexJobSpec(MetadataProvider metadataProvider, Dataset dataset)
        throws AlgebricksException {
    String indexName = IndexingConstants.getFilesIndexName(dataset.getDatasetName());
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
            metadataProvider.getSplitProviderAndConstraints(dataset, indexName);
    IIndexDataflowHelperFactory indexHelperFactory = new IndexDataflowHelperFactory(
            metadataProvider.getStorageComponentProvider().getStorageManager(), splitsAndConstraint.first);
    IndexDropOperatorDescriptor btreeDrop = new IndexDropOperatorDescriptor(spec, indexHelperFactory);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, btreeDrop,
            splitsAndConstraint.second);
    spec.addRoot(btreeDrop);
    return spec;
}
@Override
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceScanRuntime(
        MetadataProvider metadataProvider, IDataSource<DataSourceId> dataSource,
        List<LogicalVariable> scanVariables, List<LogicalVariable> projectVariables, boolean projectPushed,
        List<LogicalVariable> minFilterVars, List<LogicalVariable> maxFilterVars,
        ITupleFilterFactory tupleFilterFactory, long outputLimit, IOperatorSchema opSchema,
        IVariableTypeEnvironment typeEnv, JobGenContext context, JobSpecification jobSpec, Object implConfig)
        throws AlgebricksException {
    if (tupleFilterFactory != null || outputLimit >= 0) {
        throw CompilationException.create(ErrorCode.COMPILATION_ILLEGAL_STATE,
                "tuple filter and limit are not supported by FunctionDataSource");
    }
    GenericAdapterFactory adapterFactory = new GenericAdapterFactory();
    adapterFactory.setOutputType(RecordUtil.FULLY_OPEN_RECORD_TYPE);
    IClusterStateManager csm = metadataProvider.getApplicationContext().getClusterStateManager();
    FunctionDataSourceFactory factory =
            new FunctionDataSourceFactory(createFunction(metadataProvider, getLocations(csm)));
    adapterFactory.configure(factory);
    return metadataProvider.buildExternalDatasetDataScannerRuntime(jobSpec, itemType, adapterFactory);
}
@Override
public JobSpecification buildDropJobSpec(Set<DropOption> options) throws AlgebricksException {
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
            metadataProvider.getSplitProviderAndConstraints(dataset, index.getIndexName());
    IIndexDataflowHelperFactory dataflowHelperFactory = new IndexDataflowHelperFactory(
            metadataProvider.getStorageComponentProvider().getStorageManager(), splitsAndConstraint.first);
    // The index drop operation should be persistent regardless of whether the dataset is temporary or permanent.
    IndexDropOperatorDescriptor btreeDrop = new IndexDropOperatorDescriptor(spec, dataflowHelperFactory, options);
    btreeDrop.setSourceLocation(sourceLoc);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, btreeDrop,
            splitsAndConstraint.second);
    spec.addRoot(btreeDrop);
    return spec;
}
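// A minimal usage sketch, not from the source ("indexHelper" and the empty option set are
// assumptions for illustration):
//
//     Set<DropOption> options = Collections.emptySet();
//     JobSpecification dropSpec = indexHelper.buildDropJobSpec(options);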
@Override
protected void setRunning(MetadataProvider metadataProvider, boolean running) {
    try {
        IMetadataLockManager lockManager = metadataProvider.getApplicationContext().getMetadataLockManager();
        LockList locks = metadataProvider.getLocks();
        lockManager.acquireDataverseReadLock(locks, entityId.getDataverse());
        lockManager.acquireActiveEntityWriteLock(locks, entityId.getDataverse() + '.' + entityId.getEntityName());
        // persist entity
    } catch (Throwable th) {
        // This failure puts the system in a bad state.
        throw new IllegalStateException(th);
    }
}
@Override
public JobSpecification buildCreationJobSpec() throws AlgebricksException {
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    IStorageComponentProvider storageComponentProvider = metadataProvider.getStorageComponentProvider();
    IResourceFactory resourceFactory = dataset.getResourceFactory(metadataProvider, index, itemType, metaType,
            mergePolicyFactory, mergePolicyProperties);
    IIndexBuilderFactory indexBuilderFactory = new IndexBuilderFactory(storageComponentProvider.getStorageManager(),
            secondaryFileSplitProvider, resourceFactory, true);
    IndexCreateOperatorDescriptor secondaryIndexCreateOp =
            new IndexCreateOperatorDescriptor(spec, indexBuilderFactory);
    secondaryIndexCreateOp.setSourceLocation(sourceLoc);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, secondaryIndexCreateOp,
            secondaryPartitionConstraint);
    spec.addRoot(secondaryIndexCreateOp);
    spec.setConnectorPolicyAssignmentPolicy(new ConnectorPolicyAssignmentPolicy());
    return spec;
}
@Override
public JobSpecification buildCompactJobSpec() throws AlgebricksException {
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
            metadataProvider.getSplitProviderAndConstraints(dataset, index.getIndexName());
    IIndexDataflowHelperFactory dataflowHelperFactory = new IndexDataflowHelperFactory(
            metadataProvider.getStorageComponentProvider().getStorageManager(), splitsAndConstraint.first);
    LSMTreeIndexCompactOperatorDescriptor compactOp =
            new LSMTreeIndexCompactOperatorDescriptor(spec, dataflowHelperFactory);
    compactOp.setSourceLocation(sourceLoc);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, compactOp,
            secondaryPartitionConstraint);
    spec.addRoot(compactOp);
    spec.setConnectorPolicyAssignmentPolicy(new ConnectorPolicyAssignmentPolicy());
    return spec;
}
public static JobSpecification buildFilesIndexUpdateJobSpec(Dataset dataset,
        List<ExternalFile> externalFilesSnapshot, MetadataProvider metadataProvider) throws AlgebricksException {
    IStorageComponentProvider storageComponentProvider = metadataProvider.getStorageComponentProvider();
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> secondarySplitsAndConstraint = metadataProvider
            .getSplitProviderAndConstraints(dataset, IndexingConstants.getFilesIndexName(dataset.getDatasetName()));
    IFileSplitProvider secondaryFileSplitProvider = secondarySplitsAndConstraint.first;
    IIndexDataflowHelperFactory dataflowHelperFactory = new IndexDataflowHelperFactory(
            storageComponentProvider.getStorageManager(), secondaryFileSplitProvider);
    ExternalFilesIndexModificationOperatorDescriptor externalFilesOp =
            new ExternalFilesIndexModificationOperatorDescriptor(spec, dataflowHelperFactory,
                    externalFilesSnapshot);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, externalFilesOp,
            secondarySplitsAndConstraint.second);
    spec.addRoot(externalFilesOp);
    spec.setConnectorPolicyAssignmentPolicy(new ConnectorPolicyAssignmentPolicy());
    return spec;
}
public static JobSpecification compactDatasetJobSpec(Dataverse dataverse, String datasetName,
        MetadataProvider metadataProvider) throws AlgebricksException {
    String dataverseName = dataverse.getDataverseName();
    Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
    if (dataset == null) {
        throw new AsterixException("Could not find dataset " + datasetName + " in dataverse " + dataverseName);
    }
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
            metadataProvider.getSplitProviderAndConstraints(dataset);
    IIndexDataflowHelperFactory indexHelperFactory = new IndexDataflowHelperFactory(
            metadataProvider.getStorageComponentProvider().getStorageManager(), splitsAndConstraint.first);
    LSMTreeIndexCompactOperatorDescriptor compactOp =
            new LSMTreeIndexCompactOperatorDescriptor(spec, indexHelperFactory);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, compactOp,
            splitsAndConstraint.second);
    spec.addRoot(compactOp);
    return spec;
}
protected void sendStopMessages(MetadataProvider metadataProvider, long timeout, TimeUnit unit) throws Exception {
    ICcApplicationContext applicationCtx = metadataProvider.getApplicationContext();
    ICCMessageBroker messageBroker = (ICCMessageBroker) applicationCtx.getServiceContext().getMessageBroker();
    AlgebricksAbsolutePartitionConstraint runtimeLocations = getLocations();
    int partition = 0;
    if (LOGGER.isInfoEnabled()) {
        LOGGER.log(Level.INFO, "Sending stop messages to " + runtimeLocations);
    }
    for (String location : runtimeLocations.getLocations()) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.log(Level.INFO, "Sending to " + location);
        }
        ActiveRuntimeId runtimeId = getActiveRuntimeId(partition++);
        messageBroker.sendApplicationMessageToNC(new ActiveManagerMessage(ActiveManagerMessage.Kind.STOP_ACTIVITY,
                new StopRuntimeParameters(runtimeId, timeout, unit)), location);
    }
}
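// A minimal caller sketch, not from the source ("listener" is a hypothetical active-entity
// listener instance): stop all runtimes of an active entity with a bounded per-runtime wait.
//
//     listener.sendStopMessages(metadataProvider, 30, TimeUnit.SECONDS);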