// Delegates to the wrapped NodeControllerService to expose its application context.
// NOTE(review): the trailing extra '}' closes an enclosing class/anonymous class not visible in this view.
@Override
public Object getApplicationContext() {
    return ncs.getApplicationContext();
} }
/**
 * Returns the application runtime context of the first NC in the test integration cluster.
 */
public NCAppRuntimeContext getAppRuntimeContext() {
    final Object appCtx = ExecutionTestUtil.integrationUtil.ncs[0].getApplicationContext();
    return (NCAppRuntimeContext) appCtx;
}
/**
 * Returns the transaction subsystem of the first NC in the test integration cluster.
 */
public TransactionSubsystem getTransactionSubsystem() {
    final NCAppRuntimeContext appCtx =
            (NCAppRuntimeContext) ExecutionTestUtil.integrationUtil.ncs[0].getApplicationContext();
    return (TransactionSubsystem) appCtx.getTransactionSubsystem();
}
/**
 * Collects this node controller's local counters: the max local resource id, the max
 * transaction id, and the max job id for the given CC.
 *
 * @param ccId the cluster controller whose max job id is looked up
 * @param ncs  the node controller to collect counters from
 * @return the collected counters
 * @throws HyracksDataException on failure to read the counters
 */
public static NcLocalCounters collect(CcId ccId, NodeControllerService ncs) throws HyracksDataException {
    final INcApplicationContext appCtx = (INcApplicationContext) ncs.getApplicationContext();
    // never report a resource id below the first id available to user datasets
    final long resourceIdCounter = Math.max(appCtx.getLocalResourceRepository().maxId(),
            MetadataIndexImmutableProperties.FIRST_AVAILABLE_USER_DATASET_ID);
    final long txnIdCounter = appCtx.getMaxTxnId();
    final long jobIdCounter = ncs.getMaxJobId(ccId);
    return new NcLocalCounters(resourceIdCounter, txnIdCounter, jobIdCounter);
}
/**
 * Creates the NC message broker: sizes the messaging frame pool from the configured
 * frame size and count, initializes the received-message queue and future bookkeeping,
 * and starts the message delivery service on the application's thread executor.
 *
 * @param ncs                 the owning node controller service
 * @param messagingProperties messaging configuration (frame size and frame count)
 */
public NCMessageBroker(NodeControllerService ncs, MessagingProperties messagingProperties) {
    this.ncs = ncs;
    appContext = (INcApplicationContext) ncs.getApplicationContext();
    maxMsgSize = messagingProperties.getFrameSize();
    // total messaging memory budget = frame size * frame count
    int messagingMemoryBudget = messagingProperties.getFrameSize() * messagingProperties.getFrameCount();
    messagingFramePool = new ConcurrentFramePool(ncs.getId(), messagingMemoryBudget, messagingProperties.getFrameSize());
    receivedMsgsQ = new LinkedBlockingQueue<>();
    futureIdGenerator = new AtomicLong();
    futureMap = new LongObjectHashMap<>();
    // start the delivery service last: it runs concurrently and reads the state initialized above
    MessageDeliveryService msgDeliverySvc = new MessageDeliveryService();
    appContext.getThreadExecutor().execute(msgDeliverySvc);
}
/**
 * One-time class setup: initializes the language execution test environment, then points
 * the test executor at each NC's HTTP API endpoint on the loopback address.
 */
@BeforeClass
public static void setUp() throws Exception {
    LangExecutionUtil.setUp(TEST_CONFIG_FILE_NAME, testExecutor);
    final String loopbackIp = InetAddress.getLoopbackAddress().getHostAddress();
    final Map<String, InetSocketAddress> endPoints = new HashMap<>();
    for (NodeControllerService nc : ExecutionTestUtil.integrationUtil.ncs) {
        final INcApplicationContext appCtx = (INcApplicationContext) nc.getApplicationContext();
        final int apiPort = appCtx.getExternalProperties().getNcApiPort();
        endPoints.put(nc.getId(), InetSocketAddress.createUnresolved(loopbackIp, apiPort));
    }
    testExecutor.setNcEndPoints(endPoints);
}
/**
 * Initializes the cluster and, when an external library path is supplied, installs the
 * external UDF library on the CC and every NC.
 *
 * @param deleteOldInstanceData whether to wipe data from previous runs
 * @param externalLibPath       path to the external library to install; null/empty to skip
 * @param confDir               configuration directory for cluster initialization
 * @throws Exception if cluster initialization or library installation fails
 */
public void init(boolean deleteOldInstanceData, String externalLibPath, String confDir) throws Exception {
    List<ILibraryManager> libraryManagers = new ArrayList<>();
    ExternalUDFLibrarian librarian = new ExternalUDFLibrarian(libraryManagers);
    // remove library leftovers from previous runs before (re)initializing the cluster
    librarian.cleanup();
    init(deleteOldInstanceData, confDir);
    if (externalLibPath != null && !externalLibPath.isEmpty()) {
        // collect the CC and all NC library managers so the library is installed cluster-wide
        libraryManagers.add(((ICcApplicationContext) cc.getApplicationContext()).getLibraryManager());
        for (NodeControllerService nc : ncs) {
            INcApplicationContext runtimeCtx = (INcApplicationContext) nc.getApplicationContext();
            libraryManagers.add(runtimeCtx.getLibraryManager());
        }
        librarian.install(System.getProperty("external.lib.dataverse", "test"),
                System.getProperty("external.lib.libname", "testlib"), externalLibPath);
    }
}
/**
 * Installs the test persisted-resource registry on the service context of every NC.
 */
public void setTestPersistedResourceRegistry() {
    for (NodeControllerService nodeController : ncs) {
        final INcApplicationContext appCtx = (INcApplicationContext) nodeController.getApplicationContext();
        appCtx.getServiceContext()
                .setPersistedResourceRegistry(new AsterixHyracksIntegrationUtil.TestPersistedResourceRegistry());
    }
}
/**
 * Verifies that a log file switch survives thread interruption: an interrupted thread
 * preparing the next log file must still leave a new, usable log file behind, and the
 * preparation itself must not throw.
 */
@Test
public void interruptedLogFileSwitch() throws Exception {
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    final LogManager logManager = (LogManager) ncAppCtx.getTransactionSubsystem().getLogManager();
    int logFileCountBeforeInterrupt = logManager.getOrderedLogFileIds().size();
    // ensure an interrupted transactor will create next log file but will fail to position the log channel
    final AtomicBoolean failed = new AtomicBoolean(false);
    Thread interruptedTransactor = new Thread(() -> {
        // set the interrupt flag before doing the switch so the switch runs while interrupted
        Thread.currentThread().interrupt();
        try {
            prepareNextLogFile(logManager);
        } catch (Exception e) {
            failed.set(true);
        }
    });
    interruptedTransactor.start();
    interruptedTransactor.join();
    // ensure a new log file was created and survived interrupt
    int logFileCountAfterInterrupt = logManager.getOrderedLogFileIds().size();
    Assert.assertEquals(logFileCountBeforeInterrupt + 1, logFileCountAfterInterrupt);
    Assert.assertFalse(failed.get());
    // make sure we can still log to the new file
    interruptedLogPageSwitch();
}
/**
 * Per-test setup: initializes the language test environment and, on the first invocation
 * only, records each NC's API endpoint and replication address (both on loopback) in the
 * test executor.
 */
@Before
public void before() throws Exception {
    LangExecutionUtil.setUp(TEST_CONFIG_FILE_NAME, testExecutor);
    if (configured) {
        return;
    }
    final String loopbackIp = InetAddress.getLoopbackAddress().getHostAddress();
    final Map<String, InetSocketAddress> apiEndPoints = new HashMap<>();
    final Map<String, InetSocketAddress> replicationEndPoints = new HashMap<>();
    for (NodeControllerService nc : ExecutionTestUtil.integrationUtil.ncs) {
        final INcApplicationContext appCtx = (INcApplicationContext) nc.getApplicationContext();
        final int apiPort = appCtx.getExternalProperties().getNcApiPort();
        final int replicationPort =
                (int) appCtx.getServiceContext().getAppConfig().get(NCConfig.Option.REPLICATION_LISTEN_PORT);
        apiEndPoints.put(nc.getId(), InetSocketAddress.createUnresolved(loopbackIp, apiPort));
        replicationEndPoints.put(nc.getId(), InetSocketAddress.createUnresolved(loopbackIp, replicationPort));
    }
    testExecutor.setNcEndPoints(apiEndPoints);
    testExecutor.setNcReplicationAddress(replicationEndPoints);
    configured = true;
}
/**
 * Per-test setup: redirects logging to the console, initializes the language test
 * environment, and on the first invocation only records each NC's API endpoint and
 * replication address (both on loopback) in the test executor.
 */
@Before
public void before() throws Exception {
    TestUtils.redirectLoggingToConsole();
    LangExecutionUtil.setUp(TEST_CONFIG_FILE_NAME, testExecutor);
    if (configured) {
        return;
    }
    final String loopbackIp = InetAddress.getLoopbackAddress().getHostAddress();
    final Map<String, InetSocketAddress> apiEndPoints = new HashMap<>();
    final Map<String, InetSocketAddress> replicationEndPoints = new HashMap<>();
    for (NodeControllerService nc : ExecutionTestUtil.integrationUtil.ncs) {
        final INcApplicationContext appCtx = (INcApplicationContext) nc.getApplicationContext();
        final int apiPort = appCtx.getExternalProperties().getNcApiPort();
        final int replicationPort =
                (int) appCtx.getServiceContext().getAppConfig().get(NCConfig.Option.REPLICATION_LISTEN_PORT);
        apiEndPoints.put(nc.getId(), InetSocketAddress.createUnresolved(loopbackIp, apiPort));
        replicationEndPoints.put(nc.getId(), InetSocketAddress.createUnresolved(loopbackIp, replicationPort));
    }
    testExecutor.setNcEndPoints(apiEndPoints);
    testExecutor.setNcReplicationAddress(replicationEndPoints);
    configured = true;
}
// NOTE(review): method is truncated in this view; only the setup lines are visible.
@Test
public void deleteMaskedFiles() throws Exception {
    // application context and node id of the first NC in the test cluster
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    final String nodeId = ncAppCtx.getServiceContext().getNodeId();
    final String datasetName = "ds";
// Fragment (enclosing loop/method not visible here): collect this NC's library manager.
INcApplicationContext runtimeCtx = (INcApplicationContext) nc.getApplicationContext();
libraryManagers.add(runtimeCtx.getLibraryManager());
/**
 * Verifies that a masked index metadata file is treated as corrupted: after forging a
 * mask file next to the index metadata file and restarting the cluster, both the
 * metadata file and its mask must be deleted.
 */
@Test
public void deleteCorruptedResourcesTest() throws Exception {
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    final String nodeId = ncAppCtx.getServiceContext().getNodeId();
    final String datasetName = "ds";
    TestDataUtil.createIdOnlyDataset(datasetName);
    final Dataset dataset = TestDataUtil.getDataset(integrationUtil, datasetName);
    final String indexPath = TestDataUtil.getIndexPath(integrationUtil, dataset, nodeId);
    final FileReference indexDirRef = ncAppCtx.getIoManager().resolve(indexPath);
    final File indexMetadataFile = new File(indexDirRef.getFile(), StorageConstants.METADATA_FILE_NAME);
    Assert.assertTrue(indexMetadataFile.exists());
    // forge a mask file and ensure the metadata file and its mask files will be deleted after restart
    final File indexMetadataMaskFile = new File(indexDirRef.getFile(),
            StorageConstants.MASK_FILE_PREFIX + StorageConstants.METADATA_FILE_NAME);
    Files.createFile(indexMetadataMaskFile.toPath());
    Assert.assertTrue(indexMetadataMaskFile.exists());
    // restart without wiping data so recovery sees the masked file
    integrationUtil.deinit(false);
    integrationUtil.init(false, TEST_CONFIG_FILE_NAME);
    Assert.assertFalse(indexMetadataFile.exists());
    Assert.assertFalse(indexMetadataMaskFile.exists());
} }
// NOTE(review): fragment — the result of this call is discarded and the loop body is
// truncated in this view; the enclosing method is not visible.
((INcApplicationContext) integrationUtil.ncs[0].getApplicationContext()).getDatasetLifecycleManager();
// iterate over metadata dataset ids 1..maxMetadatasetId (loop body not visible here)
int maxMetadatasetId = 14;
for (int i = 1; i <= maxMetadatasetId; i++) {
/**
 * Verifies that a WAIT log record appended by a transactor thread is flushed within the
 * 30-second join timeout.
 */
@Test
public void waitLogTest() throws Exception {
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    LogRecord logRecord = new LogRecord();
    final long txnId = 1;
    logRecord.setTxnCtx(TransactionContextFactory.create(new TxnId(txnId),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL)));
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.WAIT);
    logRecord.setTxnId(txnId);
    // NOTE(review): isFlushed(boolean) appears to act as a setter here — verify against LogRecord's API
    logRecord.isFlushed(false);
    logRecord.computeAndSetLogSize();
    // append the record from a separate thread; log(...) is expected to block until flushed
    Thread transactor = new Thread(() -> {
        final LogManager logManager = (LogManager) ncAppCtx.getTransactionSubsystem().getLogManager();
        logManager.log(logRecord);
    });
    transactor.start();
    transactor.join(TimeUnit.SECONDS.toMillis(30));
    Assert.assertTrue(logRecord.isFlushed());
}
// NOTE(review): method is truncated in this view; only the setup lines are visible.
@Test
public void deleteInvalidComponents() throws Exception {
    // application context and node id of the first NC in the test cluster
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    final String nodeId = ncAppCtx.getServiceContext().getNodeId();
    final String datasetName = "ds";
// NOTE(review): method is truncated in this view; only the setup lines are visible.
@Test
public void interruptedLogPageSwitch() throws Exception {
    // application context and node id of the first NC in the test cluster
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    final String nodeId = ncAppCtx.getServiceContext().getNodeId();
@Test public void fullMergeTest() throws Exception { String datasetName = "ds"; TestDataUtil.createIdOnlyDataset(datasetName); INcApplicationContext appCtx = (INcApplicationContext) (integrationUtil.ncs[0].getApplicationContext()); IDatasetLifecycleManager dlcm = appCtx.getDatasetLifecycleManager(); IMetadataIndex dsIdx = MetadataPrimaryIndexes.DATASET_DATASET; DatasetInfo datasetInfo = dlcm.getDatasetInfo(dsIdx.getDatasetId().getId()); // flush to ensure multiple disk components dlcm.flushAllDatasets(); datasetInfo.waitForIO(); AbstractLSMIndex index = (AbstractLSMIndex) dlcm.getIndex(dsIdx.getDatasetId().getId(), dsIdx.getResourceId()); Assert.assertTrue(index.getDiskComponents().size() > 1); // trigger full merge and ensure we have a single disk component when merge completes testExecutor.executeSqlppUpdateOrDdl("COMPACT DATASET Metadata.`Dataset`;", TestCaseContext.OutputFormat.CLEAN_JSON); datasetInfo.waitForIO(); Assert.assertTrue(index.getDiskComponents().size() == 1); } }
// Fragment (enclosing method not visible): release metadata locks, then flush all
// datasets via the first NC's dataset lifecycle manager.
metadataProvider.getLocks().unlock();
INcApplicationContext appCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
IDatasetLifecycleManager dlcm = appCtx.getDatasetLifecycleManager();
dlcm.flushAllDatasets();