@Override
public void deinitialize() throws HyracksDataException {
    treeIndexHelper.close();
}
@Override
public void deinitialize() throws HyracksDataException {
    indexHelper.close();
}
@Override
public void close() throws HyracksDataException {
    try {
        super.close();
    } finally {
        if (primaryIndex != null) {
            primaryIndexHelper.close();
        }
    }
}
@Override
public void close() throws HyracksDataException {
    if (lsmIndex != null) {
        try {
            indexHelper.close();
        } finally {
            writer.close();
        }
    }
}
@Override
public void close() throws HyracksDataException {
    if (index != null) {
        try {
            writer.close();
        } finally {
            indexHelper.close();
        }
    }
}
/**
 * Close the IIndexDataflowHelper and suppress any Throwable thrown by the close call.
 * This method must NEVER throw any Throwable.
 *
 * @param indexHelper
 *            the indexHelper to close
 * @param root
 *            the first exception encountered during release of resources
 * @return the root Throwable if not null; otherwise the Throwable thrown by close, if any; otherwise null
 */
public static Throwable close(IIndexDataflowHelper indexHelper, Throwable root) {
    if (indexHelper != null) {
        try {
            indexHelper.close();
        } catch (Throwable th) { // NOSONAR: suppressed into root and returned to the caller
            try {
                LOGGER.log(Level.WARN, "Failure closing a closeable resource", th);
            } catch (Throwable loggingFailure) {
                // ignore logging failures; this method must never throw
            }
            root = ExceptionUtils.suppress(root, th);
        }
    }
    return root;
}
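A minimal usage sketch for the helper above: it assumes the static close(...) is hosted in a utility class (hypothetically named IndexHelperUtils here) and that helperA and helperB are IIndexDataflowHelper fields of the enclosing operator; failures are accumulated across both closes and surfaced exactly once at the end.

// Hedged usage sketch (not from the source). IndexHelperUtils, helperA, and
// helperB are hypothetical names; HyracksDataException.create(Throwable) is
// the same factory used elsewhere in these snippets.
@Override
public void deinitialize() throws HyracksDataException {
    Throwable failure = null;
    failure = IndexHelperUtils.close(helperA, failure); // suppresses, never throws
    failure = IndexHelperUtils.close(helperB, failure); // chains onto the first failure
    if (failure != null) {
        // surface the accumulated failure once, after all closes were attempted
        throw HyracksDataException.create(failure);
    }
}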
@Override
public void close() throws HyracksDataException {
    if (index != null) {
        try {
            bulkLoader.end();
        } catch (Throwable th) {
            throw HyracksDataException.create(th);
        } finally {
            try {
                indexHelper.close();
            } finally {
                writer.close();
            }
        }
    }
}
@Override
public void close() throws HyracksDataException {
    try {
        // bulkLoader can be null if an exception is thrown before it is initialized
        if (bulkLoader != null) {
            bulkLoader.end();
        }
    } catch (Throwable th) {
        throw HyracksDataException.create(th);
    } finally {
        if (index != null) { // only close the helper if the index was actually opened
            try {
                indexHelper.close();
            } finally {
                writer.close();
            }
        }
    }
}
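The end-then-close-then-close sequence above recurs across these operators, with only the ordering and null guards varying. One hypothetical way to express it once (a refactoring sketch, not code from the source) is a small varargs helper built on the same ExceptionUtils.suppress and HyracksDataException.create used in the snippets above; CloseAction and closeAll are invented names.

// Hypothetical refactoring sketch (not in the source): attempts every close
// action in order, chains later failures onto the first via suppression, and
// throws once at the end so no resource is skipped.
@FunctionalInterface
interface CloseAction {
    void run() throws Exception;
}

static void closeAll(CloseAction... actions) throws HyracksDataException {
    Throwable root = null;
    for (CloseAction action : actions) {
        try {
            action.run();
        } catch (Throwable th) { // NOSONAR: rethrown below
            root = ExceptionUtils.suppress(root, th);
        }
    }
    if (root != null) {
        throw HyracksDataException.create(root);
    }
}

With that in place, the close() body above could read closeAll(() -> { if (bulkLoader != null) bulkLoader.end(); }, indexHelper::close, writer::close), preserving the attempt-everything semantics of the nested finally blocks.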
private void dropInUseWithWait(IHyracksTaskContext ctx, IndexDataflowHelperFactory helperFactory,
        IIndexDataflowHelper dataflowHelper) throws Exception {
    dropFailed.set(false);
    // a drop with the WAIT_ON_IN_USE option should succeed once the index is closed
    final IndexDropOperatorNodePushable dropWithWaitOp = new IndexDropOperatorNodePushable(helperFactory,
            EnumSet.of(DropOption.IF_EXISTS, DropOption.WAIT_ON_IN_USE), ctx, 0);
    Thread dropThread = new Thread(() -> {
        try {
            dropWithWaitOp.initialize();
        } catch (HyracksDataException e) {
            dropFailed.set(true);
            e.printStackTrace();
        }
    });
    dropThread.start();
    // wait for the drop thread to start
    while (dropThread.getState() == Thread.State.NEW) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    // close the index to allow the drop to complete
    dataflowHelper.close();
    dropThread.join();
    Assert.assertFalse(dropFailed.get());
}
@Override
public void initialize() throws HyracksDataException {
    IIndexBuilder indexBuilder = indexBuilderFactory.create(ctx, partition);
    IIndexDataflowHelper indexHelper =
            dataflowHelperFactory.create(ctx.getJobletContext().getServiceContext(), partition);
    FileIndexTupleTranslator filesTupleTranslator = new FileIndexTupleTranslator();
    // Build the index
    indexBuilder.build();
    // Open the index
    indexHelper.open();
    try {
        ILSMIndex index = (ILSMIndex) indexHelper.getIndexInstance();
        Map<String, Object> parameters = new HashMap<>();
        parameters.put(LSMIOOperationCallback.KEY_FLUSHED_COMPONENT_ID, LSMComponentId.DEFAULT_COMPONENT_ID);
        // Create bulk loader
        IIndexBulkLoader bulkLoader =
                index.createBulkLoader(BTree.DEFAULT_FILL_FACTOR, false, files.size(), false, parameters);
        // Load files
        for (ExternalFile file : files) {
            bulkLoader.add(filesTupleTranslator.getTupleFromFile(file));
        }
        bulkLoader.end();
    } finally {
        indexHelper.close();
    }
}
indexHelper.close();
private void createSecondaryIndex()
        throws HyracksDataException, RemoteException, ACIDException, AlgebricksException {
    SecondaryIndexInfo secondaryIndexInfo =
            nc.createSecondaryIndex(primaryIndexInfo, secondaryIndex, storageManager, 0);
    IndexDataflowHelperFactory iHelperFactory =
            new IndexDataflowHelperFactory(nc.getStorageManager(), secondaryIndexInfo.getFileSplitProvider());
    secondaryIndexDataflowHelper = iHelperFactory.create(taskCtx.getJobletContext().getServiceContext(), 0);
    secondaryIndexDataflowHelper.open();
    secondaryLsmBtree = (TestLsmBtree) secondaryIndexDataflowHelper.getIndexInstance();
    secondaryIndexDataflowHelper.close();
}
@Before
public void createIndex() throws Exception {
    List<List<String>> partitioningKeys = new ArrayList<>();
    partitioningKeys.add(Collections.singletonList("key"));
    dataset = new TestDataset(DATAVERSE_NAME, DATASET_NAME, DATAVERSE_NAME, DATA_TYPE_NAME, NODE_GROUP_NAME,
            NoMergePolicyFactory.NAME, null, new InternalDatasetDetails(null, PartitioningStrategy.HASH,
                    partitioningKeys, null, null, null, false, null),
            null, DatasetType.INTERNAL, DATASET_ID, 0);
    PrimaryIndexInfo primaryIndexInfo = nc.createPrimaryIndex(dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, null,
            storageManager, KEY_INDEXES, KEY_INDICATORS_LIST, 0);
    IndexDataflowHelperFactory iHelperFactory =
            new IndexDataflowHelperFactory(nc.getStorageManager(), primaryIndexInfo.getFileSplitProvider());
    JobId jobId = nc.newJobId();
    ctx = nc.createTestContext(jobId, 0, false);
    indexDataflowHelper = iHelperFactory.create(ctx.getJobletContext().getServiceContext(), 0);
    indexDataflowHelper.open();
    lsmBtree = (TestLsmBtree) indexDataflowHelper.getIndexInstance();
    indexDataflowHelper.close();
    txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(ctx),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL));
    insertOp = nc.getInsertPipeline(ctx, dataset, KEY_TYPES, RECORD_TYPE, META_TYPE, null, KEY_INDEXES,
            KEY_INDICATORS_LIST, storageManager, null).getLeft();
}
private void readIndex() throws HyracksDataException {
    primaryIndexDataflowHelpers = new IIndexDataflowHelper[NUM_PARTITIONS];
    primaryIndexes = new TestLsmBtree[NUM_PARTITIONS];
    for (int i = 0; i < NUM_PARTITIONS; i++) {
        IIndexDataflowHelperFactory factory = new IndexDataflowHelperFactory(nc.getStorageManager(),
                primaryIndexInfos[i].getFileSplitProvider());
        primaryIndexDataflowHelpers[i] = factory.create(testCtxs[i].getJobletContext().getServiceContext(), i);
        primaryIndexDataflowHelpers[i].open();
        primaryIndexes[i] = (TestLsmBtree) primaryIndexDataflowHelpers[i].getIndexInstance();
        primaryIndexDataflowHelpers[i].close();
    }
    secondaryIndexDataflowHelpers = new IIndexDataflowHelper[NUM_PARTITIONS];
    secondaryIndexes = new TestLsmBtree[NUM_PARTITIONS];
    for (int i = 0; i < NUM_PARTITIONS; i++) {
        IIndexDataflowHelperFactory factory = new IndexDataflowHelperFactory(nc.getStorageManager(),
                secondaryIndexInfo[i].getFileSplitProvider());
        secondaryIndexDataflowHelpers[i] = factory.create(testCtxs[i].getJobletContext().getServiceContext(), i);
        secondaryIndexDataflowHelpers[i].open();
        secondaryIndexes[i] = (TestLsmBtree) secondaryIndexDataflowHelpers[i].getIndexInstance();
        secondaryIndexDataflowHelpers[i].close();
    }
}
@Test
public void testFlushMetadataOnlyComponent() throws Exception {
    // allow all operations
    StorageTestUtils.allowAllOps(lsmBtree);
    // ensure there are no disk components and the memory component is not yet allocated
    Assert.assertEquals(0, lsmBtree.getDiskComponents().size());
    Assert.assertFalse(lsmBtree.isMemoryComponentsAllocated());
    MutableArrayValueReference key = new MutableArrayValueReference("FlushMetadataOnlyTestKey".getBytes());
    MutableArrayValueReference value = new MutableArrayValueReference("FlushMetadataOnlyTestValue".getBytes());
    indexDataflowHelper.open();
    ILSMIndexAccessor accessor = lsmBtree.createAccessor(NoOpIndexAccessParameters.INSTANCE);
    accessor.updateMeta(key, value);
    Assert.assertTrue(lsmBtree.isMemoryComponentsAllocated());
    Assert.assertTrue(lsmBtree.getCurrentMemoryComponent().isModified());
    indexDataflowHelper.close();
    // flush synchronously
    StorageTestUtils.flush(dsLifecycleMgr, lsmBtree, false);
    // assert exactly one disk component
    Assert.assertEquals(1, lsmBtree.getDiskComponents().size());
    ArrayBackedValueStorage pointable = new ArrayBackedValueStorage();
    ComponentUtils.get(lsmBtree, key, pointable);
    Assert.assertTrue(DataUtils.equals(pointable, value));
    // ensure that we can search this component
    StorageTestUtils.searchAndAssertCount(nc, PARTITION, 0);
}
@Before
public void createIndex() throws Exception {
    PrimaryIndexInfo primaryIndexInfo = StorageTestUtils.createPrimaryIndex(nc, PARTITION);
    IndexDataflowHelperFactory iHelperFactory =
            new IndexDataflowHelperFactory(nc.getStorageManager(), primaryIndexInfo.getFileSplitProvider());
    JobId jobId = nc.newJobId();
    ctx = nc.createTestContext(jobId, PARTITION, false);
    indexDataflowHelper = iHelperFactory.create(ctx.getJobletContext().getServiceContext(), PARTITION);
    indexDataflowHelper.open();
    lsmBtree = (TestLsmBtree) indexDataflowHelper.getIndexInstance();
    indexDataflowHelper.close();
}
primaryIndexDataflowHelper.open();
primaryLsmBtree = (TestLsmBtree) primaryIndexDataflowHelper.getIndexInstance();
primaryIndexDataflowHelper.close();
    writer.close();
} finally {
    treeIndexHelper.close();
@Before
public void createIndex() throws Exception {
    PrimaryIndexInfo primaryIndexInfo = StorageTestUtils.createPrimaryIndex(nc, PARTITION);
    IndexDataflowHelperFactory iHelperFactory =
            new IndexDataflowHelperFactory(nc.getStorageManager(), primaryIndexInfo.getFileSplitProvider());
    JobId jobId = nc.newJobId();
    ctx = nc.createTestContext(jobId, PARTITION, false);
    indexDataflowHelper = iHelperFactory.create(ctx.getJobletContext().getServiceContext(), PARTITION);
    indexDataflowHelper.open();
    lsmBtree = (TestLsmBtree) indexDataflowHelper.getIndexInstance();
    indexDataflowHelper.close();
    txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(ctx),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL));
    insertOp = StorageTestUtils.getInsertPipeline(nc, ctx);
    indexPath = indexDataflowHelper.getResource().getPath();
}
@Before
public void createIndex() throws Exception {
    PrimaryIndexInfo primaryIndexInfo = StorageTestUtils.createPrimaryIndex(nc, PARTITION);
    IndexDataflowHelperFactory iHelperFactory =
            new IndexDataflowHelperFactory(nc.getStorageManager(), primaryIndexInfo.getFileSplitProvider());
    JobId jobId = nc.newJobId();
    ctx = nc.createTestContext(jobId, PARTITION, false);
    indexDataflowHelper = iHelperFactory.create(ctx.getJobletContext().getServiceContext(), PARTITION);
    indexDataflowHelper.open();
    lsmBtree = (TestLsmBtree) indexDataflowHelper.getIndexInstance();
    indexDataflowHelper.close();
    txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(ctx),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL));
    insertOp = StorageTestUtils.getInsertPipeline(nc, ctx, null);
    JobId abortJobId = nc.newJobId();
    abortCtx = nc.createTestContext(abortJobId, PARTITION, false);
    abortTxnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(abortCtx),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL));
    // abortOp is initialized by each test separately
    tupleGenerator = StorageTestUtils.getTupleGenerator();
}