/**
 * Drops the table from the Helix cluster.
 *
 * @param tableName Name of the table to be dropped
 * @return Response indicating whether the table was dropped
 */
public PinotResourceManagerResponse dropTable(String tableName) {
  if (!_helixAdmin.getResourcesInCluster(_helixClusterName).contains(tableName)) {
    return PinotResourceManagerResponse.failure("Table " + tableName + " not found");
  }
  if (getSegmentsFor(tableName).size() != 0) {
    return PinotResourceManagerResponse.failure("Table " + tableName + " has segments, drop them first");
  }
  _helixAdmin.dropResource(_helixClusterName, tableName);

  // Remove the table from the property store
  ZKMetadataProvider.removeResourceSegmentsFromPropertyStore(getPropertyStore(), tableName);
  ZKMetadataProvider.removeResourceConfigFromPropertyStore(getPropertyStore(), tableName);

  return PinotResourceManagerResponse.success("Table " + tableName + " dropped");
}
public void deleteOfflineTable(String tableName) {
  String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(tableName);

  // Remove the table from brokerResource
  HelixHelper.removeResourceFromBrokerIdealState(_helixZkManager, offlineTableName);

  // Drop the table
  if (_helixAdmin.getResourcesInCluster(_helixClusterName).contains(offlineTableName)) {
    _helixAdmin.dropResource(_helixClusterName, offlineTableName);
  }

  // Remove all segments for the table
  _segmentDeletionManager.removeSegmentsFromStore(offlineTableName, getSegmentsFor(offlineTableName));
  ZKMetadataProvider.removeResourceSegmentsFromPropertyStore(_propertyStore, offlineTableName);

  // Remove table config
  ZKMetadataProvider.removeResourceConfigFromPropertyStore(_propertyStore, offlineTableName);

  // Remove replica group partition assignment
  ZKMetadataProvider.removeInstancePartitionAssignmentFromPropertyStore(_propertyStore, offlineTableName);
}
"expected <=" + maxNumSegmentsPerInstance + " actual:" + instance2NumSegmentsMap.get(instance)); _helixAdmin.dropResource(HELIX_CLUSTER_NAME, TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME_BALANCED));
public synchronized void deleteTopicInMirrorMaker(String topicName) {
  _helixAdmin.dropResource(_helixClusterName, topicName);
}
public void deleteRealtimeTable(String tableName) {
  String realtimeTableName = TableNameBuilder.REALTIME.tableNameWithType(tableName);

  // Remove the table from brokerResource
  HelixHelper.removeResourceFromBrokerIdealState(_helixZkManager, realtimeTableName);

  // Cache the state and drop the table
  Set<String> instancesForTable = null;
  if (_helixAdmin.getResourcesInCluster(_helixClusterName).contains(realtimeTableName)) {
    instancesForTable = getAllInstancesForTable(realtimeTableName);
    _helixAdmin.dropResource(_helixClusterName, realtimeTableName);
  }

  // Remove all segments for the table
  _segmentDeletionManager.removeSegmentsFromStore(realtimeTableName, getSegmentsFor(realtimeTableName));
  ZKMetadataProvider.removeResourceSegmentsFromPropertyStore(_propertyStore, realtimeTableName);

  // Remove table config
  ZKMetadataProvider.removeResourceConfigFromPropertyStore(_propertyStore, realtimeTableName);

  // Remove groupId/partitionId mapping for HLC table
  if (instancesForTable != null) {
    for (String instance : instancesForTable) {
      InstanceZKMetadata instanceZKMetadata = ZKMetadataProvider.getInstanceZKMetadata(_propertyStore, instance);
      if (instanceZKMetadata != null) {
        instanceZKMetadata.removeResource(realtimeTableName);
        ZKMetadataProvider.setInstanceZKMetadata(_propertyStore, instanceZKMetadata);
      }
    }
  }
}
public void dropResourceFromCluster(String clusterName, String resourceName) {
  _admin.dropResource(clusterName, resourceName);
}
public void activateCluster(String clusterName, String grandCluster, boolean enable) {
  if (enable) {
    _admin.addClusterToGrandCluster(clusterName, grandCluster);
  } else {
    _admin.dropResource(grandCluster, clusterName);
  }
}
public synchronized void deletePipelineInMirrorMaker(String pipeline) {
  _lock.lock();
  try {
    _helixAdmin.dropResource(_helixClusterName, pipeline);

    List<String> deletedInstances = new ArrayList<>();
    List<TopicPartition> tpToDelete = new ArrayList<>();
    for (TopicPartition tp : _routeToInstanceMap.keySet()) {
      if (tp.getTopic().equals(pipeline)) {
        deletedInstances.addAll(_routeToInstanceMap.get(tp));
        tpToDelete.add(tp);
      }
    }
    for (TopicPartition tp : tpToDelete) {
      _routeToInstanceMap.remove(tp);
    }
    _availableWorkerList.addAll(deletedInstances);
  } finally {
    _lock.unlock();
  }
}
public synchronized void deletePipelineInMirrorMaker(String pipeline) {
  // TODO: delete topic first
  _lock.lock();
  try {
    LOGGER.info("Trying to delete pipeline: {}", pipeline);
    _workerHelixManager.deletePipelineInMirrorMaker(pipeline);
    _helixAdmin.dropResource(_helixClusterName, pipeline);
    _pipelineToInstanceMap.remove(pipeline);

    // Maybe clear instanceHolder's worker set
    List<String> topicsToDelete = new ArrayList<>();
    for (String topic : _topicToPipelineInstanceMap.keySet()) {
      if (_topicToPipelineInstanceMap.get(topic).containsKey(pipeline)) {
        _topicToPipelineInstanceMap.get(topic).remove(pipeline);
      }
      if (_topicToPipelineInstanceMap.get(topic).isEmpty()) {
        topicsToDelete.add(topic);
      }
    }
    for (String topic : topicsToDelete) {
      _topicToPipelineInstanceMap.remove(topic);
    }
  } finally {
    _lock.unlock();
  }
}
@DELETE @Path("{resourceName}") public Response deleteResource(@PathParam("clusterId") String clusterId, @PathParam("resourceName") String resourceName) { HelixAdmin admin = getHelixAdmin(); try { admin.dropResource(clusterId, resourceName); } catch (Exception e) { _logger.error("Error in deleting a resource: " + resourceName, e); return serverError(); } return OK(); }
private void removeJob(String queueName, String jobName) {
  // Remove the job from the queue in the DAG
  removeJobFromDag(queueName, jobName);

  // Delete the ideal state and resource config for the job
  final String namespacedJobName = TaskUtil.getNamespacedJobName(queueName, jobName);
  _admin.dropResource(_clusterName, namespacedJobName);

  // Update the queue's property to remove the job from JOB_STATES if it is already started
  removeJobStateFromQueue(queueName, jobName);

  // Delete the job from the property store
  removeJobContext(_propertyStore, jobName);
}
public synchronized void deleteTopicInMirrorMaker(String topicName, String src, String dst, String pipeline)
    throws Exception {
  _lock.lock();
  try {
    LOGGER.info("Trying to delete topic: {} in pipeline: {}", topicName, pipeline);
    InstanceTopicPartitionHolder instance = _topicToPipelineInstanceMap.get(topicName).get(pipeline);
    IdealState currIdealState = getIdealStateForTopic(topicName);
    if (currIdealState.getPartitionSet().contains(instance.getRouteString())
        && currIdealState.getNumPartitions() == 1) {
      _helixAdmin.dropResource(_helixClusterName, topicName);
    } else {
      _helixAdmin.setResourceIdealState(_helixClusterName, topicName,
          IdealStateBuilder.shrinkCustomIdealStateFor(currIdealState, topicName, instance.getRouteString()));
    }
    TopicPartition tp = _srcKafkaValidationManager.getClusterToObserverMap().get(src)
        .getTopicPartitionWithRefresh(topicName);
    instance.removeTopicPartition(tp);
    _topicToPipelineInstanceMap.get(topicName).remove(pipeline);
    if (instance.getServingTopicPartitionSet().isEmpty()) {
      _availableControllerList.add(instance.getInstanceName());
      _assignedControllerCount.dec();
    }
  } finally {
    _lock.unlock();
  }
}
@Test
public void testDropResource() {
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;

  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

  HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
  tool.addCluster(clusterName, true);
  Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient), "Cluster should be setup");

  tool.addStateModelDef(clusterName, "MasterSlave",
      new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave()));
  tool.addResource(clusterName, "test-db", 4, "MasterSlave");
  Map<String, String> resourceConfig = new HashMap<String, String>();
  resourceConfig.put("key1", "value1");
  tool.setConfig(new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE)
      .forCluster(clusterName).forResource("test-db").build(), resourceConfig);

  PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
  Assert.assertTrue(_gZkClient.exists(keyBuilder.idealStates("test-db").getPath()),
      "test-db ideal-state should exist");
  Assert.assertTrue(_gZkClient.exists(keyBuilder.resourceConfig("test-db").getPath()),
      "test-db resource config should exist");

  tool.dropResource(clusterName, "test-db");

  Assert.assertFalse(_gZkClient.exists(keyBuilder.idealStates("test-db").getPath()),
      "test-db ideal-state should be dropped");
  Assert.assertFalse(_gZkClient.exists(keyBuilder.resourceConfig("test-db").getPath()),
      "test-db resource config should be dropped");

  tool.dropCluster(clusterName);
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test(dataProvider = "rebalanceModes") public void testDisablePersist(RebalanceMode rebalanceMode) throws Exception { String testDb = "TestDB2-" + rebalanceMode.name(); _gSetupTool.addResourceToCluster(CLUSTER_NAME, testDb, 5, BuiltInStateModelDefinitions.LeaderStandby.name(), rebalanceMode.name()); _gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, testDb, 3); BestPossibleExternalViewVerifier.Builder verifierBuilder = new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME).setZkAddr(ZK_ADDR) .setResources(new HashSet<String>(Collections.singleton(testDb))); Assert.assertTrue(verifierBuilder.build().verifyByPolling()); // kill 1 node _participants[0].syncStop(); Set<String> liveInstances = new HashSet<String>(_instanceNames); liveInstances.remove(_participants[0].getInstanceName()); verifierBuilder.setExpectLiveInstances(liveInstances); Assert.assertTrue(verifierBuilder.build().verifyByPolling()); IdealState idealState = _gSetupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, testDb); Set<String> excludedInstances = new HashSet<String>(); excludedInstances.add(_participants[0].getInstanceName()); verifyAssignmentInIdealStateWithPersistDisabled(idealState, excludedInstances); // clean up _gSetupTool.getClusterManagementTool().dropResource(CLUSTER_NAME, testDb); _participants[0] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, _participants[0].getInstanceName()); _participants[0].syncStart(); }
@Test (dependsOnMethods = "testParticipantUnavailable") public void testTagSetIncorrect() throws Exception { _gSetupTool.addResourceToCluster(CLUSTER_NAME, testDb, 5, BuiltInStateModelDefinitions.MasterSlave.name(), RebalanceMode.FULL_AUTO.name()); ZkHelixClusterVerifier verifier = new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME).setZkAddr(ZK_ADDR) .setResources(new HashSet<>(Collections.singleton(testDb))).build(); Assert.assertTrue(verifier.verifyByPolling()); // Verify there is no rebalance error logged Assert.assertNull(accessor.getProperty(errorNodeKey)); checkRebalanceFailureGauge(false); checkResourceBestPossibleCalFailureState(ResourceMonitor.RebalanceStatus.NORMAL, testDb); // set expected instance tag IdealState is = _gSetupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, testDb); is.setInstanceGroupTag("RandomTag"); _gSetupTool.getClusterManagementTool().setResourceIdealState(CLUSTER_NAME, testDb, is); _gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, testDb, 3); // Verify there is rebalance error logged pollForError(accessor, errorNodeKey); checkRebalanceFailureGauge(true); checkResourceBestPossibleCalFailureState( ResourceMonitor.RebalanceStatus.BEST_POSSIBLE_STATE_CAL_FAILED, testDb); // clean up _gSetupTool.getClusterManagementTool().dropResource(CLUSTER_NAME, testDb); }