/**
 * Creates the {@link NodeManager} that {@link #createConfigs()} wires into the
 * servers under test. Override to substitute a different implementation.
 *
 * @return a non-replicated in-VM node manager
 * @throws Exception if the node manager cannot be created
 */
protected NodeManager createNodeManager() throws Exception {
   final NodeManager inVmManager = new InVMNodeManager(false);
   return inVmManager;
}
@Override
public SimpleString readNodeId() throws ActiveMQIllegalStateException, IOException {
   // In-VM variant: the id lives in memory, so this simply returns the
   // current value — no file access despite the IOException in the signature
   // (declared to satisfy the NodeManager contract).
   return getNodeId();
}
}
/**
 * Creates an in-VM node manager rooted at the given directory.
 *
 * <p>The node is identified by a freshly generated UUID, and two
 * single-permit semaphores provide the mutual exclusion normally supplied
 * by file locks for the live and backup roles.
 *
 * @param replicatedBackup whether this manager serves a replicated backup
 * @param directory        journal directory handed to the superclass
 */
public InVMNodeManager(boolean replicatedBackup, File directory) {
   super(replicatedBackup, directory);
   setUUID(UUIDGenerator.getInstance().generateUUID());
   liveLock = new Semaphore(1);
   backupLock = new Semaphore(1);
}
@Override
protected NodeManager createNodeManager() throws Exception {
   // Live and backup run in the same VM here, so the in-memory,
   // non-replicated node manager is sufficient.
   NodeManager manager = new InVMNodeManager(false);
   return manager;
}
/**
 * Creates the {@link NodeManager} that {@link #createReplicatedConfigs()}
 * uses for the replicated backup. Override to substitute a different
 * implementation.
 *
 * @param backupConfig the backup server's configuration, used for its journal location
 * @return an in-VM node manager flagged as a replicated backup
 */
protected NodeManager createReplicatedBackupNodeManager(Configuration backupConfig) {
   final File journalLocation = backupConfig.getJournalLocation();
   return new InVMNodeManager(true, journalLocation);
}
/**
 * Runs the supplied {@link NodeManagerAction}s concurrently against one
 * shared in-VM node manager, one thread per action.
 * NOTE(review): this excerpt is truncated — the loop populating
 * {@code nodeRunners} and {@code threads} continues past this fragment.
 */
public void performWork(NodeManagerAction... actions) throws Exception {
   // Single shared lock-state instance exercised by all actions.
   NodeManager nodeManager = new InVMNodeManager(false);
   List<NodeRunner> nodeRunners = new ArrayList<>();
   // One worker thread per action.
   Thread[] threads = new Thread[actions.length];
/**
 * Creates the node manager matching the parameterized {@code nodeManagerType}:
 * a plain in-memory one, or a JDBC-backed one with its own daemon thread pools.
 */
@Override
protected NodeManager createNodeManager() throws Exception {
   switch (nodeManagerType) {
      case InVM:
         return new InVMNodeManager(false);
      case Jdbc:
         // Daemon threads so a leaked pool cannot keep the test JVM alive.
         final ThreadFactory daemonThreadFactory = t -> {
            final Thread th = new Thread(t);
            th.setDaemon(true);
            return th;
         };
         final ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(daemonThreadFactory);
         // Pools are collected into fields — presumably for teardown; confirm against the test's cleanup.
         scheduledExecutorServices.add(scheduledExecutorService);
         final ExecutorService executor = Executors.newFixedThreadPool(2, daemonThreadFactory);
         executors.add(executor);
         final DatabaseStorageConfiguration dbConf = createDefaultDatabaseStorageConfiguration();
         final ExecutorFactory executorFactory = new OrderedExecutorFactory(executor);
         // Any critical IO error in the JDBC node manager fails the test immediately.
         return JdbcNodeManager.with(dbConf, scheduledExecutorService, executorFactory, (code, message, file) -> {
            code.printStackTrace();
            Assert.fail(message);
         });
      default:
         throw new AssertionError("enum type not supported!");
   }
}
/**
 * Selects the {@link NodeManager} implementation from the server configuration:
 * in-VM when persistence is off, JDBC-backed for shared-store HA on a database
 * store, and file-lock based otherwise.
 *
 * @param directory         journal directory used by the file-lock manager
 * @param replicatingBackup whether this node is a replicating backup
 * @return the node manager to coordinate live/backup activation
 */
protected NodeManager createNodeManager(final File directory, boolean replicatingBackup) {
   NodeManager manager;
   if (!configuration.isPersistenceEnabled()) {
      // No persistence: in-memory lock state is enough.
      manager = new InVMNodeManager(replicatingBackup);
   } else if (configuration.getStoreConfiguration() != null && configuration.getStoreConfiguration().getStoreType() == StoreConfiguration.StoreType.DATABASE) {
      // Database store: only shared-store HA (or no HA) is supported.
      final HAPolicyConfiguration.TYPE haType = configuration.getHAPolicyConfiguration() == null ? null : configuration.getHAPolicyConfiguration().getType();
      if (haType == HAPolicyConfiguration.TYPE.SHARED_STORE_MASTER || haType == HAPolicyConfiguration.TYPE.SHARED_STORE_SLAVE) {
         if (replicatingBackup) {
            throw new IllegalArgumentException("replicatingBackup is not supported yet while using JDBC persistence");
         }
         final DatabaseStorageConfiguration dbConf = (DatabaseStorageConfiguration) configuration.getStoreConfiguration();
         manager = JdbcNodeManager.with(dbConf, scheduledPool, executorFactory, shutdownOnCriticalIO);
      } else if (haType == null || haType == HAPolicyConfiguration.TYPE.LIVE_ONLY) {
         if (logger.isDebugEnabled()) {
            logger.debug("Detected no Shared Store HA options on JDBC store");
         }
         //LIVE_ONLY should be the default HA option when HA isn't configured
         manager = new FileLockNodeManager(directory, replicatingBackup, configuration.getJournalLockAcquisitionTimeout());
      } else {
         throw new IllegalArgumentException("JDBC persistence allows only Shared Store HA options");
      }
   } else {
      // File-based journal: coordinate via file locks in the journal directory.
      manager = new FileLockNodeManager(directory, replicatingBackup, configuration.getJournalLockAcquisitionTimeout());
   }
   return manager;
}
/**
 * Resets all per-test cluster state: holder arrays sized for the maximum
 * cluster, and one in-VM node manager per server slot pointing at that
 * server's journal directory.
 */
@Override
@Before
public void setUp() throws Exception {
   super.setUp();
   forceGC();
   ActiveMQTestBase.checkFreePort(ClusterTestBase.PORTS);
   consumers = new ConsumerHolder[ClusterTestBase.MAX_CONSUMERS];
   servers = new ActiveMQServer[ClusterTestBase.MAX_SERVERS];
   timeStarts = new long[ClusterTestBase.MAX_SERVERS];
   sfs = new ClientSessionFactory[ClusterTestBase.MAX_SERVERS];
   nodeManagers = new NodeManager[ClusterTestBase.MAX_SERVERS];
   for (int node = 0; node < nodeManagers.length; node++) {
      // replicatedBackup flag tracks whether the suite runs shared-store.
      nodeManagers[node] = new InVMNodeManager(isSharedStore(), new File(getJournalDir(node, true)));
   }
   locators = new ServerLocator[ClusterTestBase.MAX_SERVERS];
}
/** * Test the broadcasted packages length.<br> * Broadcast and MultiCast techniques are commonly limited in size by * underlying hardware. Broadcast and MultiCast protocols are typically not * guaranteed (UDP) and as such large packages may be silently discarded by * underlying hardware.<br> * This test validates that Artemis Server does not broadcast packages above * a size of 1500 bytes. The limit is not derived from any normative * documents, but is rather derived from common MTU for network equipment. */ @Test public void testBroadcastDatagramLength() throws Throwable { BroadcastEndpointFactoryImpl befi; befi = new BroadcastEndpointFactoryImpl(); InVMNodeManager node; node = new InVMNodeManager(false); String name; name = "BroadcastGroupImplTest"; BroadcastGroupImpl test; test = new BroadcastGroupImpl(node, name, 1000, null, befi); TransportConfiguration tcon; tcon = new TransportConfiguration(getClass().getName()); test.addConnector(tcon); // Broadcast test.broadcastConnectors(); // Make sure we sent one package assertEquals("Incorrect number of sent datagrams", 1, befi.sent); } }
@Test
public void testMultipleFailovers2LiveServers() throws Exception {
   // Two independent live/backup pairs, each with its own node manager.
   // The ctor flag is replicatedBackup, so !sharedStore means: replicate
   // whenever the pair does not share a store.
   NodeManager nodeManager1 = new InVMNodeManager(!sharedStore);
   NodeManager nodeManager2 = new InVMNodeManager(!sharedStore);
   createLiveConfig(nodeManager1, 0, 3, 4, 5);
   createBackupConfig(nodeManager1, 0, 1, true, new int[]{0, 2}, 3, 4, 5);
@Test
public void testMultipleFailovers() throws Exception {
   // The ctor flag is replicatedBackup: replicate when not using a shared store.
   nodeManager = new InVMNodeManager(!sharedStore);
   createLiveConfig(0);
   createBackupConfig(0, 1, 0, 2, 3, 4, 5);
// Non-replicated (replicatedBackup=false) in-VM node manager for this test.
nodeManager = new InVMNodeManager(false);
// Live (0) and its backup (2) receive the SAME node manager instance,
// so activation is coordinated through shared in-VM lock state.
NodeManager nodeManager = new InVMNodeManager(false);
server0 = createActiveMQServer(0, server0Params, isNetty(), nodeManager);
server2 = createBackupActiveMQServer(2, server2Params, isNetty(), 0, nodeManager);
@Test
public void testFailoverAndReconnectAfterAFewTries() throws Exception {
   // Non-replicated in-VM node manager shared by the live/backup pair set up below.
   NodeManager nodeManager = new InVMNodeManager(false);
@Test
public void testFailoverAndReconnectImmediately() throws Exception {
   // One node manager instance is given to both live (0) and backup (2),
   // coordinating their activation through shared in-VM state.
   NodeManager nodeManager = new InVMNodeManager(false);
   server0 = createActiveMQServer(0, server0Params, isNetty(), nodeManager);
   server2 = createBackupActiveMQServer(2, server2Params, isNetty(), 0, nodeManager);
/**
 * Builds a shared-store live/backup pair: the backup is a slave with
 * scale-down and restart disabled; the live is a master that fails over
 * on server shutdown. Both know each other's connectors and are joined
 * by a cluster connection.
 */
@Override
protected void createConfigs() throws Exception {
   nodeManager = new InVMNodeManager(false);

   TransportConfiguration liveConnector = getConnectorTransportConfiguration(true);
   TransportConfiguration backupConnector = getConnectorTransportConfiguration(false);

   // Fix: removed a leftover debug line ("backup config created - mnovak")
   // that printed a developer's name to stdout on every run.
   backupConfig = super.createDefaultConfig(false).clearAcceptorConfigurations().addAcceptorConfiguration(getAcceptorTransportConfiguration(false)).setHAPolicyConfiguration(new SharedStoreSlavePolicyConfiguration().setScaleDownConfiguration(new ScaleDownConfiguration().setEnabled(false)).setRestartBackup(false)).addConnectorConfiguration(liveConnector.getName(), liveConnector).addConnectorConfiguration(backupConnector.getName(), backupConnector).addClusterConfiguration(basicClusterConnectionConfig(backupConnector.getName(), liveConnector.getName()));
   backupServer = createTestableServer(backupConfig);

   liveConfig = super.createDefaultConfig(false).clearAcceptorConfigurations().addAcceptorConfiguration(getAcceptorTransportConfiguration(true)).setHAPolicyConfiguration(new SharedStoreMasterPolicyConfiguration().setFailoverOnServerShutdown(true)).addClusterConfiguration(basicClusterConnectionConfig(liveConnector.getName())).addConnectorConfiguration(liveConnector.getName(), liveConnector);
   liveServer = createTestableServer(liveConfig);
}
/**
 * Builds two colocated servers: each is a shared-store master that also
 * requests a backup from its peer (colocated HA policy). Server 1's
 * colocated backup scales down to server 1's own connector.
 */
@Override
protected void createConfigs() throws Exception {
   // Separate node managers — each server slot owns its own lock state.
   nodeManager0 = new InVMNodeManager(false);
   nodeManager1 = new InVMNodeManager(false);
   TransportConfiguration liveConnector0 = getConnectorTransportConfiguration(true, 0);
   TransportConfiguration liveConnector1 = getConnectorTransportConfiguration(true, 1);
   backupConfig = super.createDefaultInVMConfig(1)
      .clearAcceptorConfigurations()
      .addAcceptorConfiguration(getAcceptorTransportConfiguration(true, 1))
      .setHAPolicyConfiguration(new ColocatedPolicyConfiguration().setRequestBackup(true).setLiveConfig(new SharedStoreMasterPolicyConfiguration()).setBackupConfig(new SharedStoreSlavePolicyConfiguration().setScaleDownConfiguration(new ScaleDownConfiguration().addConnector(liveConnector1.getName()))))
      .addConnectorConfiguration(liveConnector0.getName(), liveConnector0)
      .addConnectorConfiguration(liveConnector1.getName(), liveConnector1)
      .addClusterConfiguration(basicClusterConnectionConfig(liveConnector1.getName(), liveConnector0.getName()));
   // Colocated servers take both node managers: own first, peer's second.
   backupServer = createColocatedTestableServer(backupConfig, nodeManager1, nodeManager0, 1);
   liveConfig = super.createDefaultInVMConfig(0)
      .clearAcceptorConfigurations()
      .addAcceptorConfiguration(getAcceptorTransportConfiguration(true, 0))
      .setHAPolicyConfiguration(new ColocatedPolicyConfiguration().setRequestBackup(true).setBackupRequestRetryInterval(1000).setLiveConfig(new SharedStoreMasterPolicyConfiguration()).setBackupConfig(new SharedStoreSlavePolicyConfiguration().setScaleDownConfiguration(new ScaleDownConfiguration())))
      .addConnectorConfiguration(liveConnector0.getName(), liveConnector0)
      .addConnectorConfiguration(liveConnector1.getName(), liveConnector1)
      .addClusterConfiguration(basicClusterConnectionConfig(liveConnector0.getName(), liveConnector1.getName()));
   liveServer = createColocatedTestableServer(liveConfig, nodeManager0, nodeManager1, 0);
}
/**
 * Builds a shared-store live/backup pair on in-VM transports: a slave
 * backup and a master live, each aware of both connectors and joined by
 * cluster connections in opposite directions.
 */
@Override
protected void createConfigs() throws Exception {
   nodeManager = new InVMNodeManager(false);

   TransportConfiguration liveConnector = getConnectorTransportConfiguration(true);
   TransportConfiguration backupConnector = getConnectorTransportConfiguration(false);

   // Backup: shared-store slave, clustering from its own connector to the live one.
   backupConfig = super.createDefaultInVMConfig();
   backupConfig = backupConfig.clearAcceptorConfigurations();
   backupConfig = backupConfig.addAcceptorConfiguration(getAcceptorTransportConfiguration(false));
   backupConfig = backupConfig.setHAPolicyConfiguration(new SharedStoreSlavePolicyConfiguration());
   backupConfig = backupConfig.addConnectorConfiguration(liveConnector.getName(), liveConnector);
   backupConfig = backupConfig.addConnectorConfiguration(backupConnector.getName(), backupConnector);
   backupConfig = backupConfig.addClusterConfiguration(basicClusterConnectionConfig(backupConnector.getName(), liveConnector.getName()));
   backupServer = createTestableServer(backupConfig);

   // Live: shared-store master, clustering from its own connector to the backup one.
   liveConfig = super.createDefaultInVMConfig();
   liveConfig = liveConfig.clearAcceptorConfigurations();
   liveConfig = liveConfig.addAcceptorConfiguration(getAcceptorTransportConfiguration(true));
   liveConfig = liveConfig.setHAPolicyConfiguration(new SharedStoreMasterPolicyConfiguration());
   liveConfig = liveConfig.addClusterConfiguration(basicClusterConnectionConfig(liveConnector.getName(), backupConnector.getName()));
   liveConfig = liveConfig.addConnectorConfiguration(liveConnector.getName(), liveConnector);
   liveConfig = liveConfig.addConnectorConfiguration(backupConnector.getName(), backupConnector);
   liveServer = createTestableServer(liveConfig);
}
/**
 * Builds a shared-store live/backup pair where the backup does NOT fail
 * back ({@code allowFailBack=false}) when the original live returns.
 */
@Override
protected void createConfigs() throws Exception {
   nodeManager = new InVMNodeManager(false);
   TransportConfiguration liveConnector = getConnectorTransportConfiguration(true);
   TransportConfiguration backupConnector = getConnectorTransportConfiguration(false);
   // Backup: shared-store slave that keeps the live role after failover.
   backupConfig = super.createDefaultInVMConfig()
      .clearAcceptorConfigurations()
      .addAcceptorConfiguration(getAcceptorTransportConfiguration(false))
      .setHAPolicyConfiguration(new SharedStoreSlavePolicyConfiguration().setAllowFailBack(false))
      .addConnectorConfiguration(liveConnector.getName(), liveConnector)
      .addConnectorConfiguration(backupConnector.getName(), backupConnector)
      .addClusterConfiguration(basicClusterConnectionConfig(backupConnector.getName(), liveConnector.getName()));
   backupServer = createTestableServer(backupConfig);
   // Live: shared-store master.
   liveConfig = super.createDefaultInVMConfig()
      .clearAcceptorConfigurations()
      .addAcceptorConfiguration(getAcceptorTransportConfiguration(true))
      .setHAPolicyConfiguration(new SharedStoreMasterPolicyConfiguration())
      .addConnectorConfiguration(liveConnector.getName(), liveConnector)
      .addConnectorConfiguration(backupConnector.getName(), backupConnector)
      .addClusterConfiguration(basicClusterConnectionConfig(liveConnector.getName(), backupConnector.getName()));
   liveServer = createTestableServer(liveConfig);
}