/** Returns a deep copy of this {@code SubmitOptions} built via the copy constructor. */
public SubmitOptions deepCopy() {
    SubmitOptions copy = new SubmitOptions(this);
    return copy;
}
/**
 * Submits a topology with the default submit options ({@link TopologyInitialStatus#ACTIVE}).
 * Delegates to {@link #submitTopologyWithOpts} after recording the call in the
 * {@code submitTopologyCalls} meter.
 *
 * @param name                name of the topology to submit
 * @param uploadedJarLocation location of the previously uploaded topology jar
 * @param jsonConf            topology configuration serialized as JSON
 * @param topology            the Thrift topology structure
 */
@Override
public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology)
        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
    submitTopologyCalls.mark();
    SubmitOptions defaultOpts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
    submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, defaultOpts);
}
/**
 * Command-line entry point for StormSQL: reads SQL statements from the file given by the
 * {@code --file} option and either explains them or submits them as a topology.
 *
 * <p>Exactly one of the explain option or the topology-name option must be given;
 * otherwise usage is printed and the process exits.
 *
 * @param args raw command-line arguments
 * @throws Exception on parse, I/O, or submission failure
 */
public static void main(String[] args) throws Exception {
    Options cliOptions = buildOptions();
    CommandLineParser cliParser = new DefaultParser();
    CommandLine cmd = cliParser.parse(cliOptions, args);

    // The SQL file argument is mandatory in every mode.
    if (!cmd.hasOption(OPTION_SQL_FILE_LONG)) {
        printUsageAndExit(cliOptions, OPTION_SQL_FILE_LONG + " is required");
    }

    String sqlFilePath = cmd.getOptionValue(OPTION_SQL_FILE_LONG);
    List<String> statements = Files.readAllLines(Paths.get(sqlFilePath), StandardCharsets.UTF_8);

    StormSql stormSql = StormSql.construct();
    @SuppressWarnings("unchecked")
    Map<String, Object> topoConf = Utils.readStormConfig();

    if (cmd.hasOption(OPTION_SQL_EXPLAIN_LONG)) {
        // Explain mode: print the query plan without submitting anything.
        stormSql.explain(statements);
    } else if (cmd.hasOption(OPTION_SQL_TOPOLOGY_NAME_LONG)) {
        // Submit mode: deploy the statements as a topology, starting ACTIVE.
        String topologyName = cmd.getOptionValue(OPTION_SQL_TOPOLOGY_NAME_LONG);
        SubmitOptions opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
        stormSql.submit(topologyName, statements, topoConf, opts, null, null);
    } else {
        printUsageAndExit(cliOptions, "Either " + OPTION_SQL_TOPOLOGY_NAME_LONG + " or "
                + OPTION_SQL_EXPLAIN_LONG + " must be presented");
    }
}
// Choose the initial topology state from the CLI flag: --inactive deploys the
// topology paused (spouts/bolts not started) so it can be activated later via
// Nimbus; otherwise it starts ACTIVE immediately.
// NOTE(review): fragment is cut off here — the closing brace of the else branch
// lies outside this view.
if (cmd.hasOption(OPTION_INACTIVE)) { LOG.info("Deploying topology in an INACTIVE state..."); submitOptions = new SubmitOptions(TopologyInitialStatus.INACTIVE); } else { LOG.info("Deploying topology in an ACTIVE state..."); submitOptions = new SubmitOptions(TopologyInitialStatus.ACTIVE);
// When credentials are present but the caller supplied no submit options,
// default to an ACTIVE initial status so the credentials can be attached below.
// NOTE(review): fragment is cut off — closing braces are outside this view.
if (!fullCreds.isEmpty()) { if (opts == null) { opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
// Verifies that a topology submitted with TopologyInitialStatus.INACTIVE does not
// open its spouts or prepare its bolts, and that tuples fed while inactive are only
// processed (and acked) after Nimbus activates the topology.
//
// Timeline under simulated time:
//   - submit INACTIVE, advance 11s: nothing should have started;
//   - feed tuple id=1, advance 9s: still no prepare/open, tuple pending;
//   - activate "test", advance 12s: components start and the pending tuple is acked.
// Relies on the shared static flags boltPrepared/spoutOpened being set by
// PrepareTrackedBolt/OpenTrackedSpout (defined elsewhere in this file).
@Test public void testSubmitInactiveTopology() throws Exception { try (LocalCluster cluster = new LocalCluster.Builder() .withSimulatedTime() .withDaemonConf(Collections.singletonMap(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true)) .build()) { FeederSpout feeder = new FeederSpout(new Fields("field1")); AckFailMapTracker tracker = new AckFailMapTracker(); feeder.setAckFailDelegate(tracker); Map<String, SpoutDetails> spoutMap = new HashMap<>(); spoutMap.put("1", Thrift.prepareSpoutDetails(feeder)); spoutMap.put("2", Thrift.prepareSpoutDetails(new OpenTrackedSpout())); Map<String, BoltDetails> boltMap = new HashMap<>(); boltMap.put("3", Thrift.prepareBoltDetails(Collections.singletonMap(Utils.getGlobalStreamId("1", null), Thrift.prepareGlobalGrouping()), new PrepareTrackedBolt())); boltPrepared = false; spoutOpened = false; StormTopology topology = Thrift.buildTopology(spoutMap, boltMap); cluster.submitTopologyWithOpts("test", Collections.singletonMap(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 10), topology, new SubmitOptions(TopologyInitialStatus.INACTIVE)); cluster.advanceClusterTime(11); feeder.feed(new Values("a"), 1); cluster.advanceClusterTime(9); assertThat(boltPrepared, is(false)); assertThat(spoutOpened, is(false)); cluster.getNimbus().activate("test"); cluster.advanceClusterTime(12); assertAcked(tracker, 1); assertThat(boltPrepared, is(true)); assertThat(spoutOpened, is(true)); } }
/** Produces an independent copy of this object using the copy constructor. */
public SubmitOptions deepCopy() {
    return new SubmitOptions(/* other= */ this);
}
// Map the --inactive CLI flag to the topology's initial status: INACTIVE deploys
// paused for later activation, the default is ACTIVE.
// NOTE(review): fragment is cut off here — the closing brace of the else branch
// lies outside this view.
if (cmd.hasOption(OPTION_INACTIVE)) { LOG.info("Deploying topology in an INACTIVE state..."); submitOptions = new SubmitOptions(TopologyInitialStatus.INACTIVE); } else { LOG.info("Deploying topology in an ACTIVE state..."); submitOptions = new SubmitOptions(TopologyInitialStatus.ACTIVE);
// Lazily create default submit options (ACTIVE) when credentials need to be
// attached but the caller passed opts == null.
// NOTE(review): fragment is cut off — closing braces are outside this view.
if (!fullCreds.isEmpty()) { if (opts == null) { opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);