public void nextTuple() {
    // Nothing queued to serve: back off briefly so we don't spin.
    if (_serveTuples.isEmpty()) {
        Utils.sleep(100);
        return;
    }
    // Serve the oldest queued tuple, tracking it as pending under a fresh message id
    // so it can be correlated on ack/fail.
    FixedTuple tuple = _serveTuples.remove(0);
    String msgId = UUID.randomUUID().toString();
    _pending.put(msgId, tuple);
    _collector.emit(tuple.stream, tuple.values, msgId);
}
/**
 * Creates a runner that resolves the model using the Storm config as returned by
 * {@code Utils.readStormConfig()}.
 *
 * @param blobKey key of PMML model in Blobstore
 * @param modelOutputs output specification forwarded to the main constructor
 *                     (semantics defined by {@code ModelOutputs})
 */
public ModelRunnerFromBlobStore(String blobKey, ModelOutputs modelOutputs) {
    this(blobKey, modelOutputs, Utils.readStormConfig());
}
public static void main(String[] args) throws Exception { int runTime = -1; Map<String, Object> topoConf = Utils.findAndReadConfigFile(args[1]); topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 1); if (args.length > 0) { long sleepMs = Integer.parseInt(args[0]); topoConf.put(SLEEP_MS, sleepMs); } if (args.length > 1) { runTime = Integer.parseInt(args[1]); } if (args.length > 2) { System.err.println("args: spoutSleepMs [runDurationSec] "); return; } topoConf.putAll(Utils.readCommandLineOpts()); // Submit topology to storm cluster Helper.runOnClusterAndPrintMetrics(runTime, "LowThroughputTopo", topoConf, getTopology(topoConf)); }
public List<Object> deserialize(ByteBuffer ser) { // Maintain backward compatibility for 0.10 byte[] b = Utils.toByteArray(ser); return Utils.tuple(new Object[]{ b }); }
/**
 * Builds the effective Storm configuration: bundled defaults, overlaid with the
 * cluster config file, overlaid with command-line options (highest precedence).
 */
public static Map<String, Object> readStormConfig() {
    Map<String, Object> merged = readDefaultConfig();
    String confFile = System.getProperty("storm.conf.file");
    // No explicit file property: fall back to storm.yaml, which is allowed to be
    // absent (mustExist = false). An explicitly named file must exist.
    Map<String, Object> clusterConf = (confFile == null || confFile.equals(""))
            ? findAndReadConfigFile("storm.yaml", false)
            : findAndReadConfigFile(confFile, true);
    merged.putAll(clusterConf);
    merged.putAll(readCommandLineOpts());
    return merged;
}
/** Initializes the shared blob store client from the effective Storm config. */
public static void prepare() {
    Config stormConf = new Config();
    stormConf.putAll(Utils.readStormConfig());
    store = Utils.getClientBlobStore(stormConf);
}
throws AuthorizationException, NotAliveException, InvalidTopologyException {
    // NOTE(review): fragment — the method signature begins outside this view.
    // Defensive copy so command-line overrides do not mutate the caller's map.
    topoConf = new HashMap(topoConf);
    topoConf.putAll(Utils.readCommandLineOpts());
    // Layer precedence (lowest to highest): cluster Storm config, then the
    // topology config (which now includes the CLI opts).
    Map<String, Object> conf = Utils.readStormConfig();
    conf.putAll(topoConf);
    Map<String, String> fullCreds = populateCredentials(conf, credentials);
    // Effective client config precedence (lowest to highest): cluster Storm
    // config, command-line opts, caller-supplied conf.
    Map<String, Object> fullConf = Utils.readStormConfig();
    fullConf.putAll(Utils.readCommandLineOpts());
    fullConf.putAll(conf);
    return new DRPCClient(fullConf, host, port);
} catch (RuntimeException e) {
    // Remember connection failures (presumably so the enclosing loop can retry —
    // fragment; the retry logic is outside this view) and rethrow anything else.
    if (Utils.exceptionCauseIsInstanceOf(ConnectException.class, e)) {
        excpt = e;
    } else {
/**
 * Converts {@code o} to an Integer via the two-arg overload, rejecting values
 * that cannot be converted (the overload yields null for those).
 *
 * @throws IllegalArgumentException if {@code o} has no integer conversion
 */
public static Integer getInt(Object o) {
    Integer converted = getInt(o, null);
    if (converted == null) {
        throw new IllegalArgumentException("Don't know how to convert null to int");
    }
    return converted;
}
/** Returns the default Storm config overlaid with the supplied extra entries. */
private Map<String, Object> mkConf(Map<String, Object> extra) {
    Map<String, Object> merged = Utils.readDefaultConfig();
    merged.putAll(extra);
    return merged;
}
/**
 * Reads the bundled defaults.yaml.
 *
 * @return the default Storm configuration; the second argument presumably marks
 *         the file as required — TODO confirm against findAndReadConfigFile
 */
public static Map<String, Object> readDefaultConfig() {
    return findAndReadConfigFile("defaults.yaml", true);
}
/**
 * Deserialize ByteBuffer to a UTF-8 String without consuming the buffer
 * (the buffer's position is unchanged on return).
 *
 * @param byteBuffer input ByteBuffer
 * @return deserialized string
 */
public static String deserializeString(ByteBuffer byteBuffer) {
    if (byteBuffer.hasArray()) {
        // Fast path: decode straight out of the backing array, honoring the
        // array offset of sliced/wrapped buffers. No copy, no position change.
        int base = byteBuffer.arrayOffset();
        return new String(byteBuffer.array(), base + byteBuffer.position(), byteBuffer.remaining(), StandardCharsets.UTF_8);
    } else {
        // Direct (off-heap) buffer: copy the remaining bytes via a duplicate so
        // the caller's position is untouched. Inlined here (was Utils.toByteArray)
        // so the method is self-contained stdlib code.
        byte[] bytes = new byte[byteBuffer.remaining()];
        byteBuffer.duplicate().get(bytes);
        return new String(bytes, StandardCharsets.UTF_8);
    }
}
/**
 * Serializes a Thrift struct to bytes; thin delegation to
 * {@code Utils.thriftSerialize(TBase)}.
 *
 * @param t Thrift struct to serialize
 * @return the serialized bytes
 */
public static byte[] thriftSerialize(TBase t) {
    return Utils.thriftSerialize(t);
}
// NOTE(review): fragment — 'st' is built before this view. Its bolt and
// state-spout maps are set empty before serialization.
st.set_bolts(new HashMap<>());
st.set_state_spouts(new HashMap<>());
// Thrift-serialize the struct, then gzip-compress the result.
byte[] serializedState = Utils.gzip(Utils.thriftSerialize(st));
@Test public void testSubmitTopologyToLocalNimbus() throws Exception { int port = Utils.getAvailablePort(); try (ILocalCluster localCluster = new LocalCluster.Builder() .withNimbusDaemon(true) .withDaemonConf(Config.NIMBUS_THRIFT_PORT, port) .build()) { Config topoConf = new Config(); topoConf.putAll(Utils.readDefaultConfig()); topoConf.setDebug(true); topoConf.put("storm.cluster.mode", "local"); // default is aways "distributed" but here local cluster is being used. topoConf.put(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN, InmemoryTopologySubmitterHook.class.getName()); topoConf.put(Config.NIMBUS_THRIFT_PORT, port); List<TopologyDetails> topologyNames = new ArrayList<>(); for (int i = 0; i < 4; i++) { final String topologyName = "word-count-" + UUID.randomUUID().toString(); final StormTopology stormTopology = createTestTopology(); topologyNames.add(new TopologyDetails(topologyName, stormTopology)); localCluster.submitTopology(topologyName, topoConf, stormTopology); } Assert.assertEquals(InmemoryTopologySubmitterHook.submittedTopologies, topologyNames); } }
/**
 * Spout lifecycle hook: initializes the coordinator's transactional state.
 * Initialization order matters — _state is created first and then consumed by
 * both the rotating-state wrapper and the stored-transaction lookup.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Securely seeded RNG for this coordinator instance.
    _rand = new Random(Utils.secureRandomLong());
    // Coordinator state keyed by the topology's transactional id.
    _state = TransactionalState.newCoordinatorState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), _spout.getComponentConfiguration());
    _coordinatorState = new RotatingTransactionalState(_state, META_DIR, true);
    _collector = collector;
    _coordinator = _spout.getCoordinator(conf, context);
    // Resume from the transaction id persisted in state, if any.
    _currTransaction = getStoredCurrTransaction(_state);
    // Max concurrently active transactions; defaults to 1 when unset.
    _maxTransactionActive = Utils.getInt(conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING), 1);
    _initializer = new StateInitializer();
}
/**
 * Deserializes every worker hook attached to the topology; returns an empty
 * list when none are set.
 */
private List<IWorkerHook> deserializeWorkerHooks() {
    List<IWorkerHook> hooks = new ArrayList<>();
    if (!topology.is_set_worker_hooks()) {
        return hooks;
    }
    for (ByteBuffer serialized : topology.get_worker_hooks()) {
        hooks.add(Utils.javaDeserialize(Utils.toByteArray(serialized), IWorkerHook.class));
    }
    return hooks;
}
/**
 * Initializes member variables from the topology configuration.
 *
 * @throws IllegalStateException if either required setting is missing from the
 *         topology config
 */
private void initConfigs() {
    this.topologyWorkerMaxHeapSize = Utils.getDouble(this.topologyConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), null);
    this.topologyPriority = Utils.getInt(this.topologyConf.get(Config.TOPOLOGY_PRIORITY), null);
    // BUGFIX: these checks were plain 'assert' statements, which are no-ops unless
    // the JVM runs with -ea — a missing config value silently left the field null
    // and deferred the failure to a later NPE. Fail fast explicitly instead.
    if (this.topologyWorkerMaxHeapSize == null) {
        throw new IllegalStateException(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB + " is not set in the topology config");
    }
    if (this.topologyPriority == null) {
        throw new IllegalStateException(Config.TOPOLOGY_PRIORITY + " is not set in the topology config");
    }
}
public static void main(String[] args) throws Exception { int runTime = -1; Config topoConf = new Config(); topoConf.put(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS, 1); topoConf.putAll(Utils.readCommandLineOpts()); if (args.length > 0) { long sleepMs = Integer.parseInt(args[0]); topoConf.put(SLEEP_MS, sleepMs); } if (args.length > 1) { runTime = Integer.parseInt(args[1]); } if (args.length > 2) { System.err.println("args: boltSleepMs [runDurationSec] "); return; } // Submit topology to storm cluster Helper.runOnClusterAndPrintMetrics(runTime, "BackPressureTopo", topoConf, getTopology(topoConf)); }