/**
 * Creates the HDFS-backed {@link IDfs} implementation.
 *
 * @param conf topology/cluster configuration handed to the new instance
 * @return a freshly constructed IDfs built from {@code HDFS_CLASS_PATH}
 */
public static IDfs getHdfsInstance(Map conf) {
    Object dfs = Utils.newInstance(HDFS_CLASS_PATH, conf);
    return (IDfs) dfs;
}
}
public static BlobStore getNimbusBlobStore(Map conf, String baseDir, NimbusInfo nimbusInfo) { String type = (String) conf.get(Config.NIMBUS_BLOBSTORE); if (type == null) { type = LocalFsBlobStore.class.getName(); } BlobStore store = (BlobStore) Utils.newInstance(type); HashMap nconf = new HashMap(conf); // only enable cleanup of blobstore on nimbus nconf.put(Config.BLOBSTORE_CLEANUP_ENABLE, Boolean.TRUE); store.prepare(nconf, baseDir, nimbusInfo); return store; }
/**
 * Builds the client-side blob store used by a supervisor.
 *
 * @param conf cluster configuration; {@code Config.SUPERVISOR_BLOBSTORE} names the
 *             implementation class to instantiate
 * @return a prepared {@link ClientBlobStore}
 */
public static ClientBlobStore getClientBlobStoreForSupervisor(Map conf) {
    String storeClass = (String) conf.get(Config.SUPERVISOR_BLOBSTORE);
    ClientBlobStore blobStore = (ClientBlobStore) Utils.newInstance(storeClass);
    blobStore.prepare(conf);
    return blobStore;
}
/**
 * Initializes the windowed state manager for this task.
 *
 * <p>Instantiates the RocksDB/HDFS-backed windowed state by class name, keys it by this
 * task's id, and resets the in-memory window bookkeeping.
 *
 * @param context topology context of the owning task
 */
public void createState(TopologyContext context) {
    IRichCheckpointWindowedState<K, V, String> manager =
            (IRichCheckpointWindowedState<K, V, String>) Utils.newInstance(
                    "com.alibaba.jstorm.hdfs.transaction.WindowedRocksDbHdfsState");
    manager.setStateName(String.valueOf(context.getThisTaskId()));
    manager.init(context);
    windowedStateManager = manager;
    windowedStates = new HashMap<>();
    windowUpdateLock = new ReentrantLock();
}
/**
 * Creates and registers the checkpointable KV state for one key range.
 *
 * <p>The state is named {@code <componentId>/<keyRange>} so each range's checkpoint
 * data is isolated.
 *
 * @param keyRange the key-range index this state serves
 */
private void initKeyRangeState(int keyRange) {
    IRichCheckpointKvState<K, V, String> rangeState =
            (IRichCheckpointKvState<K, V, String>) Utils.newInstance(
                    "com.alibaba.jstorm.hdfs.transaction.RocksDbHdfsState");
    String stateName = context.getThisComponentId() + "/" + String.valueOf(keyRange);
    rangeState.setStateName(stateName);
    rangeState.init(context);
    keyRangeToState.put(keyRange, rangeState);
}
// Copies this gauge metric: builds a new AsmGauge around a fresh Gauge created
// reflectively from the wrapped gauge's class, then carries the metric name over.
// NOTE(review): assumes the gauge class has an accessible no-arg constructor and
// that Utils.newInstance uses it — TODO confirm against Utils.
@Override
public AsmMetric clone() {
    AsmMetric metric = new AsmGauge((Gauge<Double>) Utils.newInstance(this.gauge.getClass().getName()));
    metric.setMetricName(this.getMetricName());
    return metric;
} });
/**
 * Resolves the RocksDB column-family options.
 *
 * <p>Starts from the built-in defaults; if the configuration names a user
 * {@link RocksDbOptionsFactory}, the defaults are passed through it for customization.
 *
 * @param conf cluster configuration
 * @return the effective {@link ColumnFamilyOptions}
 */
public static ColumnFamilyOptions getColumnFamilyOptions(Map conf) {
    ColumnFamilyOptions cfOptions = new RocksDbOptionsFactory.Defaults().createColumnFamilyOptions(null);
    String factoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (factoryClass == null) {
        return cfOptions;
    }
    RocksDbOptionsFactory factory = (RocksDbOptionsFactory) Utils.newInstance(factoryClass);
    return factory.createColumnFamilyOptions(cfOptions);
}
/**
 * Creates the checkpointable KV state instance for this task.
 *
 * <p>Instantiates the RocksDB/HDFS-backed state by class name and names it after the
 * task id so checkpoints are per-task.
 *
 * @param context topology context of the owning task
 * @return the initialized state instance
 */
public IKvState<K, V> createState(TopologyContext context) {
    IRichCheckpointKvState<K, V, String> state =
            (IRichCheckpointKvState<K, V, String>) Utils.newInstance(
                    "com.alibaba.jstorm.hdfs.transaction.RocksDbHdfsState");
    state.setStateName(String.valueOf(context.getThisTaskId()));
    state.init(context);
    stateInstance = state;
    return stateInstance;
}
/**
 * Resolves the top-level RocksDB {@link Options}.
 *
 * <p>Starts from the built-in defaults; if the configuration names a user
 * {@link RocksDbOptionsFactory}, the defaults are passed through it for customization.
 *
 * @param conf cluster configuration
 * @return the effective {@link Options}
 */
public static Options getOptions(Map conf) {
    Options options = new RocksDbOptionsFactory.Defaults().createOptions(null);
    String factoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (factoryClass == null) {
        return options;
    }
    RocksDbOptionsFactory factory = (RocksDbOptionsFactory) Utils.newInstance(factoryClass);
    return factory.createOptions(options);
}
/**
 * Resolves the RocksDB {@link DBOptions}.
 *
 * <p>Starts from the built-in defaults; if the configuration names a user
 * {@link RocksDbOptionsFactory}, the defaults are passed through it for customization.
 *
 * @param conf cluster configuration
 * @return the effective {@link DBOptions}
 */
public static DBOptions getDBOptions(Map conf) {
    DBOptions dbOptions = new RocksDbOptionsFactory.Defaults().createDbOptions(null);
    String factoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (factoryClass == null) {
        return dbOptions;
    }
    RocksDbOptionsFactory factory = (RocksDbOptionsFactory) Utils.newInstance(factoryClass);
    return factory.createDbOptions(dbOptions);
}
/**
 * Tracks snapshot (checkpoint) progress for a transactional topology.
 *
 * <p>Wires up the task-group views (spouts, stateful/non-stateful bolts, end bolts),
 * seeds the last-successful-snapshot tracker at {@code TransactionCommon.INIT_BATCH_ID},
 * and loads any user/system task-state-init operators registered in the configuration.
 *
 * @param context          topology context (source of conf and task→component map)
 * @param spouts           component id → task ids of source spouts
 * @param statefulBolts    component id → task ids of stateful bolts
 * @param nonStatefulBolts component id → task ids of non-stateful bolts
 * @param endBolts         task ids of terminal bolts
 * @param stateOperator    operator used to persist/restore topology state
 */
public SnapshotState(TopologyContext context, Map<String, Set<Integer>> spouts, Map<String, Set<Integer>> statefulBolts, Map<String, Set<Integer>> nonStatefulBolts, Set<Integer> endBolts, ITopologyStateOperator stateOperator) {
    this.context = context;
    this.conf = context.getStormConf();
    this.taskToComponentId = context.getTaskToComponent();
    this.sourceTasks = spouts;
    this.statefulTasks = statefulBolts;
    this.nonStatefulTasks = nonStatefulBolts;
    this.endTasks = endBolts;
    // Baseline tracker: pretend batch INIT_BATCH_ID already succeeded so the first
    // real batch has a predecessor to compare against.
    this.lastSuccessfulSnapshot = new BatchStateTracker(TransactionCommon.INIT_BATCH_ID, spouts, statefulBolts, endBolts);
    // Up to 3 rotating buckets of in-flight batch trackers.
    this.inprogressSnapshots = new RotatingMap<Long, BatchStateTracker>(3, true);
    this.stateOperator = stateOperator;
    this.taskStateInitOperators = new HashMap<String, ITaskStateInitOperator>();
    // NOTE(review): empty log message — looks like leftover debugging; consider removing.
    LOG.info("");
    // User-registered task-state initializers, instantiated reflectively by class name.
    Map<String, String> taskStateInitOpRegisterMap = ConfigExtension.getTransactionUserTaskInitRegisterMap(conf);
    if (taskStateInitOpRegisterMap != null) {
        for (Entry<String, String> entry : taskStateInitOpRegisterMap.entrySet()) {
            taskStateInitOperators.put(entry.getKey(), (ITaskStateInitOperator) Utils.newInstance(entry.getValue()));
        }
    }
    this.taskSysStateInitOperators = new HashMap<String, ITaskStateInitOperator>();
    // System-registered task-state initializers, same mechanism as above.
    Map<String, String> taskSysStateInitOpRegisterMap = ConfigExtension.getTransactionSysTaskInitRegisterMap(conf);
    if (taskSysStateInitOpRegisterMap != null) {
        for (Entry<String, String> entry : taskSysStateInitOpRegisterMap.entrySet()) {
            taskSysStateInitOperators.put(entry.getKey(), (ITaskStateInitOperator) Utils.newInstance(entry.getValue()));
        }
    }
    this.state = State.ACTIVE;
}
/**
 * Builds the metrics cache backed by a pluggable {@link JStormCache} (RocksDB by default).
 *
 * <p>On a failed init: if a reset was NOT already requested and the cache object exists,
 * it retries once with {@code ROCKSDB_RESET=true} (wiping the db); otherwise the failure
 * is fatal and rethrown.
 *
 * @param conf      cluster configuration; mutated to carry the db dir and reset flag
 * @param zkCluster zookeeper-backed cluster state handle
 */
public JStormMetricCache(Map conf, StormClusterState zkCluster) {
    String dbCacheClass = getNimbusCacheClass(conf);
    LOG.info("JStorm metrics cache will use {}", dbCacheClass);
    boolean reset = ConfigExtension.getMetricCacheReset(conf);
    try {
        cache = (JStormCache) Utils.newInstance(dbCacheClass);
        String dbDir = StormConfig.metricDbDir(conf);
        conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, dbDir);
        conf.put(RocksDBCache.ROCKSDB_RESET, reset);
        cache.init(conf);
    } catch (Exception e) {
        if (!reset && cache != null) {
            // Best-effort recovery: wipe the db and retry once.
            LOG.error("Failed to init rocks db, will reset and try to re-init...");
            conf.put(RocksDBCache.ROCKSDB_RESET, true);
            try {
                cache.init(conf);
            } catch (Exception ex) {
                // NOTE(review): a failed retry is only logged — construction continues
                // with a possibly unusable cache. Presumably intentional best-effort;
                // confirm callers tolerate an uninitialized cache.
                LOG.error("Error", ex);
            }
        } else {
            LOG.error("Failed to create metrics cache!", e);
            throw new RuntimeException(e);
        }
    }
    this.zkCluster = zkCluster;
}
/**
 * Builds the nimbus-side cache pair: a persistent db cache ({@link JStormCache},
 * RocksDB by default) plus an in-memory {@link TimeoutMemCache} layered in front of it
 * (unless the db cache itself is already a TimeoutMemCache).
 *
 * <p>A JVM too old for the RocksDB JNI classes surfaces as
 * {@link UnsupportedClassVersionError}; that case logs a jdk-upgrade hint before failing.
 *
 * @param conf      cluster configuration; mutated to carry the db dir and reset flag
 * @param zkCluster zookeeper-backed cluster state handle
 * @throws RuntimeException wrapping any failure to create or init the caches
 */
public NimbusCache(Map conf, StormClusterState zkCluster) {
    super();
    String dbCacheClass = getNimbusCacheClass(conf);
    LOG.info("NimbusCache db cache will use {}", dbCacheClass);
    try {
        dbCache = (JStormCache) Utils.newInstance(dbCacheClass);
        String dbDir = StormConfig.masterDbDir(conf);
        conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, dbDir);
        conf.put(RocksDBCache.ROCKSDB_RESET, ConfigExtension.getNimbusCacheReset(conf));
        dbCache.init(conf);
        if (dbCache instanceof TimeoutMemCache) {
            // Already an in-memory cache — no point layering another one on top.
            memCache = dbCache;
        } else {
            memCache = new TimeoutMemCache();
            memCache.init(conf);
        }
    } catch (java.lang.UnsupportedClassVersionError e) {
        // getMessage() may be null (Throwable allows it) — guard before contains().
        String msg = e.getMessage();
        if (msg != null && msg.contains("Unsupported major.minor version")) {
            LOG.error("!!!Please update jdk version to 7 or higher!!!");
        }
        LOG.error("Failed to create NimbusCache!", e);
        throw new RuntimeException(e);
    } catch (Exception e) {
        LOG.error("Failed to create NimbusCache!", e);
        throw new RuntimeException(e);
    }
    this.zkCluster = zkCluster;
}
Object instance = Utils.newInstance(klass); if (!(instance instanceof MetricUploader)) { throw new RuntimeException(klass + " isn't MetricUploader class "); if (!StringUtils.isBlank(metricQueryClientClass)) { LOG.info("metric query client class:{}", metricQueryClientClass); this.metricQueryClient = (MetricQueryClient) Utils.newInstance(metricQueryClientClass); } else { LOG.warn("use default metric query client class.");
this.configUpdateHandler = (ConfigUpdateHandler) Utils.newInstance(configUpdateHandlerClass); nimbusNotify = (ITopologyActionNotifierPlugin) Utils.newInstance(string); } else { nimbusNotify = null;
TMHandler tmUdfHandler = (TMHandler) Utils.newInstance(udfStreamClass); tmUdfHandler.init(tmContext); handlers.put(USER_DEFINED_STREAM, tmUdfHandler);
stateOperator = new DefaultTopologyStateOperator(); } else { stateOperator = (ITopologyStateOperator) Utils.newInstance(topologyStateOpClassName);
public static Kryo getKryo(Map conf) { IKryoFactory kryoFactory = (IKryoFactory) Utils.newInstance((String) conf.get(Config.TOPOLOGY_KRYO_FACTORY)); Kryo k = kryoFactory.getKryo(conf); if (WorkerClassLoader.getInstance() != null)
ITaskHook iTaskHook = (ITaskHook) Utils.newInstance(hook); userContext.addTaskHook(iTaskHook);
public static BlobStore getNimbusBlobStore(Map conf, String baseDir, NimbusInfo nimbusInfo) { String type = (String) conf.get(Config.NIMBUS_BLOBSTORE); if (type == null) { type = LocalFsBlobStore.class.getName(); } BlobStore store = (BlobStore) Utils.newInstance(type); HashMap nconf = new HashMap(conf); // only enable cleanup of blobstore on nimbus nconf.put(Config.BLOBSTORE_CLEANUP_ENABLE, Boolean.TRUE); store.prepare(nconf, baseDir, nimbusInfo); return store; }