public void markUploading(int idx) { this.metricStat.set(idx, UPLOADING); }
public void markSet(int idx) { this.metricStat.set(idx, SET); }
public void addValue(int... latency) {
    for (int l : latency) {
        /* We just wrap around the beginning and over-write if we go past 'dataLength' as that
           will effectively cause us to "sample" the most recent data */
        list.set(index.getAndIncrement() % length, l);
        // TODO Alternative to AtomicInteger? The getAndIncrement may be a source of contention
        // on high throughput circuits on large multi-core systems.
        // LongAdder isn't suited to this as it is not consistent. Perhaps a different data
        // structure that doesn't need indexed adds?
        // A threadlocal data storage that only aggregates when fetched would be ideal. Similar
        // to LongAdder except for accumulating lists of data.
    }
}
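// A hedged sketch of the thread-local alternative suggested by the TODO above: each thread
// appends latencies to its own buffer, and aggregation (and any contention) happens only when
// the data is fetched. The class and method names here are illustrative, not part of the
// original code.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class ThreadLocalLatencyRecorder {
    // Track every per-thread buffer so a reader can aggregate them later.
    private final Set<List<Integer>> allBuffers =
            Collections.newSetFromMap(new ConcurrentHashMap<List<Integer>, Boolean>());

    private final ThreadLocal<List<Integer>> localBuffer = ThreadLocal.withInitial(() -> {
        List<Integer> buffer = Collections.synchronizedList(new ArrayList<Integer>());
        allBuffers.add(buffer);
        return buffer;
    });

    // Writers only touch their own buffer, so there is no shared index to increment.
    public void addValue(int... latency) {
        List<Integer> buffer = localBuffer.get();
        for (int l : latency) {
            buffer.add(l);
        }
    }

    // Aggregation is deferred until the data is fetched, similar in spirit to LongAdder.sum().
    public List<Integer> snapshotAndReset() {
        List<Integer> result = new ArrayList<>();
        for (List<Integer> buffer : allBuffers) {
            synchronized (buffer) {
                result.addAll(buffer);
                buffer.clear();
            }
        }
        return result;
    }
}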
private void initializeArray(AtomicIntegerArray arr) {
    for (int i = 0; i < arr.length(); i++) {
        arr.set(i, Integer.valueOf(0));
    }
}
@Override
public synchronized void resetRecord() {
    for (int i = 0; i < timingArr.length(); ++i) {
        timingArr.set(i, 0);
    }
    hasRecord = false;
}
@Override
public synchronized void resetRecord() {
    for (int i = 0; i < timingArr.length(); ++i) {
        timingArr.set(i, 0);
    }
    Iterator<Map.Entry<Integer, AtomicInteger>> iterator = timingMap.entrySet().iterator();
    while (iterator.hasNext()) {
        Map.Entry<Integer, AtomicInteger> entry = iterator.next();
        // Drop entries whose key falls well outside the timing array or whose count is not
        // positive; reset the counters that are kept.
        if ((entry.getKey() > 1.5 * timingArr.length()) || entry.getValue().get() <= 0) {
            iterator.remove();
        } else {
            entry.getValue().set(0);
        }
    }
    hasRecord = false;
}
private void doIntWrite(final int idx, int value) {
    synchronized (this.intReadPrepLock[idx]) {
        if (!isIntDirty(idx)) {
            // no need to prepare if not dirty
            this.intStorage.set(idx, value);
            return;
        }
    }
    prepareThreadStoreList();
    synchronized (this.intReadPrepLock[idx]) {
        if (clearIntDirty(idx)) {
            for (ThreadStorage ts : this.threadStoreList) {
                if (ts.intStore.get(idx) != 0) {
                    ts.intStore.set(idx, 0);
                }
            }
        }
        this.intStorage.set(idx, value);
    }
}
private void setIntDirty(final int idx) {
    // weakCompareAndSet may fail spuriously, so re-check before falling back to a plain set.
    if (!this.intDirty.weakCompareAndSet(idx, 0 /* expected */, 1 /* update */)) {
        if (!isIntDirty(idx)) {
            this.intDirty.set(idx, 1);
        }
    }
}
private void setLongDirty(final int idx) {
    if (!this.longDirty.weakCompareAndSet(idx, 0 /* expected */, 1 /* update */)) {
        if (!isLongDirty(idx)) {
            this.longDirty.set(idx, 1);
        }
    }
}
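// A hedged, standalone illustration of the dirty-flag pattern used by setIntDirty/setLongDirty
// above: weakCompareAndSet is allowed to fail even when the expected value matches, so a plain
// set is used as a fallback when the flag is still observed to be clear. The DirtyFlags class
// and its method names are illustrative only.
import java.util.concurrent.atomic.AtomicIntegerArray;

class DirtyFlags {
    private final AtomicIntegerArray flags;

    DirtyFlags(int size) {
        this.flags = new AtomicIntegerArray(size);
    }

    void markDirty(int idx) {
        // Cheap attempt first; may fail spuriously.
        if (!flags.weakCompareAndSet(idx, 0, 1) && flags.get(idx) == 0) {
            // Fall back to an unconditional write so the flag is never lost.
            flags.set(idx, 1);
        }
    }

    boolean isDirty(int idx) {
        return flags.get(idx) != 0;
    }
}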
public void forceMarkUploaded(int idx) {
    this.metricCache.remove(PENDING_UPLOAD_METRIC_DATA + idx);
    this.metricCache.remove(PENDING_UPLOAD_METRIC_DATA_INFO + idx);
    this.metricStat.set(idx, UNSET);
    if (MetricUploader.rateController.isEnableRateControl()) {
        MetricUploader.rateController.decrUploadingNum();
    }
}
private void incData(FederationComponent newComp, FederationComponent oldComp) {
    Map<String, Object> newState = (newComp != null ? newComp.getObjectState() : null);
    Map<String, Object> oldState;
    if (oldComp != null && oldComp.getOldState().size() > 0) {
        oldState = oldComp.getOldState();
    } else {
        oldState = (oldComp != null ? oldComp.getObjectState() : null);
    }
    if (newState != null) {
        for (int index = 0; index < keys.size(); index++) {
            prevCounters.set(index, currCounters.get(index));
            Integer newVal = (Integer) newState.get(keys.get(index));
            if (newVal == null) {
                continue;
            }
            Integer oldVal = 0;
            if (oldState != null) {
                Object val = oldState.get(keys.get(index));
                if (val != null) {
                    oldVal = (Integer) val;
                }
            }
            currCounters.addAndGet(index, newVal - oldVal);
        }
    }
}
@Override
public Void call() throws Exception {
    while (!stop.get()) {
        final int nodeId = r.nextInt(TOTAL_CNT);
        if (!reservedIdx.compareAndSet(nodeId, 0, 1)) {
            yield();
            continue;
        }
        stopGrid(nodeId);
        doSleep(500 + r.nextInt(1000));
        startGrid(nodeId);
        reservedIdx.set(nodeId, 0);
    }
    return null;
}
}, 1, "tx-restart-thread");
@VisibleForTesting
void hit(long timestamp) {
    long numTimeUnits = timestamp / _timeBucketWidthMs;
    int index = (int) (numTimeUnits % BUCKET_COUNT);
    if (_bucketStartTime.get(index) == numTimeUnits) {
        _bucketHitCount.incrementAndGet(index);
    } else {
        synchronized (_bucketStartTime) {
            if (_bucketStartTime.get(index) != numTimeUnits) {
                _bucketHitCount.set(index, 1);
                _bucketStartTime.set(index, numTimeUnits);
            } else {
                _bucketHitCount.incrementAndGet(index);
            }
        }
    }
}
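// A hedged, self-contained sketch of the same bucketed hit counter, with a hypothetical read
// path added: only buckets whose recorded start time still falls inside the trailing window
// contribute to the total. Field names mirror the snippet above; BUCKET_COUNT, the constructor,
// and getHitCount() are assumptions, not the original API.
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.AtomicLongArray;

class HitCounterSketch {
    private static final int BUCKET_COUNT = 100;
    private final long _timeBucketWidthMs;
    private final AtomicLongArray _bucketStartTime = new AtomicLongArray(BUCKET_COUNT);
    private final AtomicIntegerArray _bucketHitCount = new AtomicIntegerArray(BUCKET_COUNT);

    HitCounterSketch(long timeBucketWidthMs) {
        this._timeBucketWidthMs = timeBucketWidthMs;
    }

    void hit(long timestamp) {
        long numTimeUnits = timestamp / _timeBucketWidthMs;
        int index = (int) (numTimeUnits % BUCKET_COUNT);
        if (_bucketStartTime.get(index) == numTimeUnits) {
            _bucketHitCount.incrementAndGet(index);
        } else {
            synchronized (_bucketStartTime) {
                if (_bucketStartTime.get(index) != numTimeUnits) {
                    // A new time unit has claimed this bucket; restart its count.
                    _bucketHitCount.set(index, 1);
                    _bucketStartTime.set(index, numTimeUnits);
                } else {
                    _bucketHitCount.incrementAndGet(index);
                }
            }
        }
    }

    // Hypothetical read path: sum buckets that are still within the trailing window.
    int getHitCount(long now) {
        long currentTimeUnit = now / _timeBucketWidthMs;
        int total = 0;
        for (int i = 0; i < BUCKET_COUNT; i++) {
            if (currentTimeUnit - _bucketStartTime.get(i) < BUCKET_COUNT) {
                total += _bucketHitCount.get(i);
            }
        }
        return total;
    }
}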
public ClusterMetricsContext(final NimbusData nimbusData) {
    LOG.info("create cluster metrics context...");
    this.nimbusData = nimbusData;
    this.metricCache = nimbusData.getMetricCache();
    this.stormClusterState = nimbusData.getStormClusterState();
    this.isShutdown = nimbusData.getIsShutdown();
    clusterName = ConfigExtension.getClusterName(nimbusData.getConf());
    if (clusterName == null) {
        throw new RuntimeException("cluster.name property must be set in storm.yaml!");
    }
    this.maxPendingUploadMetrics = ConfigExtension.getMaxPendingMetricNum(nimbusData.getConf());
    this.metricStat = new AtomicIntegerArray(this.maxPendingUploadMetrics);

    int cnt = 0;
    for (int i = 0; i < maxPendingUploadMetrics; i++) {
        TopologyMetricDataInfo obj = getMetricDataInfoFromCache(i);
        if (obj != null) {
            this.metricStat.set(i, SET);
            cnt++;
        }
    }
    LOG.info("pending upload metrics: {}", cnt);

    // track nimbus JVM heap
    JStormMetrics.registerWorkerGauge(JStormMetrics.NIMBUS_METRIC_KEY, MetricDef.MEMORY_USED,
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getJVMHeapMemory();
                }
            }));
}
locks.set(g, 0);
@Override
public void run() {
    int key = idGen.getAndIncrement();
    List<Integer> keys = new ArrayList<>();
    for (int k = 0; k < keysCnt; k++)
        keys.add(k);
    int cntr = 0;
    for (int i = 0; i < ITERATIONS; i++) {
        cntr++;
        int nodeId;
        while (!reservedIdx.compareAndSet((nodeId = r.nextInt(TOTAL_CNT)), 0, 1))
            doSleep(10);
        U.awaitQuiet(b);
        final IgniteEx grid = grid(nodeId);
        try (final Transaction tx = grid.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 0, 0)) {
            reservedIdx.set(nodeId, 0);
            // Construct deadlock
            grid.cache(CACHE_NAME).get(keys.get(key));
            // Should block.
            grid.cache(CACHE_NAME).get(keys.get((key + 1) % keysCnt));
            fail("Deadlock expected");
        } catch (Throwable t) {
            // Expected.
        }
        if (key == 0)
            log.info("Rolled back: " + cntr);
    }
}
}, keysCnt, "tx-lock-thread");
@SuppressWarnings({"BusyWait"}) @Override public Object call() throws Exception { GridRandom rnd = new GridRandom(); while (!restartsDone.get()) { int g; do { g = rnd.nextInt(locks.length()); } while (!locks.compareAndSet(g, 0, -1)); log.info("Stop node: " + g); stopGrid(g); Thread.sleep(rnd.nextInt(nodeLifeTime)); log.info("Start node: " + g); startGrid(g); Thread.sleep(rnd.nextInt(nodeLifeTime)); locks.set(g, 0); int c = restartCnt.incrementAndGet(); if (c % logFreq == 0) info("Node restarts: " + c); } return true; } }, restartThreadsNum, "restart-thread");
@SuppressWarnings("unchecked") public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) { if (parser.lexer.token() == JSONToken.NULL) { parser.lexer.nextToken(JSONToken.COMMA); return null; } JSONArray array = new JSONArray(); parser.parseArray(array); if (clazz == AtomicIntegerArray.class) { AtomicIntegerArray atomicArray = new AtomicIntegerArray(array.size()); for (int i = 0; i < array.size(); ++i) { atomicArray.set(i, array.getInteger(i)); } return (T) atomicArray; } AtomicLongArray atomicArray = new AtomicLongArray(array.size()); for (int i = 0; i < array.size(); ++i) { atomicArray.set(i, array.getLong(i)); } return (T) atomicArray; }
@SuppressWarnings("unchecked") public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) { if (parser.lexer.token() == JSONToken.NULL) { parser.lexer.nextToken(JSONToken.COMMA); return null; } JSONArray array = new JSONArray(); parser.parseArray(array); if (clazz == AtomicIntegerArray.class) { AtomicIntegerArray atomicArray = new AtomicIntegerArray(array.size()); for (int i = 0; i < array.size(); ++i) { atomicArray.set(i, array.getInteger(i)); } return (T) atomicArray; } AtomicLongArray atomicArray = new AtomicLongArray(array.size()); for (int i = 0; i < array.size(); ++i) { atomicArray.set(i, array.getLong(i)); } return (T) atomicArray; }