/** Add the given value to the current counter value.  Concurrent updates
 *  will not be lost, but addAndGet or getAndAdd are not implemented because
 *  the total counter value (i.e., {@link #get}) is not atomically updated.
 *  Updates are striped across an array of counters to avoid cache contention,
 *  and performance has been tested to scale linearly up to 768 CPUs. */
public void add( long x ) { add_if_mask( x,0); }

/** {@link #add} with -1 */
public void decrement() { add_if_mask(-1L,0); }

/** {@link #add} with +1 */
public void increment() { add_if_mask( 1L,0); }
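// The striping idea in miniature (a sketch only; the real logic lives in
// CAT.add_if_mask, which also grows the stripe array under contention):
//
//   AtomicLongArray stripes = new AtomicLongArray(4);
//   void add( long x ) {
//     int idx = hash() & (stripes.length()-1); // this thread's stripe
//     stripes.addAndGet(idx, x);               // contention stays local
//   }
//   long get() {    // NOT atomic: other threads may update stripes
//     long sum = 0; // we have already passed over
//     for( int i=0; i<stripes.length(); i++ ) sum += stripes.get(i);
//     return sum;
//   }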
/** Atomically set the sum of the striped counters to the specified value.
 *  Rather more expensive than a simple store, in order to remain atomic. */
public void set( long x ) {
  CAT newcat = new CAT(null,4,x);
  // Spin until the CAS works: a concurrent add() may swap in a grown CAT
  // between our read of _cat and the compare-and-set.
  while( !CAS_cat(_cat,newcat) ) ; // retry
}
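// CAS_cat is not shown in this excerpt.  A common shape for it (an
// assumption, not necessarily this file's exact code) is a compareAndSet
// over a volatile _cat field via an AtomicReferenceFieldUpdater, here
// assuming the enclosing class is named ConcurrentAutoTable:
//
//   private volatile CAT _cat = new CAT(null,4,0L);
//   private static final AtomicReferenceFieldUpdater<ConcurrentAutoTable,CAT> _catUpdater =
//     AtomicReferenceFieldUpdater.newUpdater(ConcurrentAutoTable.class, CAT.class, "_cat");
//   private boolean CAS_cat( CAT oldcat, CAT newcat ) {
//     return _catUpdater.compareAndSet(this, oldcat, newcat);
//   }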
/** Funnel for all updates: delegates to the current CAT, passing hash() so
 *  the CAT can pick this thread's stripe. */
private long add_if_mask( long x, long mask ) { return _cat.add_if_mask(x,mask,hash(),this); }
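// Usage sketch (assumes the enclosing class is ConcurrentAutoTable and
// exposes the long get() referenced in the Javadoc above):
//
//   final ConcurrentAutoTable hits = new ConcurrentAutoTable();
//   ExecutorService pool = Executors.newFixedThreadPool(8);
//   for( int i=0; i<8; i++ )
//     pool.execute( () -> { for( int j=0; j<1_000_000; j++ ) hits.increment(); } );
//   pool.shutdown();
//   pool.awaitTermination(1, TimeUnit.MINUTES); // throws InterruptedException
//   assert hits.get() == 8_000_000L; // no lost updates once all threads quiesce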