/**
 * Set-membership test, delegated to the key set of the backing map.
 *
 * @param o the candidate element (may be any object; never dereferenced here)
 * @return {@code true} if {@code o} is a key of the backing map
 */
public boolean contains( final Object o ) {
  // An element is "in the set" exactly when the backing map has it as a key.
  final boolean present = _map.containsKey(o);
  return present;
}
/**
 * Membership test for this key-set view: defers to the enclosing map.
 *
 * @param k candidate key
 * @return {@code true} if the enclosing map contains {@code k}
 */
@Override
public boolean contains( Object k ) {
  return NonBlockingHashMap.this.containsKey(k);
}

/**
 * Removes {@code k} from the enclosing map via this key-set view.
 *
 * @param k key to remove
 * @return {@code true} if a mapping for {@code k} existed and was removed
 */
@Override
public boolean remove( Object k ) {
  // The map's remove returns the prior value, or null when absent.
  final Object prior = NonBlockingHashMap.this.remove(k);
  return prior != null;
}
/** Key lookup, delegated to the backing map {@code _map}. */
public final boolean containsKey(Object key){return _map.containsKey(key);}
// NOTE(review): the body of addKey continues beyond this view of the file,
// so it is left untouched here; presumably it registers/stores the given
// string key — confirm against the full source.
public void addKey(String str) {
/**
 * Atomically computes the next 1-based instance number for {@code algoName}
 * and records it in {@code instanceCounters}.
 *
 * <p>The lock on {@code instanceCounters} is kept (rather than relying on the
 * map's own concurrency) so that the read-increment-write is a single atomic
 * step and so any external code synchronizing on the same map stays safe.
 *
 * @param algoName         algorithm name used as the counter key
 * @param instanceCounters shared per-algorithm counter map
 * @return the new counter value for {@code algoName} (1 on first use)
 */
private int nextInstanceCounter(String algoName, NonBlockingHashMap<String, Integer> instanceCounters) {
  synchronized (instanceCounters) {
    // Single lookup via getOrDefault instead of containsKey + get.
    final int instanceNum = instanceCounters.getOrDefault(algoName, 0) + 1;
    instanceCounters.put(algoName, instanceNum);
    return instanceNum;
  }
}
/**
 * Returns the per-key array of {@code ncols} Enum collectors, creating and
 * publishing it on first use.
 *
 * <p>Race-safe: if two threads arrive concurrently, {@code putIfAbsent}
 * guarantees both see the same winning array. Unlike the original
 * containsKey/putIfAbsent/get sequence, this does at most two map lookups
 * and never returns a stale/partial result.
 *
 * @param eKey  key identifying the parse target
 * @param ncols number of columns; sizes the array on first creation
 * @return the shared Enum array registered for {@code eKey}
 */
private static Enum [] enums(Key eKey, int ncols){
  Enum [] enums = _enums.get(eKey);
  if( enums == null ) {
    final Enum [] fresh = new Enum[ncols];
    for( int i = 0; i < fresh.length; ++i ) fresh[i] = new Enum();
    // putIfAbsent returns the previously-mapped value, or null if we won.
    enums = _enums.putIfAbsent(eKey, fresh);
    if( enums == null ) enums = fresh;
  }
  return enums;
}
/**
 * Gathers this node's locally-collected Enum (categorical) arrays into
 * {@code _lEnums}, then removes them from the shared static map.
 *
 * <p>NOTE(review): statement order matters here — the clone must happen
 * before the remove so the home node keeps an independent copy; confirm
 * against the full class before reordering anything.
 */
@Override public void map(Key key) {
  // One slot per cloud node; only this node's slot is filled here.
  _lEnums = new Enum[H2O.CLOUD.size()][];
  if(MultiFileParseTask._enums.containsKey(_k)){
    // Publish this node's enums into both the local table and _gEnums.
    _lEnums[H2O.SELF.index()] = _gEnums = MultiFileParseTask._enums.get(_k);
    // if we are the original node (i.e. there will be no sending over
    // wire), we have to clone the enums not to share the same object
    // (causes problems when computing column domain and renumbering maps).
    if( H2O.SELF.index() == _homeNode ) {
      _gEnums = _gEnums.clone();          // shallow copy of the array itself
      for(int i = 0; i < _gEnums.length; ++i)
        _gEnums[i] = _gEnums[i].clone();  // deep-copy each per-column Enum
    }
    // Done with the shared entry; drop it so it is not reused/leaked.
    MultiFileParseTask._enums.remove(_k);
  }
}
/**
 * Atomically computes the next 1-based instance number for {@code algoName}
 * and records it in {@code instanceCounters}.
 *
 * <p>The lock on {@code instanceCounters} is kept (rather than relying on the
 * map's own concurrency) so that the read-increment-write is a single atomic
 * step and so any external code synchronizing on the same map stays safe.
 *
 * @param algoName         algorithm name used as the counter key
 * @param instanceCounters shared per-algorithm counter map
 * @return the new counter value for {@code algoName} (1 on first use)
 */
private int nextInstanceCounter(String algoName, NonBlockingHashMap<String, Integer> instanceCounters) {
  synchronized (instanceCounters) {
    // Single lookup via getOrDefault instead of containsKey + get.
    final int instanceNum = instanceCounters.getOrDefault(algoName, 0) + 1;
    instanceCounters.put(algoName, instanceNum);
    return instanceNum;
  }
}