/**
 * Creates a consumer that turns encoded data into ColumnVectorBatch objects for the
 * downstream consumer, pooling CVBs for reuse.
 *
 * @param consumer the downstream consumer that receives decoded batches.
 * @param colCount number of columns each pooled ColumnVectorBatch is created with.
 * @param ioMetrics LLAP daemon IO metrics sink.
 * @param counters per-fragment counters for this query fragment.
 */
public EncodedDataConsumer(Consumer<ColumnVectorBatch> consumer, final int colCount,
    LlapDaemonIOMetrics ioMetrics, QueryFragmentCounters counters) {
  this.downstreamConsumer = consumer;
  this.ioMetrics = ioMetrics;
  this.counters = counters;
  this.mxBean = LlapUtil.initThreadMxBean();
  this.cvbPool = new FixedSizedObjectPool<ColumnVectorBatch>(CVB_POOL_SIZE,
      new Pool.PoolObjectHelper<ColumnVectorBatch>() {
        @Override
        public ColumnVectorBatch create() {
          return new ColumnVectorBatch(colCount);
        }

        @Override
        public void resetBeforeOffer(ColumnVectorBatch t) {
          // Intentionally a no-op: column vectors are reused across batches.
        }
      });
}
/** Performs one pool operation: takes an object and records it unless it is the sentinel. */
@Override
protected void doOneOp() {
  Object taken = pool.take();
  if (taken == OneObjHelper.THE_OBJECT) {
    return; // Sentinel means the pool manufactured the object; nothing to record.
  }
  objects.add(taken);
} }
/**
 * Offers {@code t} back to the pool. Best-effort: if the pool is full the object is
 * simply dropped, so the result of tryOffer is intentionally ignored.
 */
@Override
public void offer(T t) {
  tryOffer(t);
}
private boolean offerImpl(T t) { long oldState = reserveArrayIndex(EMPTY, OBJECTS); if (oldState == NO_INDEX) return false; // For whatever reason, reserve failed. long originalMarker = EMPTY.getMarker(oldState), delta = EMPTY.getDelta(oldState); int arrayIndex = (int)getArrayIndex(originalMarker, delta); if (pool[arrayIndex] != null) { throwError(oldState, arrayIndex, "non-null"); } pool[arrayIndex] = t; commitArrayIndex(EMPTY, OBJECTS, originalMarker); return true; }
/** Exercises the full/empty boundary: fill to capacity, reject overflow, drain to empty. */
@Test
public void testFullEmpty() {
  final int SIZE = 8;
  FixedSizedObjectPool<Object> pool =
      new FixedSizedObjectPool<>(SIZE, new DummyHelper(), true);
  Object created = pool.take(); // Empty pool: the helper creates a fresh object.
  HashSet<Object> given = new HashSet<>();
  for (int i = 0; i < SIZE; ++i) {
    Object o = new Object();
    given.add(o);
    assertTrue(pool.tryOffer(o));
  }
  // The pool is at capacity now, so further offers must fail.
  assertFalse(pool.tryOffer(created));
  for (int i = 0; i < SIZE; ++i) {
    assertTrue(given.remove(pool.take()));
  }
  assertTrue(given.isEmpty());
  // Drained again: take must create another fresh object, not return the rejected one.
  assertNotSame(created, pool.take());
}
/**
 * Lock-freely reserves a slot at the current head of the {@code from} list so the caller
 * can move that slot's object to the {@code to} list. On success the from-list delta and
 * refcount in the packed state word have been advanced via CAS; the caller must later
 * call commitArrayIndex with the marker derived from the returned state.
 *
 * @param from marker of the list an index is taken from (OBJECTS for take, EMPTY for offer).
 * @param to marker of the list the slot will move to.
 * @return the pre-CAS state value (caller derives marker/delta, hence the slot, from it),
 *         or NO_INDEX if reservation failed: empty list, too many concurrent operations,
 *         or a concurrent drain/re-creation of either list.
 */
private long reserveArrayIndex(Marker from, Marker to) {
  while (true) {
    long oldVal = state.get(), marker = from.getMarker(oldVal), delta = from.getDelta(oldVal),
        rc = from.getRc(oldVal), toMarker = to.getMarker(oldVal), toDelta = to.getDelta(oldVal);
    if (marker == NO_MARKER) return NO_INDEX; // The list is empty.
    if (delta == MAX_DELTA) return NO_INDEX; // Too many concurrent operations; spurious failure.
    if (delta == NO_DELTA) return NO_INDEX; // List is drained and recreated concurrently.
    if (toDelta == NO_DELTA) { // Same for the OTHER list; spurious.
      // TODO: the fact that concurrent re-creation of other list necessitates full stop is not
      //       ideal... the reason is that the list NOT being re-created still uses the list
      //       being re-created for boundary check; it needs the old value of the other marker.
      //       However, NO_DELTA means the other marker was already set to a new value. For now,
      //       assume concurrent re-creation is rare and the gap before commit is tiny.
      return NO_INDEX;
    }
    assert rc <= delta; // There can never be more concurrent takers than uncommitted ones.
    long newDelta = incDeltaValue(marker, toMarker, delta); // Increase target list pos.
    if (newDelta == NO_DELTA) return NO_INDEX; // Target list is being drained.
    long newVal = from.setRc(from.setDelta(oldVal, newDelta), rc + 1); // Set delta and refcount.
    if (setState(oldVal, newVal)) return oldVal;
  }
}
/**
 * Completes a move previously reserved via reserveArrayIndex: decrements the from-list
 * refcount; the last of a batch of concurrent operations to finish also advances the
 * from-list marker by the accumulated delta, and creates (or schedules the creation of)
 * the {@code to} list if it does not exist yet.
 *
 * @param originalMarker the from-list marker observed at reservation time; used as the
 *        starting index when the other list has to be (re-)created.
 */
private void commitArrayIndex(Marker from, Marker to, long originalMarker) {
  while (true) {
    long oldVal = state.get(), rc = from.getRc(oldVal);
    long newVal = from.setRc(oldVal, rc - 1); // Decrease refcount.
    assert rc > 0;
    if (rc == 1) {
      // We are the last of the concurrent operations to finish. Commit.
      long marker = from.getMarker(oldVal), delta = from.getDelta(oldVal),
          otherMarker = to.getMarker(oldVal), otherDelta = to.getDelta(oldVal);
      assert rc <= delta;
      // Move marker according to delta, change delta to 0.
      long newMarker = applyDeltaToMarker(marker, otherMarker, delta);
      newVal = from.setDelta(from.setMarker(newVal, newMarker), 0);
      if (otherMarker == NO_MARKER) {
        // The other list doesn't exist, create it at the first index of our op.
        assert otherDelta == 0;
        newVal = to.setMarker(newVal, originalMarker);
      } else if (otherDelta > 0 && otherDelta != NO_DELTA
          && applyDeltaToMarker(otherMarker, marker, otherDelta) == NO_MARKER) {
        // The other list will be exhausted when it commits. Create new one pending that commit.
        newVal = to.setDelta(to.setMarker(newVal, originalMarker), NO_DELTA);
      }
    }
    if (setState(oldVal, newVal)) return;
  }
}
/** Returns a processed batch to the CVB pool so it can be reused for later work. */
@Override
public void returnData(ColumnVectorBatch data) {
  cvbPool.offer(data);
}
private long applyDeltaToMarker(long marker, long markerLimit, long delta) { if (delta == NO_DELTA) return marker; // List was recreated while we were exhausting it. if (delta == pool.length) { assert markerLimit == NO_MARKER; // If we had the entire pool, other list couldn't exist. return NO_MARKER; // We exhausted the entire-pool-sized list. } marker = getArrayIndex(marker, delta); // Just move the marker according to delta. if (marker == markerLimit) return NO_MARKER; // We hit the limit - the list was exhausted. return marker; }
@VisibleForTesting public boolean tryOffer(T t) { if (t == null || pool.length == 0) return false; // 0 size means no-pooling case - passthru. helper.resetBeforeOffer(t); return offerImpl(t); }
final CountDownLatch cdlIn = new CountDownLatch(TASK_COUNT), cdlOut = new CountDownLatch(1); final FixedSizedObjectPool<Object> pool = new FixedSizedObjectPool<>(size, new OneObjHelper(), true); Object o = new Object(); allGiven.add(o); assertTrue(pool.tryOffer(o)); while (pool.take() != OneObjHelper.THE_OBJECT); for (int i = 0; i < size; ++i) { assertTrue(pool.tryOffer(new Object())); assertFalse(pool.tryOffer(new Object())); for (int i = 0; i < size; ++i) { assertTrue(OneObjHelper.THE_OBJECT != pool.take()); assertTrue(OneObjHelper.THE_OBJECT == pool.take());
private T takeImpl() { long oldState = reserveArrayIndex(OBJECTS, EMPTY); if (oldState == NO_INDEX) return null; // For whatever reason, reserve failed. long originalMarker = OBJECTS.getMarker(oldState), delta = OBJECTS.getDelta(oldState); int arrayIndex = (int)getArrayIndex(originalMarker, delta); T result = pool[arrayIndex]; if (result == null) { throwError(oldState, arrayIndex, "null"); } pool[arrayIndex] = null; commitArrayIndex(OBJECTS, EMPTY, originalMarker); return result; }
/**
 * Lock-freely reserves a slot at the current head of the {@code from} list so the caller
 * can move that slot's object to the {@code to} list. On success the from-list delta and
 * refcount in the packed state word have been advanced via CAS; the caller must later
 * call commitArrayIndex with the marker derived from the returned state.
 *
 * @param from marker of the list an index is taken from (OBJECTS for take, EMPTY for offer).
 * @param to marker of the list the slot will move to.
 * @return the pre-CAS state value (caller derives marker/delta, hence the slot, from it),
 *         or NO_INDEX if reservation failed: empty list, too many concurrent operations,
 *         or a concurrent drain/re-creation of either list.
 */
private long reserveArrayIndex(Marker from, Marker to) {
  while (true) {
    long oldVal = state.get(), marker = from.getMarker(oldVal), delta = from.getDelta(oldVal),
        rc = from.getRc(oldVal), toMarker = to.getMarker(oldVal), toDelta = to.getDelta(oldVal);
    if (marker == NO_MARKER) return NO_INDEX; // The list is empty.
    if (delta == MAX_DELTA) return NO_INDEX; // Too many concurrent operations; spurious failure.
    if (delta == NO_DELTA) return NO_INDEX; // List is drained and recreated concurrently.
    if (toDelta == NO_DELTA) { // Same for the OTHER list; spurious.
      // TODO: the fact that concurrent re-creation of other list necessitates full stop is not
      //       ideal... the reason is that the list NOT being re-created still uses the list
      //       being re-created for boundary check; it needs the old value of the other marker.
      //       However, NO_DELTA means the other marker was already set to a new value. For now,
      //       assume concurrent re-creation is rare and the gap before commit is tiny.
      return NO_INDEX;
    }
    assert rc <= delta; // There can never be more concurrent takers than uncommitted ones.
    long newDelta = incDeltaValue(marker, toMarker, delta); // Increase target list pos.
    if (newDelta == NO_DELTA) return NO_INDEX; // Target list is being drained.
    long newVal = from.setRc(from.setDelta(oldVal, newDelta), rc + 1); // Set delta and refcount.
    if (setState(oldVal, newVal)) return oldVal;
  }
}
/**
 * Completes a move previously reserved via reserveArrayIndex: decrements the from-list
 * refcount; the last of a batch of concurrent operations to finish also advances the
 * from-list marker by the accumulated delta, and creates (or schedules the creation of)
 * the {@code to} list if it does not exist yet.
 *
 * @param originalMarker the from-list marker observed at reservation time; used as the
 *        starting index when the other list has to be (re-)created.
 */
private void commitArrayIndex(Marker from, Marker to, long originalMarker) {
  while (true) {
    long oldVal = state.get(), rc = from.getRc(oldVal);
    long newVal = from.setRc(oldVal, rc - 1); // Decrease refcount.
    assert rc > 0;
    if (rc == 1) {
      // We are the last of the concurrent operations to finish. Commit.
      long marker = from.getMarker(oldVal), delta = from.getDelta(oldVal),
          otherMarker = to.getMarker(oldVal), otherDelta = to.getDelta(oldVal);
      assert rc <= delta;
      // Move marker according to delta, change delta to 0.
      long newMarker = applyDeltaToMarker(marker, otherMarker, delta);
      newVal = from.setDelta(from.setMarker(newVal, newMarker), 0);
      if (otherMarker == NO_MARKER) {
        // The other list doesn't exist, create it at the first index of our op.
        assert otherDelta == 0;
        newVal = to.setMarker(newVal, originalMarker);
      } else if (otherDelta > 0 && otherDelta != NO_DELTA
          && applyDeltaToMarker(otherMarker, marker, otherDelta) == NO_MARKER) {
        // The other list will be exhausted when it commits. Create new one pending that commit.
        newVal = to.setDelta(to.setMarker(newVal, originalMarker), NO_DELTA);
      }
    }
    if (setState(oldVal, newVal)) return;
  }
}
@Override public void returnData(OrcEncodedColumnBatch ecb) { for (int colIx = 0; colIx < ecb.getTotalColCount(); ++colIx) { if (!ecb.hasData(colIx)) continue; ColumnStreamData[] datas = ecb.getColumnData(colIx); for (ColumnStreamData data : datas) { if (data == null || data.decRef() != 0) continue; if (LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) { for (MemoryBuffer buf : data.getCacheBuffers()) { LlapIoImpl.LOCKING_LOGGER.trace("Unlocking {} at the end of processing", buf); } } bufferManager.decRefBuffers(data.getCacheBuffers()); if (useObjectPools) { CSD_POOL.offer(data); } } } // We can offer ECB even with some streams not discarded; reset() will clear the arrays. if (useObjectPools) { ECB_POOL.offer(ecb); } }
private long incDeltaValue(long markerFrom, long otherMarker, long delta) { if (delta == pool.length) return NO_DELTA; // The (pool-sized) list is being fully drained. long result = delta + 1; if (getArrayIndex(markerFrom, result) == getArrayIndex(otherMarker, 1)) { return NO_DELTA; // The list is being drained, cannot increase the delta anymore. } return result; }
@VisibleForTesting public boolean tryOffer(T t) { if (t == null || pool.length == 0) return false; // 0 size means no-pooling case - passthru. helper.resetBeforeOffer(t); return offerImpl(t); }
private boolean offerImpl(T t) { long oldState = reserveArrayIndex(EMPTY, OBJECTS); if (oldState == NO_INDEX) return false; // For whatever reason, reserve failed. long originalMarker = EMPTY.getMarker(oldState), delta = EMPTY.getDelta(oldState); int arrayIndex = (int)getArrayIndex(originalMarker, delta); if (pool[arrayIndex] != null) { throwError(oldState, arrayIndex, "non-null"); } pool[arrayIndex] = t; commitArrayIndex(EMPTY, OBJECTS, originalMarker); return true; }
private void processColumnCacheData(LlapSerDeDataBuffer[][][] cacheBuffers, OrcEncodedColumnBatch ecb, int colIx) { // The column has been obtained from cache. LlapSerDeDataBuffer[][] colData = cacheBuffers[colIx]; if (LlapIoImpl.CACHE_LOGGER.isTraceEnabled()) { LlapIoImpl.CACHE_LOGGER.trace("Processing cache data for column " + colIx + ": " + SerDeLowLevelCacheImpl.toString(colData)); } for (int streamIx = 0; streamIx < colData.length; ++streamIx) { if (colData[streamIx] == null) continue; ColumnStreamData cb = useObjectPools ? CSD_POOL.take() : new ColumnStreamData(); cb.incRef(); cb.setCacheBuffers(Lists.<MemoryBuffer>newArrayList(colData[streamIx])); ecb.setStreamData(colIx, streamIx, cb); } }
/**
 * Builds a pool of reusable IoTrace objects sized to the IO thread pool, so each IO
 * thread can obtain a trace without allocating; traces are reset before being re-pooled.
 */
public static FixedSizedObjectPool<IoTrace> createTracePool(Configuration conf) {
  final int traceSize = (int) HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_TRACE_SIZE);
  final boolean alwaysDump = HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_TRACE_ALWAYS_DUMP);
  int poolSize = HiveConf.getIntVar(conf, ConfVars.LLAP_IO_THREADPOOL_SIZE);
  return new FixedSizedObjectPool<>(poolSize, new Pool.PoolObjectHelper<IoTrace>() {
    @Override
    public IoTrace create() {
      return new IoTrace(traceSize, alwaysDump);
    }

    @Override
    public void resetBeforeOffer(IoTrace t) {
      t.reset();
    }
  });
}
}