/** * Reconfigures stream / queue consumer due to instances change. * * @param consumerQueues all queues that need to reconfigure * @param groupId consumer group id * @param instances consumer instance count */ public static void reconfigure(Iterable<QueueName> consumerQueues, long groupId, int instances, QueueAdmin queueAdmin) throws Exception { // Then reconfigure stream/queue for (QueueName queueName : consumerQueues) { queueAdmin.configureInstances(queueName, groupId, instances); } }
@Override public void filterRow(List<KeyValue> kvs) { byte[] dataBytes = null; byte[] metaBytes = null; byte[] stateBytes = null; // list is very short so it is ok to loop thru to find columns for (KeyValue kv : kvs) { if (hasQualifier(kv, QueueEntryRow.DATA_COLUMN)) { dataBytes = kv.getValue(); } else if (hasQualifier(kv, QueueEntryRow.META_COLUMN)) { metaBytes = kv.getValue(); } else if (hasQualifier(kv, stateColumnName)) { stateBytes = kv.getValue(); } } if (dataBytes == null || metaBytes == null) { skipRow = true; return; } QueueEntryRow.CanConsume canConsume = QueueEntryRow.canConsume(consumerConfig, transaction, writePointer, counter, metaBytes, stateBytes); // Only skip the row when canConsumer == NO, so that in case of NO_INCLUDING_ALL_OLDER, the client // can still see the row and move the scan start row. skipRow = canConsume == QueueEntryRow.CanConsume.NO; }
/**
 * Coprocessor start hook: derives the app/flow identity and the consumer-config
 * cache from the region's table name. No-op for non-region environments.
 */
@Override
public void start(CoprocessorEnvironment env) {
  if (!(env instanceof RegionCoprocessorEnvironment)) {
    return;
  }
  String tableName = ((RegionCoprocessorEnvironment) env).getRegion().getTableDesc().getNameAsString();
  appName = HBaseQueueAdmin.getApplicationName(tableName);
  flowName = HBaseQueueAdmin.getFlowName(tableName);
  // The consumer-config table name is derived from the queue table name.
  String configTableName = QueueUtils.determineQueueConfigTableName(tableName);
  configCache = ConsumerConfigCache.getInstance(env.getConfiguration(), Bytes.toBytes(configTableName));
}
// NOTE(review): this is an interior fragment of an eviction-check method (the enclosing
// signature and the closing braces are outside this view) — comments below are hedged.
// No consumer groups configured: presumably nothing can ever consume, so evict freely.
if (consumerConfig.getNumGroups() == 0) { return true;
// Negative group count: presumably "unknown", so never evict.
if (consumerConfig.getNumGroups() < 0) { return false;
// Row is missing its data column — count it as incomplete and keep it.
if (!QueueEntryRow.isDataColumn(cell.getQualifierArray(), cell.getQualifierOffset())) { skippedIncomplete++; return false;
// Row is missing its meta column — likewise incomplete.
if (!QueueEntryRow.isMetaColumn(cell.getQualifierArray(), cell.getQualifierOffset())) { skippedIncomplete++; return false;
// Non-state columns are irrelevant to eviction; move to the next cell.
if (!QueueEntryRow.isStateColumn(cell.getQualifierArray(), cell.getQualifierOffset())) { continue;
// A state cell that is not PROCESSED means some consumer still needs the row.
if (!isProcessed(cell, consumerInstance)) { break;
byte[] startRow = consumerConfig.getStartRow(consumerInstance);
// Cell row key is before this consumer's start row — presumably already consumed by it.
if (startRow != null && compareRowKey(cell, startRow) < 0) { consumedGroups++;
// Evictable when every group consumed the row, or the row precedes the smallest start row.
return consumedGroups == consumerConfig.getNumGroups() || compareRowKey(result.get(0), consumerConfig.getSmallestStartRow()) < 0;
// NOTE(review): interior fragment of an eviction scan loop (enclosing method and closing
// braces are outside this view) — comments below are hedged.
// Entered a new queue (or first iteration): refresh the cached queue identity,
// row prefix, and consumer configuration for this queue.
if (currentQueue == null || !QueueEntryRow.isQueueEntry(currentQueueRowPrefix, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) { QueueName queueName = QueueEntryRow.getQueueName( appName, flowName, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); currentQueue = queueName.toBytes(); currentQueueRowPrefix = QueueEntryRow.getQueueRowPrefix(queueName); consumerConfig = configCache.getConsumerConfig(currentQueue);
// Row fully consumed by all groups: count the eviction and drop the buffered cells.
if (canEvict(consumerConfig, results)) { rowsEvicted++; results.clear();
/** * Returns {@code true} if the given {@link org.apache.hadoop.hbase.KeyValue} has a * {@link ConsumerEntryState#PROCESSED} state and also put the consumer information into the * given {@link ConsumerInstance}. Otherwise, returns {@code false} and the {@link ConsumerInstance} is * left untouched. */ private boolean isProcessed(Cell cell, ConsumerInstance consumerInstance) { int stateIdx = cell.getValueOffset() + cell.getValueLength() - 1; boolean processed = cell.getValueArray()[stateIdx] == ConsumerEntryState.PROCESSED.getState(); if (processed) { // Column is "s<groupId>" long groupId = Bytes.toLong(cell.getQualifierArray(), cell.getQualifierOffset() + 1); // Value is "<writePointer><instanceId><state>" int instanceId = Bytes.toInt(cell.getValueArray(), cell.getValueOffset() + Bytes.SIZEOF_LONG); consumerInstance.setGroupInstance(groupId, instanceId); } return processed; } }
@Override public void deleteFlow(String flowName) { stopFlow(flowName); try { //Delete the Queues queueAdmin.clearAllForFlow(flowName, flowName); //Delete the JAR in HDFS Location jarinHDFS = location.append(flowName); jarinHDFS.delete(); } catch (Exception e) { LOG.warn(e.getMessage(), e); } }
/**
 * Creates a HBase filter that will filter out rows with state column state = PROCESSED (ignoring transaction).
 */
private Filter createStateFilter() {
  // The mask is all zeros except its trailing byte, so the bitwise AND compares
  // only the final state byte of the column value against PROCESSED; NOT_EQUAL
  // then keeps every row whose state is not PROCESSED.
  // NOTE(review): mask length is Ints.BYTES * 2 + 1 — confirm it matches the
  // actual state-value layout written by the queue producer.
  byte[] processedMask = new byte[Ints.BYTES * 2 + 1];
  processedMask[processedMask.length - 1] = ConsumerEntryState.PROCESSED.getState();
  return new SingleColumnValueFilter(
    QueueEntryRow.COLUMN_FAMILY, stateColumnName,
    CompareFilter.CompareOp.NOT_EQUAL,
    new BitComparator(processedMask, BitComparator.BitwiseOp.AND));
}
}
// NOTE(review): fragment of a loop body defined outside this view — presumably iterating
// map entries of queue name -> group configuration; confirm against the enclosing method.
queueAdmin.configureGroups(row.getKey(), row.getValue());