void join(RowMutationState newRow) {
    // If we already have a row and the new row has an ON DUPLICATE KEY clause,
    // ignore the new values (as that's what the server will do).
    if (newRow.onDupKeyBytes == null) {
        // Increment the column value size by the new row's column value size.
        colValuesSize += newRow.colValuesSize;
        for (Map.Entry<PColumn, byte[]> entry : newRow.columnValues.entrySet()) {
            PColumn col = entry.getKey();
            byte[] oldValue = columnValues.put(col, entry.getValue());
            if (oldValue != null) {
                // Decrement the column value size by the size of each column value that was replaced.
                colValuesSize -= (col.getEstimatedSize() + oldValue.length);
            }
        }
    }
    // Concatenate the ON DUPLICATE KEY bytes to allow multiple
    // increments of the same row in the same commit batch.
    this.onDupKeyBytes = PhoenixIndexBuilder.combineOnDupKey(this.onDupKeyBytes, newRow.onDupKeyBytes);
    statementIndexes = joinSortedIntArrays(statementIndexes, newRow.getStatementIndexes());
}
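The last two lines hand off to helper methods. The first, PhoenixIndexBuilder.combineOnDupKey, folds the new row's serialized ON DUPLICATE KEY payload into whatever is already buffered for the row, which is what lets several atomic upserts of the same row share one commit batch. Below is a minimal sketch of the contract the join logic appears to assume; the name combineOnDupKeySketch and the raw byte concatenation are illustrative stand-ins, since the real method operates on Phoenix's serialized expression format rather than opaque bytes.

// Hypothetical sketch of the combine contract; NOT Phoenix's actual implementation.
// Assumption: a null argument means that statement carried no ON DUPLICATE KEY clause.
static byte[] combineOnDupKeySketch(byte[] current, byte[] incoming) {
    // A plain UPSERT (incoming == null) overwrites the row outright, superseding
    // any pending clauses, so the combined result resets to null in that case.
    if (current == null || incoming == null) {
        return incoming;
    }
    // Stand-in for the real logic, which merges the two serialized expression
    // payloads so the server applies them in statement order.
    byte[] combined = new byte[current.length + incoming.length];
    System.arraycopy(current, 0, combined, 0, current.length);
    System.arraycopy(incoming, 0, combined, current.length, incoming.length);
    return combined;
}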
private void addUncommittedStatementIndexes(Collection<RowMutationState> rowMutations) {
    // Accumulate each row's statement indexes into the connection-level
    // set of statements with uncommitted data.
    for (RowMutationState rowMutationState : rowMutations) {
        uncommittedStatementIndexes =
                joinSortedIntArrays(uncommittedStatementIndexes, rowMutationState.getStatementIndexes());
    }
}
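Both methods lean on joinSortedIntArrays, which merges two sorted statement-index arrays. As a rough illustration of that helper's behavior, here is a minimal merge-with-dedup sketch; the signature and the duplicate elimination are assumptions inferred from how the arrays are used above, not a copy of Phoenix's implementation.

// Minimal sketch of a sorted-merge helper with duplicate elimination.
// Assumes both inputs are sorted ascending; name and exact behavior are
// assumptions, not Phoenix's actual joinSortedIntArrays.
static int[] joinSortedIntArraysSketch(int[] a, int[] b) {
    int[] merged = new int[a.length + b.length];
    int i = 0, j = 0, k = 0;
    while (i < a.length || j < b.length) {
        int next;
        if (j >= b.length || (i < a.length && a[i] <= b[j])) {
            next = a[i++];
        } else {
            next = b[j++];
        }
        // Skip any duplicates of the value just emitted, in both inputs.
        while (i < a.length && a[i] == next) i++;
        while (j < b.length && b[j] == next) j++;
        merged[k++] = next;
    }
    // Trim the scratch array down to the actual merged length.
    return java.util.Arrays.copyOf(merged, k);
}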