/**
 * Returns the platform preferred implementation of an insertion ordered set based on a hash
 * table.
 */
static <E> Set<E> newLinkedHashSetWithExpectedSize(int expectedSize) {
    return Sets.newLinkedHashSetWithExpectedSize(expectedSize);
}
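For context, a minimal usage sketch of the Guava factory this method delegates to (the class and element values are illustrative, not from the original source). Pre-sizing the set avoids rehashing during population while still preserving insertion order:

import java.util.Set;
import com.google.common.collect.Sets;

public class LinkedHashSetDemo {
    public static void main(String[] args) {
        // Sized for three elements up front, so no resize occurs while adding.
        Set<String> names = Sets.newLinkedHashSetWithExpectedSize(3);
        names.add("beta");
        names.add("alpha");
        names.add("gamma");
        System.out.println(names); // [beta, alpha, gamma] -- insertion order, not sorted
    }
}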
public void testNewLinkedHashSetWithExpectedSizeSmall() {
    LinkedHashSet<Integer> set = Sets.newLinkedHashSetWithExpectedSize(0);
    verifySetContents(set, EMPTY_COLLECTION);
}
public void testNewLinkedHashSetWithExpectedSizeLarge() {
    LinkedHashSet<Integer> set = Sets.newLinkedHashSetWithExpectedSize(1000);
    verifySetContents(set, EMPTY_COLLECTION);
}
private Set<Element> allAnnotatedElements() {
    Set<Element> elements = Sets.newLinkedHashSetWithExpectedSize(100);
    for (TypeElement annotation : annotations()) {
        Set<? extends Element> annotatedElements = round().getElementsAnnotatedWith(annotation);
        checkAnnotation(annotation, annotatedElements);
        elements.addAll(annotatedElements);
    }
    return elements;
}
/**
 * Process all increasing propagation requests.
 * This is done from the strongest through to the weakest.
 */
private void processIncrease() {
    for (int depth = 0; depth < rules.getMaxValue() - 1; depth++) {
        byte value = (byte) (rules.getMaxValue() - depth);
        while (!increaseQueues[depth].isEmpty()) {
            Set<Vector3i> toProcess = increaseQueues[depth];
            increaseQueues[depth] = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());

            /* This step will add any new values to `increaseQueues` */
            for (Vector3i pos : toProcess) {
                push(pos, value);
            }
        }
    }
}
/**
 * Process all reducing propagation requests.
 * This is done from the largest value through the smallest.
 */
private void processReduction() {
    for (int depth = 0; depth < rules.getMaxValue(); depth++) {
        byte oldValue = (byte) (rules.getMaxValue() - depth);
        while (!reduceQueues[depth].isEmpty()) {
            Set<Vector3i> toProcess = reduceQueues[depth];
            reduceQueues[depth] = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());

            /* This step will add any new reductions to the `reduceQueues` set */
            for (Vector3i pos : toProcess) {
                purge(pos, oldValue);
            }
        }
    }
}
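Both propagation methods above rely on the same drain-and-swap idiom: the queue for the current depth is captured, replaced with a fresh pre-sized set, and then drained, so that any follow-up work enqueued during processing lands in the new set and is picked up on the next pass of the while loop. A minimal, self-contained sketch of the idiom (the class, names, and follow-up rule are illustrative, not from the original code):

import java.util.Set;
import com.google.common.collect.Sets;

class DrainAndSwap {
    private Set<Integer> queue = Sets.newLinkedHashSetWithExpectedSize(16);

    void enqueue(int item) {
        queue.add(item);
    }

    void processAll() {
        while (!queue.isEmpty()) {
            // Swap in a fresh set so process() may enqueue follow-up work
            // without invalidating iteration over the current batch.
            Set<Integer> toProcess = queue;
            queue = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());
            for (int item : toProcess) {
                process(item);
            }
        }
    }

    private void process(int item) {
        if (item > 0) {
            enqueue(item - 1); // example follow-up work, drained on the next pass
        }
    }
}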
@Override
protected Collection<Cache> loadCaches() {
    Assert.notNull(this.cacheManager, "A backing EhCache CacheManager is required");
    Status status = this.cacheManager.getStatus();
    Assert.isTrue(Status.STATUS_ALIVE.equals(status),
            "An 'alive' EhCache CacheManager is required - current cache is " + status.toString());

    String[] names = this.cacheManager.getCacheNames();
    Collection<Cache> caches = Sets.newLinkedHashSetWithExpectedSize(names.length);
    for (String name : names) {
        if (enableMetrics) {
            caches.add(new InstrumentedEhCacheCache(this.cacheManager.getEhcache(name)));
        } else {
            caches.add(new EhCacheCache(this.cacheManager.getEhcache(name)));
        }
    }
    return caches;
}
private IndexMaintainer(RowKeySchema dataRowKeySchema, boolean isDataTableSalted,
        byte[] indexTableName, int nIndexColumns, int nIndexPKColumns,
        Integer nIndexSaltBuckets, List<PColumnFamily> cfs, boolean indexWALDisabled) {
    this(dataRowKeySchema, isDataTableSalted);
    int nDataPKColumns = dataRowKeySchema.getFieldCount() - (isDataTableSalted ? 1 : 0);
    this.indexTableName = indexTableName;
    this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexPKColumns - nDataPKColumns);
    this.indexedColumnTypes = Lists.<PDataType>newArrayListWithExpectedSize(nIndexPKColumns - nDataPKColumns);
    this.indexedColumnByteSizes = Lists.<Integer>newArrayListWithExpectedSize(nIndexPKColumns - nDataPKColumns);
    this.coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexColumns - nIndexPKColumns);
    this.allColumns = Sets.newLinkedHashSetWithExpectedSize(nDataPKColumns + nIndexColumns);
    this.allColumns.addAll(indexedColumns);
    this.allColumns.addAll(coveredColumns);
    this.rowKeyMetaData = newRowKeyMetaData(nIndexPKColumns);
    this.nIndexSaltBuckets = nIndexSaltBuckets == null ? 0 : nIndexSaltBuckets;
    this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(cfs);
    this.nDataCFs = cfs.size();
    this.indexWALDisabled = indexWALDisabled;
}
nIndexSaltBuckets = WritableUtils.readVInt(input);
int nIndexedColumns = WritableUtils.readVInt(input);
indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
for (int i = 0; i < nIndexedColumns; i++) {
    byte[] cf = Bytes.readByteArray(input);
    // ...
}
coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nCoveredColumns);
for (int i = 0; i < nCoveredColumns; i++) {
    byte[] cf = Bytes.readByteArray(input);
    // ...
}
this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedColumns.size() + coveredColumns.size());
allColumns.addAll(indexedColumns);
allColumns.addAll(coveredColumns);
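Sizing the union as the sum of the two component sets guarantees the two addAll calls never trigger a resize (any overlap between the sets merely wastes a little capacity). A minimal sketch of the same pattern, assuming Guava on the classpath (the method and set contents are illustrative):

import java.util.Set;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

class UnionSizing {
    static Set<String> union(Set<String> indexed, Set<String> covered) {
        // Expected size = sum of parts: no rehash during addAll, and
        // insertion order (indexed first, then covered) is preserved.
        Set<String> all = Sets.newLinkedHashSetWithExpectedSize(indexed.size() + covered.size());
        all.addAll(indexed);
        all.addAll(covered);
        return all;
    }

    public static void main(String[] args) {
        System.out.println(union(ImmutableSet.of("a", "b"), ImmutableSet.of("b", "c"))); // [a, b, c]
    }
}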
byte[] emptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst();
dataEmptyKeyValueRef = new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(), emptyKvQualifier);
this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumnsMap.size());
this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size());
for (Expression expression : indexedExpressions) {
    KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() {
        // ...
indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
for (int i = 0; i < nIndexedColumns; i++) {
    byte[] cf = Bytes.readByteArray(input);
    // ...
}
@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    containsNull = input.readBoolean();
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    int offset = 0;
    int i = 0;
    if (i < len) {
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len - 1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}
@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    input.readBoolean(); // Unused, but left for b/w compat. TODO: remove in next major release
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    // TODO: consider using a regular HashSet as we never serialize from the server-side
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    int offset = 0;
    int i = 0;
    if (i < len) {
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len - 1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}
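The loop bounds in the two readFields variants above are easy to misread: the first decoded value is captured as minValue, the last as maxValue, and when len == 1 both refer to the same object. A simplified sketch of the same boundary handling over a plain array (the class and types are illustrative, not the Phoenix ones):

class MinMaxCapture {
    static void demo(int[] sorted) {
        Integer minValue;
        Integer maxValue;
        int len = sorted.length;
        int i = 0;
        if (i < len) {
            minValue = sorted[0];           // first value -> min
            while (++i < len - 1) {
                // middle values: in the original, added to the set only
            }
            if (i < len) {
                maxValue = sorted[len - 1]; // last value -> max
            } else {
                maxValue = minValue;        // single element: min == max
            }
        } else {
            minValue = maxValue = null;     // empty input: sentinel value
        }
        System.out.println(minValue + ".." + maxValue);
    }

    public static void main(String[] args) {
        demo(new int[] {3, 5, 9}); // 3..9
        demo(new int[] {7});       // 7..7
    }
}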
new TrackOrderPreservingExpressionCompiler(context, groupBy, orderByNodes.size(), Ordering.ORDERED);
LinkedHashSet<OrderByExpression> orderByExpressions =
        Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
for (OrderByNode node : orderByNodes) {
    boolean isAscending = node.isAscending();
    // ...
}
} else {
    columns = newArrayListWithExpectedSize(colDefs.size());
    pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1); // in case salted
tupleProjector, whereExpression);
LinkedHashSet<OrderByExpression> orderByExpressions =
        Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
for (OrderByNode node : orderByNodes) {
    ParseNode parseNode = node.getNode();
    // ...
}
addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
for (PColumn column : allColumnsToBe) {
    if (column.getViewConstant() != null) {
        // ...
    }
}

int nColumns = onDupKeyPairs.size();
List<Expression> updateExpressions = Lists.newArrayListWithExpectedSize(nColumns);
LinkedHashSet<PColumn> updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1);
updateColumns.add(new PColumnImpl(
        // ...
Set<RowKeyColumnExpression> unusedPkColumns;
if (dataTable.getBucketNum() != null) { // Ignore SALT column
    unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1);
    posOffset++;
} else {
    unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
}