@Override
public Int2ObjectMap<Aggregator[]> makeDimExtractionAggregateStore()
{
  // Fastutil's open-addressed map keeps the int keys unboxed.
  final Int2ObjectMap<Aggregator[]> store = new Int2ObjectOpenHashMap<>();
  return store;
}
/**
 * Construct an empty sparse matrix with the specified number of rows and columns.
 *
 * @param rows    number of rows
 * @param columns number of columns
 */
public SparseMatrix(int rows, int columns) {
    super(rows, columns);
    // Rows are stored sparsely: only rows that actually receive data get an entry here.
    this.rowVectors = new Int2ObjectOpenHashMap<>();
}
/**
 * Captures the collaborators needed to build position links; the comparator is
 * derived from the hash strategy / addresses pair supplied by the caller.
 */
public FactoryBuilder(int size, PagesHashStrategy pagesHashStrategy, LongArrayList addresses)
{
    this.size = size;
    this.pagesHashStrategy = pagesHashStrategy;
    this.addresses = addresses;
    this.comparator = new PositionComparator(pagesHashStrategy, addresses);
    this.positionLinks = new Int2ObjectOpenHashMap<>();
}
/**
 * Records {@code sketch} under {@code position} in the per-buffer cache,
 * lazily creating the position map the first time a buffer is seen.
 */
private void putSketchIntoCache(final ByteBuffer buf, final int position, final HllSketch sketch)
{
  sketchCache
      .computeIfAbsent(buf, ignored -> new Int2ObjectOpenHashMap<>())
      .put(position, sketch);
}
/**
 * Stores {@code sketch} at {@code position} for the given buffer,
 * creating that buffer's position map on first use.
 */
private void putSketch(final ByteBuffer buffer, final int position, final UpdateDoublesSketch sketch)
{
  final Int2ObjectMap<UpdateDoublesSketch> perBuffer =
      sketches.computeIfAbsent(buffer, ignored -> new Int2ObjectOpenHashMap<>());
  perBuffer.put(position, sketch);
}
/**
 * Stores {@code union} at {@code position} for the given buffer,
 * creating that buffer's position map on first use.
 */
private void putUnion(final ByteBuffer buffer, final int position, final DoublesUnion union)
{
  final Int2ObjectMap<DoublesUnion> perBuffer =
      unions.computeIfAbsent(buffer, ignored -> new Int2ObjectOpenHashMap<>());
  perBuffer.put(position, union);
}
/**
 * Schedules a fixed set of lifespans (driver groups), one per bucket, on the nodes
 * assigned by the supplied {@link BucketNodeMap}.
 *
 * @param bucketNodeMap mapping from bucket to node; every bucket must already have an assignment
 * @param partitionHandles one partition handle per bucket; must not be the NOT_PARTITIONED singleton
 * @param concurrentLifespansPerTask optional cap (>= 1) on lifespans driven concurrently per task
 */
public FixedLifespanScheduler(BucketNodeMap bucketNodeMap, List<ConnectorPartitionHandle> partitionHandles, OptionalInt concurrentLifespansPerTask)
{
    checkArgument(!partitionHandles.equals(ImmutableList.of(NOT_PARTITIONED)), "partitionHandles must not be the NOT_PARTITIONED singleton");
    checkArgument(partitionHandles.size() == bucketNodeMap.getBucketCount(), "number of partitionHandles must match the bucket count");

    // Build both directions of the bucket <-> node association in a single pass.
    Map<Node, IntList> nodeToDriverGroupMap = new HashMap<>();
    Int2ObjectMap<Node> driverGroupToNodeMap = new Int2ObjectOpenHashMap<>();
    for (int bucket = 0; bucket < bucketNodeMap.getBucketCount(); bucket++) {
        Node node = bucketNodeMap.getAssignedNode(bucket).get();
        nodeToDriverGroupMap.computeIfAbsent(node, key -> new IntArrayList()).add(bucket);
        driverGroupToNodeMap.put(bucket, node);
    }

    this.driverGroupToNodeMap = driverGroupToNodeMap;
    // Each node's buckets are exposed as an iterator so callers can consume them incrementally.
    this.nodeToDriverGroupsMap = nodeToDriverGroupMap.entrySet().stream()
            .collect(toImmutableMap(Map.Entry::getKey, entry -> entry.getValue().iterator()));
    this.partitionHandles = requireNonNull(partitionHandles, "partitionHandles is null");
    if (concurrentLifespansPerTask.isPresent()) {
        // Fixed message typo: previously read "must be great or equal to 1".
        checkArgument(concurrentLifespansPerTask.getAsInt() >= 1, "concurrentLifespansPerTask must be greater than or equal to 1 if present");
    }
    this.concurrentLifespansPerTask = requireNonNull(concurrentLifespansPerTask, "concurrentLifespansPerTask is null");
}
/**
 * Construct a sparse matrix seeded from an existing row map.
 * <p>
 * NOTE(review): taking a {@code Map} keeps the existing API, although iterating it is
 * less efficient than a plain list of rows.
 *
 * @param rows       number of rows
 * @param columns    number of columns
 * @param rowVectors existing row index -> vector mapping to copy from
 * @param shallow    if true, share the given vectors; if false, clone each one
 */
public SparseMatrix(int rows, int columns, Map<Integer, Vector> rowVectors, boolean shallow) {
    super(rows, columns);
    this.rowVectors = new Int2ObjectOpenHashMap<>();
    for (Map.Entry<Integer, Vector> entry : rowVectors.entrySet()) {
        // A shallow copy aliases the source vector; a deep copy clones it.
        Vector row = shallow ? entry.getValue() : entry.getValue().clone();
        this.rowVectors.put(entry.getKey().intValue(), row);
    }
}
private Int2ObjectOpenHashMap readDataFile(InputStream is) { Int2ObjectOpenHashMap map = new Int2ObjectOpenHashMap(); boolean complete = false; BufferedReader input = null;
/**
 * Creates a {@link Union} backed by the memory region starting at {@code position}
 * within {@code buf}, and registers it in the per-buffer union cache.
 *
 * @param buf       buffer holding the sketch memory
 * @param position  byte offset of this union's region within {@code buf}
 * @param isWrapped true to wrap an existing union image already present in memory,
 *                  false to build a fresh union there
 * @return the newly created (or wrapped) union
 */
private Union createNewUnion(ByteBuffer buf, int position, boolean isWrapped)
{
  WritableMemory mem = getMemory(buf).writableRegion(position, maxIntermediateSize);
  Union union = isWrapped
                ? (Union) SetOperation.wrap(mem)
                : (Union) SetOperation.builder().setNominalEntries(size).build(Family.UNION, mem);
  // computeIfAbsent replaces the previous get / null-check / put sequence:
  // one lookup instead of two, identical observable behavior.
  unions.computeIfAbsent(buf, ignored -> new Int2ObjectOpenHashMap<>()).put(position, union);
  return union;
}
Int2ObjectOpenHashMap<Message> messages = new Int2ObjectOpenHashMap<>(); long startSer = theStats.startMsgSerialization(); boolean firstMessage = true;
/**
 * Creates an empty model with no learned state.
 */
public PMFModel() {
    // int-keyed model entries; presumably one entry per item/user id — TODO confirm with callers.
    model = new Int2ObjectOpenHashMap<>();
    // int -> double accumulator of mean weights.
    sumOfMeanWeight = new Int2DoubleOpenHashMap();
}
Int2ObjectOpenHashMap<Int2IntOpenHashMap> rowPositionMap = new Int2ObjectOpenHashMap<>(); for (int rowIndex = 0; rowIndex < rowSize(); rowIndex++) { SequentialSparseVector tempRowVector = row(rowIndex); Int2ObjectOpenHashMap<Int2IntOpenHashMap> columnPositionMap = new Int2ObjectOpenHashMap<>(); for (int columnIndex = 0; columnIndex < columnSize(); columnIndex++) { SequentialSparseVector tempColumnVector = column(columnIndex);
/**
 * Looks up — or lazily builds — the {@link LoggerContext} for the given key.
 * <p>
 * The lookup goes through the {@code activeContexts} cache rather than a plain map because
 * building a context can itself trigger logging, which calls back into this method; the cache
 * handles that recursive access where a naive map lookup could recurse forever. Inside the
 * loader we first consult {@code builtContexts} so an already-built context is reused instead
 * of constructing a second context for the same key.
 *
 * @param classLoader the class loader whose logger context is requested
 * @param key         cache key for the context (boxed by the caller)
 * @return the active logger context for {@code classLoader}
 * @throws ExecutionException if building the context fails inside the cache loader
 */
protected LoggerContext doGetLoggerContext(final ClassLoader classLoader, final Integer key) throws ExecutionException {
    return activeContexts.get(key, k -> {
        if (builtContexts.containsKey(k.intValue())) {
            return builtContexts.get(k.intValue());
        } else {
            LoggerContext context = artifactAwareContextSelector.buildContext(classLoader);
            // Copy-on-write: publish an updated copy so readers never see a map mid-mutation.
            // NOTE(review): assumes writes are serialized by the cache loader — confirm no two
            // loaders can run for the same key concurrently.
            Int2ObjectMap<LoggerContext> newBuiltContexts = new Int2ObjectOpenHashMap<>(builtContexts);
            newBuiltContexts.put(k.intValue(), context);
            builtContexts = newBuiltContexts;
            return context;
        }
    });
}
/**
 * Creates the context cache.
 * <p>
 * Disposed contexts are retained for {@code disposeDelayInMillis}; when an entry expires, the
 * removal listener stops the context, invalidates it in {@code activeContexts}, and publishes a
 * copy-on-write update of {@code builtContexts} with that key removed. A single-thread scheduler
 * (the "reaper") is created to drive that expiry.
 *
 * @param artifactAwareContextSelector selector used to build contexts on demand
 * @param reaperContextClassLoader     class loader installed on the reaper thread
 */
LoggerContextCache(ArtifactAwareContextSelector artifactAwareContextSelector, ClassLoader reaperContextClassLoader) {
    acquireContextDisposeDelay();
    this.artifactAwareContextSelector = artifactAwareContextSelector;
    activeContexts = newBuilder().build();
    disposedContexts = newBuilder().expireAfterWrite(disposeDelayInMillis, MILLISECONDS)
            .removalListener((key, value, cause) -> {
                stop((LoggerContext) value);
                activeContexts.invalidate(key);
                // Copy-on-write removal, mirroring how entries are added elsewhere in this class.
                Int2ObjectMap<LoggerContext> newBuiltContexts = new Int2ObjectOpenHashMap<>(builtContexts);
                newBuiltContexts.remove(((Integer) key).intValue());
                builtContexts = newBuiltContexts;
            }).build();
    executorService = newScheduledThreadPool(1, new LoggerContextReaperThreadFactory(reaperContextClassLoader));
}
/**
 * Constructor with default sizing.
 */
public MapIntegerDBIDStore() {
  // The implicit no-arg super constructor runs automatically; no need to call it explicitly.
  this.data = new Int2ObjectOpenHashMap<>();
}
/**
 * Constructor.
 *
 * @param size Expected size, used as the initial capacity hint for the backing hash map
 */
public MapIntegerDBIDStore(int size) {
  this.data = new Int2ObjectOpenHashMap<>(size);
}
/**
 * Constructor without existing data; delegates with a freshly allocated backing map.
 *
 * @param size Expected size, used as the initial capacity hint for the backing hash map
 * @param rlen Number of columns (record length)
 */
public MapIntegerDBIDRecordStore(int size, int rlen) {
  this(rlen, new Int2ObjectOpenHashMap<Object[]>(size));
}
@Override
ReusableNodeLongIterator initializeLeftNodeEdgesLongIterator() {
  // Per-segment iterator caches, pre-sized to the maximum number of segments.
  Int2ObjectOpenHashMap<ReusableNodeIntIterator> segmentIterators =
      new Int2ObjectOpenHashMap<ReusableNodeIntIterator>(getMaxNumSegments());
  Int2ObjectOpenHashMap<ReusableInternalIdToLongIterator> idMappers =
      new Int2ObjectOpenHashMap<ReusableInternalIdToLongIterator>(getMaxNumSegments());
  LeftSegmentEdgeAccessor<RightNodeMetadataLeftIndexedBipartiteGraphSegment> edgeAccessor =
      new LeftSegmentEdgeAccessor<RightNodeMetadataLeftIndexedBipartiteGraphSegment>(
          getReaderAccessibleInfo(), segmentIterators, idMappers);
  return new RightNodeMetadataMultiSegmentIterator(this, edgeAccessor);
}
@Override
ReusableNodeLongIterator initializeLeftNodeEdgesLongIterator() {
  // Per-segment iterator caches, pre-sized to the maximum number of segments.
  Int2ObjectOpenHashMap<ReusableNodeIntIterator> segmentIterators =
      new Int2ObjectOpenHashMap<ReusableNodeIntIterator>(getMaxNumSegments());
  Int2ObjectOpenHashMap<ReusableInternalIdToLongIterator> idMappers =
      new Int2ObjectOpenHashMap<ReusableInternalIdToLongIterator>(getMaxNumSegments());
  LeftSegmentEdgeAccessor<NodeMetadataLeftIndexedBipartiteGraphSegment> edgeAccessor =
      new LeftSegmentEdgeAccessor<NodeMetadataLeftIndexedBipartiteGraphSegment>(
          getReaderAccessibleInfo(), segmentIterators, idMappers);
  return new NodeMetadataMultiSegmentIterator(this, edgeAccessor);
}