/**
 * Returns an independent read view of the index file for the given chunk.
 * The duplicate shares the underlying storage but has its own
 * position/limit, so callers may navigate it freely.
 */
public ByteBuffer indexFileFor(int chunk) {
    final ByteBuffer indexFile = indexFiles.get(chunk);
    return indexFile.duplicate();
}
/**
 * Reads the raw serialized bytes of the kth record.
 *
 * <p>Records are fixed-size ({@code schema.getObjectSize()}) and packed
 * {@code numRecordsPerFile} per file, so the file index and the offset
 * within the file follow directly from division/modulo.
 *
 * @param kthRecord zero-based global record index
 * @return a freshly allocated array holding the record's bytes
 */
public byte[] getBytes(long kthRecord) {
    final int recordSize = schema.getObjectSize();
    final int fileIndex = (int) (kthRecord / numRecordsPerFile);
    final int byteOffset = (int) (kthRecord % numRecordsPerFile) * recordSize;
    // Duplicate so concurrent readers never see a moved position.
    final ByteBuffer view = buffers.getUnchecked(fileIndex).duplicate();
    view.position(byteOffset);
    final byte[] result = new byte[recordSize];
    view.get(result, 0, recordSize);
    return result;
}
/**
 * Binary-searches the record area for the offset of the given event id.
 * A slice starting just past the metadata header is used so the search
 * indexes records from zero.
 */
@Override
public int getStartOffset(long eventId) {
    ByteBuffer records = buffer.duplicate();
    records.position(META_DATA_SIZE);
    records = records.slice();
    return ByteBufferUtil.binarySearchOffset(records, 0, numRecords, eventId, SIZE_OF_DATA);
}
/**
 * Materializes the {@link Block} stored at the given absolute pointer.
 *
 * <p>Each file holds {@code numBlocksPerFile} fixed-size blocks, so the
 * owning file and the offset inside it are derived by division/modulo.
 * Metadata sits at the block's start, followed by the record area.
 *
 * @param pointer absolute byte position of the block across all files
 */
public Block find(long pointer) {
    final int blockBytes =
        numRecordsPerBlock * UserEventIndex.ID_SIZE + UserEventIndex.Block.MetaData.SIZE;
    final int fileSize = numBlocksPerFile * blockBytes;
    final MappedByteBuffer fileBuffer = buffers.getUnchecked((int) (pointer / fileSize));
    final int offsetInFile = (int) (pointer % fileSize);

    // Independent view over the metadata region.
    ByteBuffer metaDataView = fileBuffer.duplicate();
    metaDataView.position(offsetInFile);
    metaDataView = metaDataView.slice();

    // Independent view over the record region that follows the metadata.
    ByteBuffer recordView = fileBuffer.duplicate();
    recordView.position(offsetInFile + Block.MetaData.SIZE);
    recordView = recordView.slice();

    return new Block(new Block.MetaData(metaDataView), recordView);
}
/**
 * Deserializes and returns the kth record.
 *
 * @param kthRecord zero-based global record index
 * @return the record decoded via {@code schema.fromBytes}
 */
public T get(long kthRecord) {
    final int recordSize = schema.getObjectSize();
    final int fileIndex = (int) (kthRecord / numRecordsPerFile);
    // Duplicate so reads never disturb the shared buffer's position.
    final ByteBuffer view = buffers.getUnchecked(fileIndex).duplicate();
    view.position((int) (kthRecord % numRecordsPerFile) * recordSize);
    final byte[] raw = new byte[recordSize];
    view.get(raw, 0, recordSize);
    return schema.fromBytes(raw);
}
/**
 * Overwrites the record stored at the given id with the serialized form
 * of {@code t}, growing the tracked {@code maxId} watermark if needed.
 */
public void update(long id, T t) {
    maxId = Math.max(maxId, id);
    final int fileIndex = (int) (id / numRecordsPerFile);
    final ByteBuffer view = buffers.getUnchecked(fileIndex).duplicate();
    view.position((int) (id % numRecordsPerFile) * schema.getObjectSize());
    view.put(schema.toBytes(t));
}
/**
 * Returns a mapped buffer of the smooshed file with the given name.
 * Buffer's contents from 0 to capacity() are the whole mapped file
 * contents, limit() is equal to capacity().
 *
 * @return the mapped region, or {@code null} if no such internal file exists
 */
public ByteBuffer mapFile(String name) throws IOException {
    final Metadata metadata = internalFiles.get(name);
    if (metadata == null) {
        return null;
    }
    final int fileNum = metadata.getFileNum();
    // Pad the cache list so fileNum is a valid index.
    while (buffersList.size() <= fileNum) {
        buffersList.add(null);
    }
    MappedByteBuffer mapped = buffersList.get(fileNum);
    if (mapped == null) {
        // Lazily map the backing file on first access and cache the mapping.
        mapped = Files.map(outFiles.get(fileNum));
        buffersList.set(fileNum, mapped);
    }
    // Carve out just this internal file's byte range as an independent view.
    final ByteBuffer window = mapped.duplicate();
    window.position(metadata.getStartOffset()).limit(metadata.getEndOffset());
    return window.slice();
}
/**
 * Slices the term space into {@code PARTITION_COUNT} equally sized buffers.
 *
 * <p>When the whole term space fits in one mapping ({@code termLength < ONE_GIG})
 * all partitions are slices of the first mapped buffer; otherwise each
 * partition gets its own mapping and is returned as a duplicate.
 *
 * <p>Fix: the small-term path previously set position/limit directly on the
 * shared {@code mappedBuffers[0]}, permanently clobbering its state and
 * racing with any concurrent user of that buffer. We now slice from a
 * private duplicate; the returned slices are identical.
 *
 * @return one buffer per partition, each covering {@code termLength} bytes
 */
public ByteBuffer[] sliceTerms() {
    final ByteBuffer[] terms = new ByteBuffer[PARTITION_COUNT];
    if (termLength < ONE_GIG) {
        // Duplicate once so the shared mapped buffer's position/limit stay untouched.
        final ByteBuffer view = mappedBuffers[0].duplicate();
        for (int i = 0; i < PARTITION_COUNT; i++) {
            view.limit((termLength * i) + termLength).position(termLength * i);
            terms[i] = view.slice();
        }
    } else {
        for (int i = 0; i < PARTITION_COUNT; i++) {
            terms[i] = mappedBuffers[i].duplicate();
        }
    }
    return terms;
}
/**
 * Appends a new record at the current {@code maxId} slot and persists the
 * incremented record count into the metadata buffer.
 */
public void add(T t) {
    final int fileIndex = (int) (maxId / numRecordsPerFile);
    final ByteBuffer view = buffers.getUnchecked(fileIndex).duplicate();
    view.position((int) (maxId % numRecordsPerFile) * schema.getObjectSize());
    view.put(schema.toBytes(t));
    // Bump the watermark and write it back so the count survives restarts.
    metaDataBuffer.putLong(0, ++maxId);
}
/**
 * Positions a view of the mapped datagram area at (or just before) the
 * requested sample time.
 *
 * <p>Seeks the index entry immediately before the target, duplicates the
 * mapping so the shared buffer is untouched, then hops datagram-by-datagram
 * up to the target time.
 *
 * @return the positioned buffer paired with the time actually reached
 */
protected Pair<ByteBuffer, Long> getMappedByteBufferAtTime(long targetTimeInSamples)
        throws IllegalArgumentException, IOException {
    assert mappedBB != null;
    final IdxField before = idx.getIdxFieldBefore(targetTimeInSamples);
    final int bytePos = (int) (before.bytePtr - datagramsBytePos);
    final ByteBuffer bb = mappedBB.duplicate();
    bb.position(bytePos);
    final long time = hopToTime(bb, before.timePtr, targetTimeInSamples);
    return new Pair<ByteBuffer, Long>(bb, time);
}
/**
 * Returns a buffer view positioned at the datagram covering the requested
 * sample time, plus the time actually landed on.
 *
 * <p>Strategy: start from the closest preceding index entry, then walk
 * forward ({@code hopToTime}) on a private duplicate of the mapping.
 */
protected Pair<ByteBuffer, Long> getMappedByteBufferAtTime(long targetTimeInSamples)
        throws IllegalArgumentException, IOException {
    assert mappedBB != null;
    // Closest index entry at or before the target time.
    final IdxField entry = idx.getIdxFieldBefore(targetTimeInSamples);
    long time = entry.timePtr;
    // Byte offset relative to the start of the datagram zone.
    final int startByte = (int) (entry.bytePtr - datagramsBytePos);
    final ByteBuffer view = mappedBB.duplicate();
    view.position(startByte);
    time = hopToTime(view, time, targetTimeInSamples);
    return new Pair<ByteBuffer, Long>(view, time);
}
/**
 * Allocates the next block at {@code currentPointer}, initializes its
 * metadata (offset, pointer, minimum id), seeds it with {@code id}, and
 * advances the allocation pointer.
 *
 * <p>Synchronized so concurrent builders never hand out the same pointer.
 *
 * @param blockOffset logical offset recorded in the block's metadata
 * @param id          first id stored in the new block (also its min id)
 */
public synchronized Block build(int blockOffset, long id) {
    final int fileSize = numBlocksPerFile * (
        numRecordsPerBlock * UserEventIndex.ID_SIZE + UserEventIndex.Block.MetaData.SIZE);
    final long pointer = currentPointer;
    final MappedByteBuffer fileBuffer = buffers.getUnchecked((int) (pointer / fileSize));
    // Reserve this block's bytes before anyone else can claim them.
    final int blockSize = numRecordsPerBlock * ID_SIZE + MetaData.SIZE;
    currentPointer += blockSize;

    final int offsetInFile = (int) (pointer % fileSize);
    ByteBuffer metaDataView = fileBuffer.duplicate();
    metaDataView.position(offsetInFile);
    metaDataView = metaDataView.slice();

    ByteBuffer recordView = fileBuffer.duplicate();
    recordView.position(offsetInFile + Block.MetaData.SIZE);
    recordView = recordView.slice();

    final Block.MetaData metaData = new Block.MetaData(metaDataView);
    metaData.setBlockOffset(blockOffset);
    metaData.setPointer(pointer);
    metaData.setMinId(id);

    final Block block = new Block(metaData, recordView);
    block.add(id);
    return block;
}
/**
 * Maps a region of {@code wrapped}, caching read-only mappings keyed by
 * (canonical file, position, size) so repeated READ_ONLY maps of the same
 * region reuse one OS mapping. Non-read-only requests bypass the cache.
 *
 * <p>Returns a duplicate of the cached mapping so each caller gets its own
 * position/limit.
 *
 * <p>NOTE(review): the first {@code buffers.get(mk)} runs outside the lock
 * (double-checked locking). That is only safe if {@code buffers} is a
 * concurrent map (e.g. ConcurrentHashMap) — its type is not visible here;
 * verify at the declaration.
 */
MappedByteBuffer map(FileChannel wrapped, URL url, MapMode mode, long position, long size)
        throws IOException {
    // Only read-only mappings are shareable; anything else maps fresh.
    if (mode != MapMode.READ_ONLY) {
        return wrapped.map(mode, position, size);
    }
    // Canonicalize so the same file reached via different paths shares a key.
    File file = URLs.urlToFile(url).getCanonicalFile();
    MappingKey mk = new MappingKey(file, position, size);
    MappedByteBuffer buffer = buffers.get(mk);
    if (buffer == null) {
        synchronized (this) {
            // Re-check under the lock: another thread may have mapped it meanwhile.
            buffer = buffers.get(mk);
            if (buffer == null) {
                buffer = wrapped.map(mode, position, size);
                buffers.put(mk, buffer);
                if (LOGGER.isLoggable(Level.FINE)) {
                    LOGGER.log(Level.FINE, "Mapping and caching " + file.getAbsolutePath());
                }
            }
        }
    } else {
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.log(Level.FINE, "Using cached map for " + file.getAbsolutePath());
        }
    }
    // Duplicate so callers cannot disturb the cached buffer's position/limit.
    return (MappedByteBuffer) buffer.duplicate();
}
/**
 * Returns a little-endian window of {@code length} bytes starting
 * {@code offset} bytes past the backing buffer's current position.
 * The backing buffer itself is never modified.
 */
@Override
public ByteBuffer read(long offset, int length) {
    final int start = (int) (data.position() + offset);
    final ByteBuffer window = data.duplicate().order(ByteOrder.LITTLE_ENDIAN);
    // clear() resets position/limit so the new bounds can be set freely;
    // it does not erase any bytes.
    window.clear();
    window.limit(start + length);
    window.position(start);
    return window;
}
/**
 * Returns a view of the single page covering [position, position+length).
 *
 * @throws IllegalArgumentException if the range would span more than one page
 */
public ByteBuffer getPageRegion(long position, int length) {
    if (!isPageAligned(position, length)) {
        throw new IllegalArgumentException(
            String.format("range: %s-%s wraps more than one page", position, length));
    }
    final ByteBuffer region = pages[getPage(position)].duplicate();
    final int offset = getPageOffset(position);
    region.position(offset);
    region.limit(offset + length);
    return region;
}
/**
 * Exposes the requested byte range as an independent buffer view, provided
 * the range fits entirely within one page.
 *
 * @throws IllegalArgumentException if the range crosses a page boundary
 */
public ByteBuffer getPageRegion(long position, int length) {
    if (!isPageAligned(position, length)) {
        throw new IllegalArgumentException(
            String.format("range: %s-%s wraps more than one page", position, length));
    }
    final int start = getPageOffset(position);
    // Duplicate so the shared page buffer's state is never disturbed.
    final ByteBuffer view = pages[getPage(position)].duplicate();
    view.position(start);
    view.limit(start + length);
    return view;
}
/**
 * Carves out a [position, position+length) window from its containing page.
 * Rejects any range that does not fit inside a single page.
 *
 * @throws IllegalArgumentException if the range wraps more than one page
 */
public ByteBuffer getPageRegion(long position, int length) {
    if (!isPageAligned(position, length)) {
        throw new IllegalArgumentException(
            String.format("range: %s-%s wraps more than one page", position, length));
    }
    final ByteBuffer window = pages[getPage(position)].duplicate();
    final int begin = getPageOffset(position);
    window.position(begin);
    window.limit(begin + length);
    return window;
}
/**
 * Fills {@code bb} with the next {@code bb.remaining()} bytes from the
 * mapped buffer, then advances the mapped buffer's position past them.
 */
void read(ByteBuffer bb) throws IOException {
    final int count = bb.remaining();
    final int start = mappedByteBuffer.position();
    // Bound a duplicate so put() copies exactly `count` bytes.
    final ByteBuffer window = mappedByteBuffer.duplicate();
    window.limit(start + count);
    bb.put(window);
    mappedByteBuffer.position(start + count);
}
/**
 * Seeks a duplicate of the mapped datagram buffer to the requested sample
 * time: coarse jump via the time index, then fine-grained hops.
 *
 * @return the positioned buffer and the sample time it ended up at
 */
protected Pair<ByteBuffer, Long> getMappedByteBufferAtTime(long targetTimeInSamples)
        throws IllegalArgumentException, IOException {
    assert mappedBB != null;
    // Coarse: the index entry just before the target time.
    final IdxField coarse = idx.getIdxFieldBefore(targetTimeInSamples);
    final ByteBuffer view = mappedBB.duplicate();
    view.position((int) (coarse.bytePtr - datagramsBytePos));
    // Fine: hop datagram-by-datagram until the target is reached.
    final long reached = hopToTime(view, coarse.timePtr, targetTimeInSamples);
    return new Pair<ByteBuffer, Long>(view, reached);
}
/**
 * Builds a fresh reader over a duplicate of the mapped results buffer and
 * returns its segment iterator, propagating the configured filter when the
 * reader supports filtering.
 */
@Override
public Iterator<CycleResultsSegment> iterator() {
    final CycleResultsRLEBufferReadable readable =
        new CycleResultsRLEBufferReadable(mbb.duplicate());
    if (readable instanceof CanFilterResultValue) {
        ((CanFilterResultValue) readable).setFilter(filter);
    }
    return readable.iterator();
}