/** Repositions the underlying file stream to the given byte offset, logging when debug is on. */
void seek(long pos) throws IOException {
    if (debug) {
        Gpr.debug("seek(" + pos + ")");
    }
    fileInputStream.seek(pos);
}
/**
 * Seeks the shared input stream {@code cin} to {@code virt}.
 * When the source is BGZF-compressed the value is treated as a virtual file
 * pointer; otherwise it is a plain byte offset on a seekable stream.
 */
private void cinSeek(long virt) throws IOException {
    if (bgzf) {
        ((BlockCompressedInputStream) cin).seek(virt);
    } else {
        ((SeekableStream) cin).seek(virt);
    }
}
/**
 * Moves {@code cin} to the requested position: a BGZF virtual file pointer
 * for block-compressed input, or a raw byte offset for a seekable stream.
 */
private void cinSeek(long virt) throws IOException {
    if (!bgzf) {
        ((SeekableStream) cin).seek(virt);
        return;
    }
    ((BlockCompressedInputStream) cin).seek(virt);
}
/**
 * Positions the input stream at {@code virt}, dispatching on whether the
 * data is BGZF-compressed (virtual offset) or plain seekable (byte offset).
 */
private void cinSeek(long virt) throws IOException {
    if (bgzf) {
        final BlockCompressedInputStream compressed = (BlockCompressedInputStream) cin;
        compressed.seek(virt);
    } else {
        final SeekableStream seekable = (SeekableStream) cin;
        seekable.seek(virt);
    }
}
@Override protected boolean advanceToNextRecordStart() throws IOException { // Advance to next file block if necessary while (mCompressedInputStream.getFilePointer() >= mFilePointerLimit) { if (mFilePointers == null || mFilePointerIndex >= mFilePointers.length) { return false; } final long startOffset = mFilePointers[mFilePointerIndex++]; final long endOffset = mFilePointers[mFilePointerIndex++]; mCompressedInputStream.seek(startOffset); mFilePointerLimit = endOffset; } return true; } }
@Override protected int readFromPosition(final ByteBuffer buffer, final long position) throws IOException { // old position to get back final long oldPos = stream.getFilePointer(); try { final long virtualOffset = gzindex.getVirtualOffsetForSeek(position); stream.seek(virtualOffset); final byte[] array = new byte[buffer.remaining()]; final int read = stream.read(array); buffer.put(array); return read; } finally { stream.seek(oldPos); } }
@Override protected int readFromPosition(final ByteBuffer buffer, final long position) throws IOException { // old position to get back final long oldPos = stream.getFilePointer(); try { final long virtualOffset = gzindex.getVirtualOffsetForSeek(position); stream.seek(virtualOffset); final byte[] array = new byte[buffer.remaining()]; final int read = stream.read(array); buffer.put(array); return read; } finally { stream.seek(oldPos); } }
/**
 * Returns an iterator over the SAMRecords in file order, seeking back to the
 * first record when the underlying stream supports it.
 *
 * <p>Only one iterator may be open at a time: a previous iterator obtained via
 * {@code getIterator()} or a query method must be closed first. If the file is
 * not seekable, a second call resumes where the previous iteration stopped —
 * the best that can be done without random access.
 *
 * @throws IllegalStateException if the reader is closed or an iteration is
 *         already in progress.
 */
public CloseableIterator<SAMRecord> getIterator() {
    if (mStream == null) {
        throw new IllegalStateException("File reader is closed");
    }
    if (mCurrentIterator != null) {
        throw new IllegalStateException("Iteration in progress");
    }
    if (mIsSeekable) {
        try {
            mCompressedInputStream.seek(mFirstRecordPointer);
        } catch (final IOException exc) {
            throw new RuntimeException(exc.getMessage(), exc);
        }
    }
    mCurrentIterator = new BAMFileIterator();
    return mCurrentIterator;
}
/**
 * Seeks the compressed CSI input stream to {@code position}.
 *
 * @throws SAMException if no input stream is attached.
 * @throws RuntimeIOException wrapping any I/O failure during the seek.
 */
@Override
public void seek(final long position) {
    if (mCompressedStream == null) {
        throw new SAMException("Null input stream.");
    }
    try {
        mCompressedStream.seek(position);
    } catch (IOException ioe) {
        // BUG FIX: preserve the original exception as the cause instead of
        // only folding its toString() into the message — keeps the stack trace.
        throw new RuntimeIOException("Seek error in CSI compressed stream: " + ioe, ioe);
    }
}
/**
 * Opens an iterator over the file's SAMRecords in on-disk order.
 *
 * <p>At most one iterator may exist at a time; close any iterator returned by
 * this method or a query method before calling again. On a non-seekable
 * stream a repeat call continues from wherever the previous iteration ended,
 * since rewinding is impossible there.
 *
 * @throws IllegalStateException if the reader is closed or iteration is active.
 * @throws RuntimeIOException if seeking to the first record fails.
 */
@Override
public CloseableIterator<SAMRecord> getIterator() {
    if (mStream == null) {
        throw new IllegalStateException("File reader is closed");
    }
    if (mCurrentIterator != null) {
        throw new IllegalStateException("Iteration in progress");
    }
    if (mIsSeekable) {
        try {
            mCompressedInputStream.seek(mFirstRecordPointer);
        } catch (final IOException exc) {
            throw new RuntimeIOException(exc.getMessage(), exc);
        }
    }
    mCurrentIterator = new BAMFileIterator();
    return mCurrentIterator;
}
/**
 * Begins iteration over all SAMRecords in file order, rewinding to the first
 * record when the stream is seekable.
 *
 * <p>A single iterator may be outstanding at any moment: close the previous
 * one before requesting another. When the input cannot seek, a subsequent
 * call simply picks up after the last record consumed.
 *
 * @throws IllegalStateException if the reader is closed or iteration is active.
 * @throws RuntimeIOException if the rewind seek fails.
 */
@Override
public CloseableIterator<SAMRecord> getIterator() {
    if (mStream == null) {
        throw new IllegalStateException("File reader is closed");
    }
    if (mCurrentIterator != null) {
        throw new IllegalStateException("Iteration in progress");
    }
    if (mIsSeekable) {
        try {
            mCompressedInputStream.seek(mFirstRecordPointer);
        } catch (final IOException exc) {
            throw new RuntimeIOException(exc.getMessage(), exc);
        }
    }
    mCurrentIterator = new BAMFileIterator();
    return mCurrentIterator;
}
@Override SAMRecord getNextRecord() throws IOException { // Advance to next file block if necessary while (mCompressedInputStream.getFilePointer() >= mFilePointerLimit) { if (mFilePointers == null || mFilePointerIndex >= mFilePointers.length) { return null; } final long startOffset = mFilePointers[mFilePointerIndex++]; final long endOffset = mFilePointers[mFilePointerIndex++]; mCompressedInputStream.seek(startOffset); mFilePointerLimit = endOffset; } // Pull next record from stream return super.getNextRecord(); } }
SAMRecord getNextRecord() throws IOException { // Advance to next file block if necessary while (mCompressedInputStream.getFilePointer() >= mFilePointerLimit) { if (mFilePointers == null || mFilePointerIndex >= mFilePointers.length) { return null; } final long startOffset = mFilePointers[mFilePointerIndex++]; final long endOffset = mFilePointers[mFilePointerIndex++]; mCompressedInputStream.seek(startOffset); mFilePointerLimit = endOffset; } // Pull next record from stream return super.getNextRecord(); } }
@Override SAMRecord getNextRecord() throws IOException { // Advance to next file block if necessary while (mCompressedInputStream.getFilePointer() >= mFilePointerLimit) { if (mFilePointers == null || mFilePointerIndex >= mFilePointers.length) { return null; } final long startOffset = mFilePointers[mFilePointerIndex++]; final long endOffset = mFilePointers[mFilePointerIndex++]; mCompressedInputStream.seek(startOffset); mFilePointerLimit = endOffset; } // Pull next record from stream return super.getNextRecord(); } }
public long findNextBAMPos(int cp0, int offset) throws IOException { try { long vPos = ((long) cp0 << 16) | offset; int numTries = 65536; boolean firstPass = true; // up: Uncompressed Position, indexes the data inside the BGZF block. for (int i = 0; i < numTries; i++) { if (firstPass) { firstPass = false; bgzf.seek(vPos); } else { bgzf.seek(vPos); // Increment vPos, possibly over a block boundary IOUtils.skipFully(bgzf, 1); vPos = bgzf.getFilePointer(); } if (!posGuesser.checkRecordStart(vPos)) { continue; } if (posGuesser.checkSucceedingRecords(vPos)) return vPos; } } catch (EOFException ignored) {} return -1; }
@Test
public void seek_should_read_block() throws Exception {
    final byte[] uncompressed = Files.readAllBytes(BLOCK_UNCOMPRESSED.toPath());
    try (SeekableFileStream fileStream = new SeekableFileStream(BLOCK_COMPRESSED)) {
        try (BlockCompressedInputStream bgzfStream = new BlockCompressedInputStream(fileStream)) {
            // Seek to each block start in turn (virtual offset = file offset << 16)
            // and verify both the underlying position and the decoded bytes.
            for (int blockIdx = 0; blockIdx < BLOCK_COMPRESSED_OFFSETS.length - 1; blockIdx++) {
                bgzfStream.seek(BLOCK_COMPRESSED_OFFSETS[blockIdx] << 16);
                Assert.assertEquals(fileStream.position(), BLOCK_COMPRESSED_OFFSETS[blockIdx + 1]);
                // check
                byte[] decoded = new byte[uncompressed.length];
                final int decodedLen = bgzfStream.read(decoded);
                decoded = Arrays.copyOf(decoded, decodedLen);
                final byte[] expected = Arrays.copyOfRange(
                        uncompressed, uncompressed.length - decoded.length, uncompressed.length);
                Assert.assertEquals(decoded, expected);
            }
        }
    }
}

@Test
/**
 * Seeks every per-cycle stream to the start of the given tile, using each
 * file's BCL index to find the tile's virtual file pointer.
 *
 * @return the number of clusters in the tile (from the last stream's index record).
 * @throws UnsupportedOperationException if any stream is not block-compressed.
 * @throws PicardException on tile-count mismatch or seek failure.
 */
public int seek(final List<File> files, final TileIndex tileIndex, final int currentTile) {
    int fileIdx = 0;
    int clustersInTile = 0;
    for (final InputStream inputStream : streams) {
        final TileIndex.TileIndexRecord tileRecord = tileIndex.findTile(currentTile);
        final BclIndexReader indexReader = new BclIndexReader(files.get(fileIdx));
        final long virtualFilePointer = indexReader.get(tileRecord.getZeroBasedTileNumber());
        // Guard clause instead of if/else nesting: only BGZF streams can seek.
        if (!(inputStream instanceof BlockCompressedInputStream)) {
            throw new UnsupportedOperationException("Seeking only allowed on bzgf");
        }
        try {
            if (tileIndex.getNumTiles() != indexReader.getNumTiles()) {
                throw new PicardException(String.format("%s.getNumTiles(%d) != %s.getNumTiles(%d)",
                        tileIndex.getFile().getAbsolutePath(), tileIndex.getNumTiles(),
                        indexReader.getBciFile().getAbsolutePath(), indexReader.getNumTiles()));
            }
            ((BlockCompressedInputStream) inputStream).seek(virtualFilePointer);
            clustersInTile = tileRecord.getNumClustersInTile();
        } catch (final IOException e) {
            throw new PicardException("Problem seeking to " + virtualFilePointer, e);
        }
        fileIdx++;
    }
    return clustersInTile;
}
}
/**
 * Verifies data can be decompressed starting at the given BGZF block by
 * seeking to its virtual offset and reading a small sample.
 *
 * @param blockStart compressed-file offset of the block; the virtual offset
 *        is formed as (blockStart &lt;&lt; 16) with intra-block offset 0.
 * @throws IOException if the seek or read fails (e.g. CRC check failure).
 */
private void canReadFromBlockStart(long blockStart) throws IOException {
    // BUG FIX: the original leaked the stream on every call; use
    // try-with-resources so it is always closed.
    try (BlockCompressedInputStream blockCompressedInputStream = new BlockCompressedInputStream(file)) {
        blockCompressedInputStream.setCheckCrcs(true);
        blockCompressedInputStream.seek(blockStart << 16);
        byte[] b = new byte[100];
        blockCompressedInputStream.read(b);
    }
}
}
/**
 * Decodes one SAMRecord at each virtual offset listed in the splitting index.
 *
 * @param bam the BAM file to read.
 * @param index the splitting index supplying virtual offsets.
 * @return the records found at the split points (nulls skipped).
 * @throws IOException if seeking or reading the BAM fails.
 */
private List<SAMRecord> getRecordsAtSplits(File bam, SplittingBAMIndex index) throws IOException {
    final List<SAMRecord> records = new ArrayList<>();
    final BAMRecordCodec codec = new BAMRecordCodec(samFileHeader);
    // BUG FIX: the original never closed the stream; try-with-resources
    // guarantees it is released even if decode throws.
    try (BlockCompressedInputStream bci = new BlockCompressedInputStream(bam)) {
        codec.setInputStream(bci);
        for (final Long offset : index.getVirtualOffsets()) {
            bci.seek(offset);
            final SAMRecord record = codec.decode();
            if (record != null) {
                records.add(record);
            }
        }
    }
    return records;
}
/**
 * Submits a new access plan for the given dataset and seeks to the given point.
 * @param accessPlan The next seek point for BAM data in this reader.
 */
public void submitAccessPlan(final BAMAccessPlan accessPlan) {
    //System.out.printf("Thread %s: submitting access plan for block at position: %d%n",Thread.currentThread().getId(),position.getBlockAddress());
    // Replace the active plan and discard any previously buffered data so the
    // next fill starts from the new plan's position.
    this.accessPlan = accessPlan;
    accessPlan.reset();
    clearBuffers();
    // Pull the iterator past any oddball chunks at the beginning of the shard (chunkEnd < chunkStart, empty chunks, etc).
    // TODO: Don't pass these empty chunks in.
    accessPlan.advancePosition(makeFilePointer(accessPlan.getBlockAddress(),0));
    // NOTE(review): a negative block address appears to mean "no data to read"
    // — buffers are only filled for non-negative addresses; confirm against
    // BAMAccessPlan's contract.
    if(accessPlan.getBlockAddress() >= 0) {
        waitForBufferFill();
    }
    // Keep the optional validating stream in lockstep with the plan so later
    // reads can be cross-checked against the Picard implementation.
    if(validatingInputStream != null) {
        try {
            validatingInputStream.seek(makeFilePointer(accessPlan.getBlockAddress(),0));
        }
        catch(IOException ex) {
            throw new ReviewedGATKException("Unable to validate against Picard input stream",ex);
        }
    }
}