/**
 * Returns the size of this file as recorded in its metadata.
 */
@Override
public long length() {
   return file.getSize();
}
long fileLength(final String name) { final FileMetadata fileMetadata = fileOps.getFileMetadata(name); if (fileMetadata == null) { return 0L; //as in FSDirectory (RAMDirectory throws an exception instead) } else { return fileMetadata.getSize(); } }
/**
 * Grows the recorded file size so it covers the current write position;
 * a no-op when the file is already large enough.
 */
private void resizeFileIfNeeded() {
   if (filePosition > file.getSize()) {
      file.setSize(filePosition);
   }
}
long fileLength(final String name) { final FileMetadata fileMetadata = fileOps.getFileMetadata(name); if (fileMetadata == null) { return 0L; //as in FSDirectory (RAMDirectory throws an exception instead) } else { return fileMetadata.getSize(); } }
/**
 * Extends the file-size field in the metadata when the current write
 * position has moved past the recorded end of file.
 */
private void resizeFileIfNeeded() {
   final long recordedSize = file.getSize();
   if (recordedSize < filePosition) {
      file.setSize(filePosition);
   }
}
/**
 * Updates the stored size to the write position if writing has advanced
 * beyond the size currently recorded in the file metadata.
 */
private void resizeFileIfNeeded() {
   if (filePosition > file.getSize()) {
      file.setSize(filePosition);
   }
}
/**
 * Returns the file size, first folding any pending growth from the
 * current write position into the metadata.
 */
public long length() {
   // must run before reading the size: writes may have passed the recorded end
   resizeFileIfNeeded();
   return file.getSize();
}
long fileLength(final String name) { final FileMetadata fileMetadata = fileOps.getFileMetadata(name); if (fileMetadata == null) { return 0L; //as in FSDirectory (RAMDirectory throws an exception instead) } else { return fileMetadata.getSize(); } }
/**
 * Reports the current length of the file. Any writes past the recorded
 * end are first committed to the metadata via the resize check.
 */
public long length() {
   resizeFileIfNeeded();
   return file.getSize();
}
/** * {@inheritDoc} */ @Override public long fileLength(String name) { ensureOpen(); FileMetadata fileMetadata = fileOps.getFileMetadata(name); if (fileMetadata == null) { return 0L;//as in FSDirectory (RAMDirectory throws an exception instead) } else { return fileMetadata.getSize(); } }
/**
 * Flushes the in-memory write buffer into the chunk store.
 *
 * @param isClose true when invoked from the close path; only then is the
 *                first chunk written out, so each chunk is stored exactly once
 */
protected void storeCurrentBuffer(final boolean isClose) {
   if (currentChunkNumber == 0 && ! isClose) {
      //we don't store the first chunk until the close operation: this way
      //we guarantee each chunk is written only once and minimize locking needs.
      return;
   }
   // size changed, apply change to file header
   resizeFileIfNeeded();
   byte[] bufferToFlush = buffer;
   boolean writingOnLastChunk = isWritingOnLastChunk();
   if (writingOnLastChunk) {
      // the final chunk may be only partially filled: trim the flushed copy
      // down to the real tail length (size modulo chunk size)
      int newBufferSize = (int) (file.getSize() % bufferSize);
      if (newBufferSize != 0) {
         bufferToFlush = new byte[newBufferSize];
         System.arraycopy(buffer, 0, bufferToFlush, 0, newBufferSize);
      }
   }
   // add chunk to cache — skipped only for an empty trailing chunk
   if ( ! writingOnLastChunk || this.positionInBuffer != 0) {
      // store the current chunk
      storeBufferAsChunk(bufferToFlush, currentChunkNumber);
   }
}
public void seek(final long pos) throws IOException { final int requestedChunkNumber = getChunkNumberFromPosition(pos, bufferSize); if (pos > file.getSize()) { resizeFileIfNeeded(); if (pos > file.getSize()) // check again, might be fixed by the resize throw new IOException(fileKey.getFileName() + ": seeking past end of file"); } if (requestedChunkNumber != currentChunkNumber) { storeCurrentBuffer(false); if (requestedChunkNumber != 0) { buffer = getChunkById(fileKey, requestedChunkNumber, bufferSize); } else { buffer = firstChunkBuffer; } currentChunkNumber = requestedChunkNumber; } positionInBuffer = getPositionInBuffer(pos, bufferSize); filePosition = pos; }
public void seek(final long pos) throws IOException { final int requestedChunkNumber = getChunkNumberFromPosition(pos, bufferSize); if (pos > file.getSize()) { resizeFileIfNeeded(); if (pos > file.getSize()) // check again, might be fixed by the resize throw new IOException(fileKey.getFileName() + ": seeking past end of file"); } if (requestedChunkNumber != currentChunkNumber) { storeCurrentBuffer(false); if (requestedChunkNumber != 0) { buffer = getChunkById(fileKey, requestedChunkNumber, bufferSize); } else { buffer = firstChunkBuffer; } currentChunkNumber = requestedChunkNumber; } positionInBuffer = getPositionInBuffer(pos, bufferSize); filePosition = pos; }
@Override public void seek(final long pos) throws IOException { final int requestedChunkNumber = getChunkNumberFromPosition(pos, bufferSize); if (pos > file.getSize()) { resizeFileIfNeeded(); if (pos > file.getSize()) // check again, might be fixed by the resize throw new IOException(fileKey.getFileName() + ": seeking past end of file"); } if (requestedChunkNumber != currentChunkNumber) { storeCurrentBuffer(false); if (requestedChunkNumber != 0) { buffer = getChunkById(fileKey, requestedChunkNumber, bufferSize); } else { buffer = firstChunkBuffer; } currentChunkNumber = requestedChunkNumber; } positionInBuffer = getPositionInBuffer(pos, bufferSize); filePosition = pos; }
IndexInputContext openInput(final String name) throws IOException { final FileCacheKey fileKey = new FileCacheKey(indexName, name); final FileMetadata fileMetadata = metadataCache.get(fileKey); if (fileMetadata == null) { throw new FileNotFoundException("Error loading metadata for index file: " + fileKey); } else if (fileMetadata.getSize() <= fileMetadata.getBufferSize()) { //files smaller than chunkSize don't need a readLock return new IndexInputContext(chunksCache, fileKey, fileMetadata, null); } else { boolean locked = readLocks.acquireReadLock(name); if (!locked) { // safest reaction is to tell this file doesn't exist anymore. throw new FileNotFoundException("Error loading metadata for index file: " + fileKey); } return new IndexInputContext(chunksCache, fileKey, fileMetadata, readLocks); } }
/**
 * Creates an IndexInput reading the chunked file described by the
 * supplied context.
 */
public InfinispanIndexInput(final IndexInputContext ctx) {
   super(ctx.fileKey.getFileName());
   this.fileKey = ctx.fileKey;
   this.filename = fileKey.getFileName();
   this.chunksCache = ctx.chunksCache;
   this.readLocks = ctx.readLocks;
   this.affinitySegmentId = ctx.affinitySegmentId;
   this.chunkSize = ctx.fileMetadata.getBufferSize();
   this.fileLength = ctx.fileMetadata.getSize();
   if (trace) {
      log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName());
   }
}
/**
 * Constructs an IndexInput over the chunked file identified by the
 * given context, copying its metadata into local fields.
 */
public InfinispanIndexInput(final IndexInputContext ctx) {
   super(ctx.fileKey.getFileName());
   this.fileKey = ctx.fileKey;
   this.filename = fileKey.getFileName();
   this.chunksCache = ctx.chunksCache;
   this.readLocks = ctx.readLocks;
   this.chunkSize = ctx.fileMetadata.getBufferSize();
   this.fileLength = ctx.fileMetadata.getSize();
   if (trace) {
      log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName());
   }
}
/**
 * Opens a new IndexInput bound to the chunk cache, file key, and
 * metadata carried by the context.
 */
public InfinispanIndexInput(final IndexInputContext ctx) {
   super(ctx.fileKey.getFileName());
   this.fileKey = ctx.fileKey;
   this.filename = fileKey.getFileName();
   this.affinitySegmentId = ctx.affinitySegmentId;
   this.chunksCache = ctx.chunksCache;
   this.readLocks = ctx.readLocks;
   this.chunkSize = ctx.fileMetadata.getBufferSize();
   this.fileLength = ctx.fileMetadata.getSize();
   if (trace) {
      log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName());
   }
}
/**
 * Verifies the FileMetadata equality contract: equal to itself and to an
 * instance with the same size, unequal to unrelated types and to
 * instances whose size differs, and toString reflects the size.
 */
@Test public void testFileMetaData() {
   FileMetadata data1 = new FileMetadata(1024);
   FileMetadata data2 = new FileMetadata(2048);
   FileMetadata data3 = new FileMetadata(1024);
   FileMetadata data4 = data1;
   // bare 'assert' statements are skipped unless the JVM runs with -ea;
   // use AssertJUnit (already used below) so every check always executes
   AssertJUnit.assertFalse(data1.equals(new FileCacheKey("testIndex", "testFile", -1)));
   AssertJUnit.assertNotNull(data1);
   AssertJUnit.assertEquals(data1, data4);
   AssertJUnit.assertEquals(data1, data3);
   // equality is size-based: changing the size must break it
   data3.setSize(2048);
   AssertJUnit.assertFalse(data1.equals(data3));
   AssertJUnit.assertFalse(data1.equals(data2));
   AssertJUnit.assertEquals("FileMetadata{ size=" + data1.getSize() + '}', data1.toString());
}
}
/**
 * Loading a large file through the DirectoryLoaderAdaptor must yield a
 * FileMetadata entry carrying the original file size and the configured
 * auto-chunking buffer size.
 */
public void testAutoChunkingOnLargeFiles() throws IOException {
   final Directory mockDirectory = createMockDirectory();
   final FileCacheKey key = new FileCacheKey(INDEX_NAME, FILE_NAME, segmentId);
   final DirectoryLoaderAdaptor adaptor = new DirectoryLoaderAdaptor(mockDirectory, INDEX_NAME, AUTO_BUFFER, -1);
   final Object loaded = adaptor.load(key);
   AssertJUnit.assertTrue(loaded instanceof FileMetadata);
   final FileMetadata metadata = (FileMetadata) loaded;
   AssertJUnit.assertEquals(TEST_SIZE, metadata.getSize());
   AssertJUnit.assertEquals(AUTO_BUFFER, metadata.getBufferSize());
}