// NOTE(review): grep-style fragment — statements excerpted from a unit test.
// The orphaned ".getUserExtent());" / ".getExtent());" lines are truncated
// call continuations whose receivers were lost during extraction; "b2" and
// "addr2" are declared outside this excerpt.
final BufferMode bm = bufferStrategy.getBufferMode(); // unused in the visible fragment
final long userExtent = bufferStrategy.getUserExtent();
final long extent = bufferStrategy.getExtent();
final long initialExtent = bufferStrategy.getInitialExtent();
final long nextOffset = bufferStrategy.getNextOffset();
// Write random data and remember its address so it can be re-read below.
final long addr = writeRandomData(store, remaining, bufferStrategy.useChecksums());
assertEquals("extent", extent, bufferStrategy.getExtent());
.getUserExtent()); // truncated continuation — receiver not visible
final ByteBuffer b = bufferStrategy.read(addr);
bufferStrategy.force(false); // flush data (false => do not force file metadata)
.getExtent()); // truncated continuation
.getUserExtent()); // truncated continuation
// Verify previously written records are still readable after the force.
assertEquals(b, bufferStrategy.read(addr));
assertEquals(b2, bufferStrategy.read(addr2));
/**
 * Atomically snapshot both root blocks (used by HA).
 * <p>
 * The field read lock is held across both reads so that the returned pair is
 * consistent with a single commit point on the backing store.
 *
 * @return A two-element array: root block zero followed by root block one.
 */
protected IRootBlockView[] getRootBlocks() {
    final Lock readLock = _fieldReadWriteLock.readLock();
    readLock.lock();
    try {
        final ChecksumUtility chk = ChecksumUtility.getCHK();
        // Build both views while still holding the lock.
        return new IRootBlockView[] {
                new RootBlockView(true/* rootBlock0 */,
                        getBufferStrategy().readRootBlock(true/* rootBlock0 */), chk),
                new RootBlockView(false/* rootBlock0 */,
                        getBufferStrategy().readRootBlock(false/* rootBlock0 */), chk) };
    } finally {
        readLock.unlock();
    }
}
/**
 * Ensure that the journal has at least the specified number of bytes of
 * unused capacity remaining in the user extent, growing the backing store
 * if necessary.
 * <p>
 * Note: You need an exclusive write lock on the journal to extend it.
 *
 * @param minFree
 *            The minimum #of bytes free for the user extent.
 *
 * @return The #of bytes of free space remaining in the user extent.
 */
public long ensureMinFree(final long minFree) {
    assertOpen();
    if (minFree < 0L) {
        throw new IllegalArgumentException();
    }
    final IBufferStrategy strategy = _bufferStrategy;
    final long free = strategy.getUserExtent() - strategy.getNextOffset();
    if (free < minFree) {
        // Grow the total extent by minFree. This slightly over-allocates
        // (some bytes were already free) but guarantees the post-condition.
        strategy.truncate(strategy.getExtent() + minFree);
    }
    return strategy.getUserExtent() - strategy.getNextOffset();
}
// NOTE(review): fragment — a switch over the backing buffer's mode; only the
// DiskRW case is visible and the switch is never closed in this excerpt.
switch (backingBuffer.getBufferMode()) {
case DiskRW:
    // Resize the store to the header plus the bytes actually allocated
    // (presumably trimming unused space — confirm against IBufferStrategy#truncate).
    final long oldExtent = backingBuffer.getExtent(); // unused in the visible fragment
    final long newExtent = backingBuffer.getHeaderSize() + backingBuffer.getNextOffset();
    backingBuffer.truncate(newExtent);
/** * Test verifies that a write up to the remaining extent does not trigger * an overflow. */ public void test_writeNoExtend() { final Journal store = (Journal) getStore(); try { final IBufferStrategy bufferStrategy = store.getBufferStrategy(); if (bufferStrategy.getBufferMode() == BufferMode.DiskRW) { return; } final long userExtent = bufferStrategy.getUserExtent(); final long extent = bufferStrategy.getExtent(); final long initialExtent = bufferStrategy.getInitialExtent(); final long nextOffset = bufferStrategy.getNextOffset(); assertEquals("extent", initialExtent, extent); final long remaining = userExtent - nextOffset; writeRandomData(store, remaining, bufferStrategy.useChecksums()); // no change in extent. assertEquals("extent", extent, bufferStrategy.getExtent()); // no change in user extent. assertEquals("userExtent", userExtent, bufferStrategy .getUserExtent()); } finally { store.destroy(); } }
static ProxyTestSuite createProxyTestSuite(final IIndexManager indexManager, final TestMode testMode) { final TestNanoSparqlServerWithProxyIndexManager<?> delegate = new TestNanoSparqlServerWithProxyIndexManager( null/* name */, indexManager, testMode); // !!!! THIS CLASS !!!! /* * Use a proxy test suite and specify the delegate. */ final ProxyTestSuite suite = new ProxyTestSuite(delegate, "NanoSparqlServer Proxied Test Suite: indexManager=" + indexManager.getClass().getSimpleName() + ", testMode=" + testMode + ", bufferMode=" + (indexManager instanceof Journal ? ((Journal) indexManager) .getBufferStrategy().getBufferMode() : "")); return suite; }
System.err.println("Begin: bufferMode="+journal.getBufferStrategy().getBufferMode()); final int nwrites = (int) journal.getBufferStrategy().getUserExtent() / writeSize; final long bytesWritten = journal.getBufferStrategy().getNextOffset(); + journal.getBufferStrategy().getBufferMode() + ", recordSize=" + cf.format(writeSize) + ", nwrites=" + cf.format(nwrites) + ", writeRate=" + fpf.format(writeRate) + "MB/sec");
// NOTE(review): fragment — pieces of an insert-rate benchmark/report; the
// "(int) (..." expression is never closed in this excerpt and the "+" lines
// are truncated string concatenations.
+ (tx == 0 ? "no" : "yes") // whether a transaction is in use
+ ", isolatable=" + ndx.getIndexMetadata().isIsolatable()
+ ", bufferMode=" + journal.getBufferStrategy().getBufferMode());
final int nwrites = (int) (journal.getBufferStrategy().getExtent() // truncated expression
final long bytesWritten = journal.getBufferStrategy().getNextOffset();
+ journal.getBufferStrategy().getBufferMode() // truncated concatenation
+ ", valueSize=" + cf.format(valueSize)
+ ", ninserts=" + cf.format(nwrites)
+ ", nrecordsWritten=" + recordsWritten
// NOTE(review): fragment — the enclosing method and the receivers of the
// dangling continuations are not visible here.
checker); // tail of a constructor/method call whose head was lost
// Write both root blocks without forcing them to disk.
_bufferStrategy.writeRootBlock(rootBlock0, ForceEnum.No);
_bufferStrategy.writeRootBlock(rootBlock1, ForceEnum.No);
.getBufferMode()); // truncated continuation
+ getBufferStrategy().getBufferMode()); // truncated string concatenation
// Flush application data (false => not file metadata) to the backing store
// BEFORE the new root block is written, then write the root block honoring
// the store's force-on-commit policy.
_bufferStrategy.force(false/* metadata */);
_bufferStrategy.writeRootBlock(newRootBlock, store.forceOnCommit);
/**
 * The #of bytes currently under management, including those written on the
 * live journal.
 *
 * @return The tracked byte count plus the current extent of the live
 *         journal.
 *
 * @throws IllegalStateException
 *             during startup or if the {@link StoreManager} is closed.
 */
public long getBytesUnderManagement() {
    assertRunning();
    // Add the live journal's extent on top of the maintained counter.
    final long liveJournalExtent = getLiveJournal().getBufferStrategy().getExtent();
    return bytesUnderManagement.get() + liveJournalExtent;
}
// Snapshot the commit-relevant addresses from the buffer strategy
// (presumably captured to assemble a new root block — confirm with the
// enclosing method, which is not visible here).
final long nextOffset = _bufferStrategy.getNextOffset();
final long metaStartAddr = _bufferStrategy.getMetaStartAddr();
final long metaBitsAddr = _bufferStrategy.getMetaBitsAddr();
// Read the ALTERNATE root block (the one NOT currently designated as the
// live root block), then write out the new root block honoring the
// force-on-commit policy. NOTE(review): which slot writeRootBlock targets is
// not visible in this excerpt — confirm against the enclosing method.
final ByteBuffer buf = _bufferStrategy.readRootBlock(!_rootBlock.isRootBlock0());
_bufferStrategy.writeRootBlock(newRootBlock, forceOnCommit);
/**
 * Force dirty writes through to stable storage.
 *
 * @param metadata
 *            When <code>true</code>, the file metadata is forced to disk
 *            as well as the file data.
 */
@Override
public void force(final boolean metadata) {
    assertOpen();
    // Delegate to the backing buffer strategy.
    final IBufferStrategy strategy = _bufferStrategy;
    strategy.force(metadata);
}
/**
 * Read the record at the given address from the backing store.
 *
 * @param addr
 *            The address of the record.
 *
 * @return The record as read by the backing buffer strategy.
 */
@Override
public ByteBuffer read(final long addr) {
    assertOpen();
    assertCanRead();
    // Delegate to the backing buffer strategy.
    final IBufferStrategy strategy = _bufferStrategy;
    return strategy.read(addr);
}
/**
 * Return <code>true</code> iff the persistence store maintains record level
 * checksums, in which case bad reads are detected by comparing each record
 * as read from the disk against the checksum stored for that record.
 */
public boolean isChecked() {
    // Delegate to the backing buffer strategy.
    final boolean checksummed = _bufferStrategy.useChecksums();
    return checksummed;
}
/** * Test verifies that a write up to the remaining extent does not trigger * an overflow. */ public void test_writeNoExtend() { final Journal store = (Journal) getStore(); try { final IBufferStrategy bufferStrategy = store.getBufferStrategy(); if (bufferStrategy.getBufferMode() == BufferMode.DiskRW) { return; } final long userExtent = bufferStrategy.getUserExtent(); final long extent = bufferStrategy.getExtent(); final long initialExtent = bufferStrategy.getInitialExtent(); final long nextOffset = bufferStrategy.getNextOffset(); assertEquals("extent", initialExtent, extent); final long remaining = userExtent - nextOffset; writeRandomData(store, remaining, bufferStrategy.useChecksums()); // no change in extent. assertEquals("extent", extent, bufferStrategy.getExtent()); // no change in user extent. assertEquals("userExtent", userExtent, bufferStrategy .getUserExtent()); } finally { store.destroy(); } }
// NOTE(review): fragment — a switch over the backing buffer's mode; only the
// DiskRW case is visible and the switch is never closed in this excerpt.
switch (backingBuffer.getBufferMode()) {
case DiskRW:
    // Resize the store to the header plus the bytes actually allocated
    // (presumably trimming unused space — confirm against IBufferStrategy#truncate).
    final long oldExtent = backingBuffer.getExtent(); // unused in the visible fragment
    final long newExtent = backingBuffer.getHeaderSize() + backingBuffer.getNextOffset();
    backingBuffer.truncate(newExtent);
/**
 * Sample the journal's buffer mode and publish it as this counter's value.
 * Does nothing if the journal reference has been cleared or no buffer
 * strategy / buffer mode is available.
 */
@Override
public void sample() {
    // Guard-clause style: bail out as soon as any link in the chain is null.
    final AbstractJournal journal = ref.get();
    if (journal == null)
        return;
    final IBufferStrategy strategy = journal.getBufferStrategy();
    if (strategy == null)
        return;
    final BufferMode mode = strategy.getBufferMode();
    if (mode == null)
        return;
    setValue(mode.toString());
}
});
System.err.println("Begin: bufferMode="+journal.getBufferStrategy().getBufferMode()); final int nwrites = (int) journal.getBufferStrategy().getUserExtent() / writeSize; final long bytesWritten = journal.getBufferStrategy().getNextOffset(); + journal.getBufferStrategy().getBufferMode() + ", recordSize=" + cf.format(writeSize) + ", nwrites=" + cf.format(nwrites) + ", writeRate=" + fpf.format(writeRate) + "MB/sec");