/**
 * Moves the read position of this stream to {@code pos}, closing the current data reader so a
 * new one is opened at the target position on the next read.
 *
 * @param pos the position to seek to; must be in [0, mLength]
 * @throws IOException if the stream is already closed
 */
@Override
public void seek(long pos) throws IOException {
  checkIfClosed();
  Preconditions.checkArgument(pos >= 0,
      PreconditionMessage.ERR_SEEK_NEGATIVE.toString(), pos);
  Preconditions.checkArgument(pos <= mLength,
      PreconditionMessage.ERR_SEEK_PAST_END_OF_REGION.toString(), mId);
  if (pos == mPos) {
    // Already at the requested position; nothing to do.
    return;
  }
  // A backward seek means the stream is no longer at end-of-file; a forward
  // seek leaves the flag untouched (pos != mPos is guaranteed here).
  mEOF = mEOF && pos > mPos;
  closeDataReader();
  mPos = pos;
}
/**
 * Validates Zookeeper-related configuration and prints warnings for possible sources of error.
 *
 * @throws IllegalStateException if invalid Zookeeper configuration is encountered
 */
private void checkZkConfiguration() {
  // The ZK address must be set exactly when ZK is enabled; any mismatch is a
  // misconfiguration.
  boolean zkAddressSet = isSet(PropertyKey.ZOOKEEPER_ADDRESS);
  boolean zkEnabled = getBoolean(PropertyKey.ZOOKEEPER_ENABLED);
  Preconditions.checkState(zkAddressSet == zkEnabled,
      PreconditionMessage.INCONSISTENT_ZK_CONFIGURATION.toString(),
      PropertyKey.Name.ZOOKEEPER_ADDRESS, PropertyKey.Name.ZOOKEEPER_ENABLED);
}
/** * Validates the user file buffer size is a non-negative number. * * @throws IllegalStateException if invalid user file buffer size configuration is encountered */ private void checkUserFileBufferBytes() { if (!isSet(PropertyKey.USER_FILE_BUFFER_BYTES)) { // load from hadoop conf return; } long usrFileBufferBytes = getBytes(PropertyKey.USER_FILE_BUFFER_BYTES); Preconditions.checkState((usrFileBufferBytes & Integer.MAX_VALUE) == usrFileBufferBytes, PreconditionMessage.INVALID_USER_FILE_BUFFER_BYTES.toString(), PropertyKey.Name.USER_FILE_BUFFER_BYTES, usrFileBufferBytes); }
/**
 * Reads up to {@code len} bytes into {@code b} starting at offset {@code off}.
 *
 * @param b the destination buffer; must not be null
 * @param off the offset in the buffer to start writing at
 * @param len the maximum number of bytes to read
 * @return the number of bytes actually read, 0 if {@code len} is 0, or -1 at end of stream
 * @throws IOException if the stream is closed or the read fails
 */
@Override
public int read(byte[] b, int off, int len) throws IOException {
  checkIfClosed();
  Preconditions.checkArgument(b != null, PreconditionMessage.ERR_READ_BUFFER_NULL);
  Preconditions.checkArgument(off >= 0 && len >= 0 && len + off <= b.length,
      PreconditionMessage.ERR_BUFFER_STATE.toString(), b.length, off, len);
  if (len == 0) {
    return 0;
  }
  // Fetch the next chunk if needed; a null chunk afterwards signals end of stream.
  readChunk();
  if (mCurrentChunk == null) {
    mEOF = true;
  }
  if (mEOF) {
    closeDataReader();
    // Sanity check: hitting EOF before mLength bytes were consumed means the
    // block's recorded length is inconsistent with the data actually served.
    Preconditions
        .checkState(mPos >= mLength, PreconditionMessage.BLOCK_LENGTH_INCONSISTENT.toString(),
            mId, mLength, mPos);
    return -1;
  }
  // Copy as many bytes as the current chunk can provide, capped at len.
  int toRead = Math.min(len, mCurrentChunk.readableBytes());
  mCurrentChunk.readBytes(b, off, toRead);
  mPos += toRead;
  return toRead;
}
/**
 * Moves the read position of this stream to {@code pos}, reusing the currently open block
 * stream when the target still falls within its bounds and closing it otherwise.
 *
 * @param pos the position to seek to; must be in [0, mLength]
 * @throws IOException if seeking the underlying block stream fails
 */
@Override
public void seek(long pos) throws IOException {
  if (mPosition == pos) {
    return;
  }
  Preconditions.checkArgument(pos >= 0, PreconditionMessage.ERR_SEEK_NEGATIVE.toString(), pos);
  Preconditions.checkArgument(pos <= mLength,
      PreconditionMessage.ERR_SEEK_PAST_END_OF_FILE.toString(), pos);
  if (mBlockInStream == null) {
    // no current stream open, advance position
    mPosition = pos;
    return;
  }
  long delta = pos - mPosition;
  // The target is inside the open block iff the signed delta stays within
  // [-getPos(), remaining()] relative to the block stream's current position.
  if (delta <= mBlockInStream.remaining() && delta >= -mBlockInStream.getPos()) {
    // within block
    mBlockInStream.seek(mBlockInStream.getPos() + delta);
  } else {
    // close the underlying stream as the new position is no longer in bounds
    closeBlockInStream(mBlockInStream);
  }
  mPosition += delta;
}
/**
 * Initializes this file system with the given URI and Hadoop configuration.
 *
 * <p>The URI scheme must match this file system's scheme, and initialize() may be invoked at
 * most once (enforced atomically via {@code mInitialized}).
 *
 * @param uri the URI identifying this file system instance
 * @param conf the Hadoop configuration
 * @throws IOException if initialization fails
 */
public void initialize(URI uri, org.apache.hadoop.conf.Configuration conf) throws IOException {
  Preconditions.checkArgument(uri.getScheme().equals(getScheme()),
      PreconditionMessage.URI_SCHEME_MISMATCH.toString(), uri.getScheme(), getScheme());
  // compareAndSet guarantees at most one successful initialization even under
  // concurrent callers.
  Preconditions.checkArgument(mInitialized.compareAndSet(false, true), "Cannot invoke "
      + "initialize() more than once");
    // Verify the underlying read returned exactly the number of bytes requested;
    // a short read against the under-file-system path is treated as fatal.
    .checkState(bytesRead == bytesToRead, PreconditionMessage.NOT_ENOUGH_BYTES_READ.toString(),
        bytesRead, bytesToRead, mBlockMeta.getUnderFileSystemPath());
// NOTE(review): the block writer appears to lag behind the in-stream position here;
// presumably the branch below catches the writer up — confirm against the full source.
if (mBlockWriter != null && mBlockWriter.getPosition() < mInStreamPos) {
// Validate the caller-supplied buffer and range before doing any work: the buffer
// must be non-null and [off, off + len) must lie within it.
Preconditions.checkArgument(b != null, PreconditionMessage.ERR_READ_BUFFER_NULL);
Preconditions.checkArgument(off >= 0 && len >= 0 && len + off <= b.length,
    PreconditionMessage.ERR_BUFFER_STATE.toString(), b.length, off, len);
// A zero-length read is a no-op by contract.
if (len == 0) {
  return 0;
/**
 * Tests that specifying an invalid offset/length for a buffer read throws the right exception.
 */
@Test
public void readBadBuffer() throws IOException {
  byte[] buffer = new byte[10];
  try {
    mTestStream.read(buffer, 5, 6);
    fail("the buffer read of invalid offset/length should fail");
  } catch (IllegalArgumentException e) {
    // The message must carry the buffer length, offset, and length that were rejected.
    String expectedMessage =
        String.format(PreconditionMessage.ERR_BUFFER_STATE.toString(), 10, 5, 6);
    assertEquals(expectedMessage, e.getMessage());
  }
}
/**
 * Tests that seeking to a negative position will throw the right exception.
 */
@Test
public void seekNegative() throws IOException {
  try {
    mTestStream.seek(-1);
    fail("seeking negative position should fail");
  } catch (IllegalArgumentException e) {
    // The rejected position must appear in the exception message.
    String expectedMessage =
        String.format(PreconditionMessage.ERR_SEEK_NEGATIVE.toString(), -1);
    assertEquals(expectedMessage, e.getMessage());
  }
}
/**
 * Tests that the correct exception is thrown when a buffer is written with invalid offset/length.
 */
@Test
public void writeBadBufferOffset() throws IOException {
  byte[] buffer = new byte[10];
  try {
    mTestStream.write(buffer, 5, 6);
    Assert.fail("buffer write with invalid offset/length should fail");
  } catch (IllegalArgumentException e) {
    // The message must carry the buffer length, offset, and length that were rejected.
    String expectedMessage =
        String.format(PreconditionMessage.ERR_BUFFER_STATE.toString(), 10, 5, 6);
    Assert.assertEquals(expectedMessage, e.getMessage());
  }
}
/**
 * Writes {@code len} bytes from {@code b} starting at {@code off} into this stream.
 *
 * @param b the source buffer; must not be null
 * @param off the offset in the buffer to start reading from
 * @param len the number of bytes to write
 * @throws IOException if the write fails
 */
private void writeInternal(byte[] b, int off, int len) throws IOException {
  // Validate the buffer and range up front: b must be non-null and
  // [off, off + len) must lie within it.
  Preconditions.checkArgument(b != null, PreconditionMessage.ERR_WRITE_BUFFER_NULL);
  Preconditions.checkArgument(off >= 0 && len >= 0 && len + off <= b.length,
      PreconditionMessage.ERR_BUFFER_STATE.toString(), b.length, off, len);
/**
 * Tests that writing a null buffer with offset/length information throws the correct exception.
 */
@Test
public void writeNullBufferOffset() throws IOException {
  try {
    mTestStream.write(null, 0, 0);
    Assert.fail("writing null should fail");
  } catch (IllegalArgumentException e) {
    // A null buffer must be rejected with the canonical null-write-buffer message.
    String actualMessage = e.getMessage();
    Assert.assertEquals(PreconditionMessage.ERR_WRITE_BUFFER_NULL.toString(), actualMessage);
  }
}
/**
 * Tests that seeking past the end of the stream will throw the right exception.
 */
@Test
public void seekPastEnd() throws IOException {
  try {
    mTestStream.seek(FILE_LENGTH + 1);
    fail("seeking past the end of the stream should fail");
  } catch (IllegalArgumentException e) {
    // The out-of-range position must appear in the exception message.
    String expectedMessage = String.format(
        PreconditionMessage.ERR_SEEK_PAST_END_OF_FILE.toString(), FILE_LENGTH + 1);
    assertEquals(expectedMessage, e.getMessage());
  }
}
/**
 * Tests that writing a null buffer throws the correct exception.
 */
@Test
public void writeNullBuffer() throws IOException {
  try {
    mTestStream.write(null);
    Assert.fail("writing null should fail");
  } catch (IllegalArgumentException e) {
    // A null buffer must be rejected with the canonical null-write-buffer message.
    String actualMessage = e.getMessage();
    Assert.assertEquals(PreconditionMessage.ERR_WRITE_BUFFER_NULL.toString(), actualMessage);
  }
}
// NOTE(review): both format arguments below are replicationMax — for a
// "max smaller than min" message the second argument was almost certainly meant to be
// the replication minimum. Confirm against the message template and fix the duplicate.
PreconditionMessage.INVALID_REPLICATION_MAX_SMALLER_THAN_MIN.toString(),
    replicationMax, replicationMax);
/**
 * Tests that requesting an out-stream without a location policy fails with the canonical
 * NullPointerException message.
 */
@Test
public void getOutStreamMissingLocationPolicy() throws IOException {
  OutStreamOptions options = OutStreamOptions.defaults(sConf)
      .setBlockSizeBytes(BLOCK_LENGTH)
      .setWriteType(WriteType.MUST_CACHE)
      .setLocationPolicy(null);
  // Register the expected failure before triggering it.
  mException.expect(NullPointerException.class);
  mException.expectMessage(
      PreconditionMessage.FILE_WRITE_LOCATION_POLICY_UNSPECIFIED.toString());
  mBlockStore.getOutStream(BLOCK_ID, BLOCK_LENGTH, options);
}
/**
 * Validates Zookeeper-related configuration and prints warnings for possible sources of error.
 *
 * @throws IllegalStateException if invalid Zookeeper configuration is encountered
 */
private void checkZkConfiguration() {
  // Having a ZK address configured and having ZK enabled must agree; either both
  // set or neither.
  boolean addressConfigured = isSet(PropertyKey.ZOOKEEPER_ADDRESS);
  boolean enabled = getBoolean(PropertyKey.ZOOKEEPER_ENABLED);
  Preconditions.checkState(addressConfigured == enabled,
      PreconditionMessage.INCONSISTENT_ZK_CONFIGURATION.toString(),
      PropertyKey.Name.ZOOKEEPER_ADDRESS, PropertyKey.Name.ZOOKEEPER_ENABLED);
}
/** * Validates the user file buffer size is a non-negative number. * * @throws IllegalStateException if invalid user file buffer size configuration is encountered */ private void checkUserFileBufferBytes() { if (!isSet(PropertyKey.USER_FILE_BUFFER_BYTES)) { // load from hadoop conf return; } long usrFileBufferBytes = getBytes(PropertyKey.USER_FILE_BUFFER_BYTES); Preconditions.checkState((usrFileBufferBytes & Integer.MAX_VALUE) == usrFileBufferBytes, PreconditionMessage.INVALID_USER_FILE_BUFFER_BYTES.toString(), PropertyKey.Name.USER_FILE_BUFFER_BYTES, usrFileBufferBytes); }
/**
 * Initializes this file system with the given URI and Hadoop configuration, delegating the
 * common setup to the superclass.
 *
 * @param uri the URI identifying this file system instance; its scheme must match getScheme()
 * @param conf the Hadoop configuration
 * @throws IOException if initialization fails
 */
public void initialize(URI uri, org.apache.hadoop.conf.Configuration conf) throws IOException {
  Preconditions.checkArgument(uri.getScheme().equals(getScheme()),
      PreconditionMessage.URI_SCHEME_MISMATCH.toString(), uri.getScheme(), getScheme());
  super.initialize(uri, conf);
  LOG.debug("initialize({}, {}). Connecting to Alluxio", uri, conf);