/**
 * Opens the file at {@code path} on the local filesystem, positioned at the
 * offset requested in {@code options}.
 *
 * @param path the file path to open
 * @param options open options carrying the starting offset
 * @return an input stream positioned at the requested offset
 * @throws IOException if the file cannot be opened or the offset cannot be reached
 */
@Override
public InputStream open(String path, OpenOptions options) throws IOException {
  // Resolve the UFS path to a plain local filesystem path first.
  String localPath = stripPath(path);
  FileInputStream in = new FileInputStream(localPath);
  try {
    // Advance to the requested offset; throws if the file is too short.
    ByteStreams.skipFully(in, options.getOffset());
  } catch (IOException e) {
    // Do not leak the file descriptor when positioning fails.
    in.close();
    throw e;
  }
  return in;
}
/**
 * Creates a new {@link OpenOptions} instance populated with the defaults.
 *
 * @return the default {@link OpenOptions}
 */
public static OpenOptions defaults() { return new OpenOptions(); }
JournalInputStream(UfsJournalFile file) throws IOException { mFile = file; LOG.info("Reading journal file {}.", file.getLocation()); mReader = new JournalEntryStreamReader(mUfs.open(file.getLocation().toString(), OpenOptions.defaults().setRecoverFailedOpen(true))); }
// NOTE(review): truncated fragment of an HDFS open-with-retry loop; the
// enclosing method and the body of the recovery branch are outside this view.
FSDataInputStream inputStream = hdfs.open(new Path(path));
try {
  // Position the stream at the caller-requested offset.
  inputStream.seek(options.getOffset());
} catch (IOException e) {
  // Close before retrying so the failed stream is not leaked.
  inputStream.close();
  LOG.warn("{} try to open {} : {}", retryPolicy.getAttemptCount(), path, e.getMessage());
  te = e;
  // NOTE(review): e.getMessage() may be null here (would NPE), and
  // toLowerCase() without Locale.ROOT is locale-sensitive — TODO confirm upstream.
  if (options.getRecoverFailedOpen() && dfs != null && e.getMessage().toLowerCase()
      .startsWith("cannot obtain block length for")) {
/**
 * Tests that opening a multi-block file at assorted offsets (block-interior,
 * block boundaries, and one past the end of a block) reads the expected byte.
 */
@Test
public void createOpenAtPosition() throws IOException {
  String testFile = PathUtils.concatPath(mUnderfsAddress, "createOpenAtPosition");
  prepareMultiBlockFile(testFile);
  int[] offsets = {0, 256, 511, 512, 513, 768, 1024, 1025};
  for (int offset : offsets) {
    // try-with-resources: the stream is closed even if the assertion throws,
    // which the original explicit close() did not guarantee.
    try (InputStream inputStream =
        mUfs.open(testFile, OpenOptions.defaults().setOffset(offset))) {
      // TEST_BYTES repeats, so the expected byte is the offset modulo its length.
      assertEquals(TEST_BYTES[offset % TEST_BYTES.length], inputStream.read());
    }
  }
}
/**
 * Opens the file at {@code path} for reading from the beginning, using the
 * default {@link OpenOptions}.
 *
 * @param path the file path to open
 * @return an input stream over the file
 * @throws IOException if the file cannot be opened
 */
@Override public InputStream open(String path) throws IOException { return open(path, OpenOptions.defaults()); }
/**
 * Updates the UFS input stream given an offset to read.
 *
 * @param offset the read offset within the block
 * @throws IOException if a replacement UFS stream cannot be acquired
 */
private void updateUnderFileSystemInputStream(long offset) throws IOException {
  // A held stream positioned at a different offset cannot be reused; hand it
  // back to the manager before acquiring a new one.
  boolean positionedElsewhere =
      mUnderFileSystemInputStream != null && offset != mInStreamPos;
  if (positionedElsewhere) {
    mUfsInstreamManager.release(mUnderFileSystemInputStream);
    mUnderFileSystemInputStream = null;
    mInStreamPos = -1;
  }
  // Acquire a stream only for offsets that fall inside the block.
  if (mUnderFileSystemInputStream == null && offset < mBlockMeta.getBlockSize()) {
    UnderFileSystem ufs = mUfsResource.get();
    // The UFS offset is the block's offset within the file plus the read offset.
    OpenOptions options =
        OpenOptions.defaults().setOffset(mBlockMeta.getOffset() + offset);
    mUnderFileSystemInputStream = mUfsInstreamManager.acquire(ufs,
        mBlockMeta.getUnderFileSystemPath(),
        IdUtils.fileIdFromBlockId(mBlockMeta.getBlockId()), options);
    mInStreamPos = offset;
  }
}
/**
 * Constructs a reader over the supplied journal file. The open request enables
 * recovery of previously failed opens on the UFS.
 *
 * @param file the journal file to read entries from
 * @throws IOException if opening the file on the UFS fails
 */
JournalInputStream(UfsJournalFile file) throws IOException {
  mFile = file;
  LOG.info("Reading journal file {}.", file.getLocation());
  mReader = new JournalEntryStreamReader(mUfs.open(
      file.getLocation().toString(),
      OpenOptions.defaults().setRecoverFailedOpen(true)));
}
/**
 * Opens the file at {@code path} for reading from the beginning, delegating to
 * {@link #open(String, OpenOptions)} with the default options.
 *
 * @param path the file path to open
 * @return an input stream over the file
 * @throws IOException if the file cannot be opened
 */
@Override public InputStream open(String path) throws IOException { return open(path, OpenOptions.defaults()); }
/**
 * Opens a GCS object for reading at the offset given in {@code options}.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  try {
    return new GCSInputStream(mBucketName, key, mClient, options.getOffset());
  } catch (ServiceException e) {
    // Keep the original exception as the cause rather than flattening it to
    // its message, so callers retain the full stack trace.
    throw new IOException(e.getMessage(), e);
  }
}
}
// NOTE(review): non-contiguous excerpt of a UFS input-stream caching routine;
// the surrounding control flow (loop/branch boundaries) is outside this view
// and the fragment is not syntactically self-contained — TODO restore context.
LOG.debug("Reused the under file input stream resource of {}", nextId);
// Reposition the cached stream at the caller-requested offset before reuse.
inputStream.seek(openOptions.getOffset());
break;
// Cache miss: open a seekable UFS stream and wrap it for future reuse.
inputStream = mUnderFileInputStreamCache.get(nextId, () -> {
  SeekableUnderFileInputStream ufsStream = (SeekableUnderFileInputStream) ufs.open(path,
      OpenOptions.defaults().setOffset(openOptions.getOffset()));
  LOG.debug("Created the under file input stream resource of {}", newId);
  return new CachedSeekableInputStream(ufsStream, newId, fileId, path);
  path);
// Fallback path: presumably taken when the UFS stream is not seekable, so the
// stream cannot be cached and is opened directly — TODO confirm against caller.
return ufs.open(path, OpenOptions.defaults().setOffset(openOptions.getOffset()));
/**
 * Creates a new {@link OpenOptions} instance populated with the defaults.
 *
 * @return the default {@link OpenOptions}
 */
public static OpenOptions defaults() { return new OpenOptions(); }
/**
 * Opens a Kodo object for reading at the offset given in {@code options}.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  try {
    return new KodoInputStream(key, mKodoClinet, options.getOffset(),
        mAlluxioConf.getBytes(PropertyKey.UNDERFS_OBJECT_STORE_MULTI_RANGE_CHUNK_SIZE));
  } catch (QiniuException e) {
    // Propagate with the cause instead of logging and returning null: the
    // sibling openObject implementations throw IOException, and a null return
    // would surface as a confusing NPE in the caller. (The original LOG.error
    // also passed the exception into a "{}" placeholder, losing its trace.)
    throw new IOException("Failed to open object " + key, e);
  }
}
/**
 * Opens a Swift object for reading at the offset given in {@code options},
 * using the configured multi-range chunk size.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  long chunkSize =
      mAlluxioConf.getBytes(PropertyKey.UNDERFS_OBJECT_STORE_MULTI_RANGE_CHUNK_SIZE);
  return new SwiftInputStream(mAccount, mContainerName, key, options.getOffset(), chunkSize);
}
}
/**
 * Opens a COS object for reading at the offset given in {@code options},
 * using the configured multi-range chunk size.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  try {
    return new COSInputStream(mBucketNameInternal, key, mClient, options.getOffset(),
        mAlluxioConf.getBytes(PropertyKey.UNDERFS_OBJECT_STORE_MULTI_RANGE_CHUNK_SIZE));
  } catch (CosClientException e) {
    // Preserve the original exception as the cause rather than only its
    // message, so callers retain the full stack trace.
    throw new IOException(e.getMessage(), e);
  }
}
}
/**
 * Opens an OSS object for reading at the offset given in {@code options},
 * using the configured multi-range chunk size.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  try {
    return new OSSInputStream(mBucketName, key, mClient, options.getOffset(),
        mAlluxioConf.getBytes(PropertyKey.UNDERFS_OBJECT_STORE_MULTI_RANGE_CHUNK_SIZE));
  } catch (ServiceException e) {
    // Preserve the original exception as the cause rather than only its
    // message, so callers retain the full stack trace.
    throw new IOException(e.getMessage(), e);
  }
}
}
/**
 * Opens an S3 object for reading at the offset given in {@code options},
 * retrying reads with exponential backoff per the configured policy.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  try {
    // Pull the backoff parameters out of configuration once, by name.
    int baseSleepMs =
        (int) mUfsConf.getMs(PropertyKey.UNDERFS_OBJECT_STORE_READ_RETRY_BASE_SLEEP_MS);
    int maxSleepMs =
        (int) mUfsConf.getMs(PropertyKey.UNDERFS_OBJECT_STORE_READ_RETRY_MAX_SLEEP_MS);
    int maxRetries = mUfsConf.getInt(PropertyKey.UNDERFS_OBJECT_STORE_READ_RETRY_MAX_NUM);
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(baseSleepMs, maxSleepMs, maxRetries);
    return new S3AInputStream(mBucketName, key, mClient, retryPolicy, options.getOffset());
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
}
}
/**
 * Opens a Swift object for reading at the offset given in {@code options}.
 *
 * @param key the object key
 * @param options open options carrying the starting offset
 * @return an input stream over the object
 * @throws IOException if the object cannot be opened
 */
@Override
protected InputStream openObject(String key, OpenOptions options) throws IOException {
  long offset = options.getOffset();
  return new SwiftInputStream(mAccount, mContainerName, key, offset);
}
}