/**
 * Wraps an already-open HDFS input stream in an {@link HdfsParquetDataSource},
 * identified by the file's path string.
 *
 * @param inputStream open stream positioned anywhere in the file; ownership passes to the data source
 * @param path HDFS path of the Parquet file, used only to derive the data-source id
 * @param fileSize total length of the file in bytes
 * @param stats sink for read-throughput statistics
 */
public static HdfsParquetDataSource buildHdfsParquetDataSource(FSDataInputStream inputStream, Path path, long fileSize, FileFormatDataSourceStats stats)
{
    ParquetDataSourceId dataSourceId = new ParquetDataSourceId(path.toString());
    return new HdfsParquetDataSource(dataSourceId, fileSize, inputStream, stats);
}
}
/**
 * Fills the entire {@code buffer} with data starting at {@code position},
 * delegating to the offset/length overload.
 */
@Override
public final void readFully(long position, byte[] buffer)
{
    int length = buffer.length;
    readFully(position, buffer, 0, length);
}
/**
 * Reads {@code bufferLength} bytes at {@code position} into {@code buffer} starting at
 * {@code bufferOffset}, accounting the byte count and wall-clock read time in the
 * instance counters and reporting throughput to {@code stats}.
 */
@Override
public final void readFully(long position, byte[] buffer, int bufferOffset, int bufferLength)
{
    readBytes += bufferLength;

    // Time only the underlying read itself.
    long startNanos = System.nanoTime();
    readInternal(position, buffer, bufferOffset, bufferLength);
    long elapsedNanos = System.nanoTime() - startNanos;

    readTimeNanos += elapsedNanos;
    stats.readDataBytesPerSecond(bufferLength, elapsedNanos);
}
// NOTE(review): statement fragment — the enclosing method is not visible here, so left unmodified.
// Pulls the Parquet file schema out of the (previously read) footer metadata, then builds the
// data source over the already-open stream; presumably part of a page-source factory — confirm.
FileMetaData fileMetaData = parquetMetadata.getFileMetaData(); MessageType fileSchema = fileMetaData.getSchema(); dataSource = buildHdfsParquetDataSource(inputStream, path, fileSize, stats);
// NOTE(review): incomplete fragment — tail of a parameter list fused with body statements and an
// unbalanced try; cannot be restyled safely from this view. Builds the data source and reads the
// Parquet footer via ParquetFileReader with NO_FILTER (no metadata filtering).
TupleDomain<HiveColumnHandle> effectivePredicate) ParquetDataSource dataSource = buildHdfsParquetDataSource(path, configuration, start, length); try { ParquetMetadata parquetMetadata = ParquetFileReader.readFooter(configuration, path, NO_FILTER);
/**
 * Opens {@code path} on the supplied file system and wraps the stream in an
 * {@link HdfsParquetDataSource}.
 *
 * @param start  split offset, used only for the error message
 * @param length split length, used only for the error message
 * @throws PrestoException with {@code HIVE_CANNOT_OPEN_SPLIT} if the file cannot be opened
 */
public static HdfsParquetDataSource buildHdfsParquetDataSource(FileSystem fileSystem, Path path, long start, long length, long fileSize, FileFormatDataSourceStats stats)
{
    try {
        FSDataInputStream inputStream = fileSystem.open(path);
        return new HdfsParquetDataSource(new ParquetDataSourceId(path.toString()), fileSize, inputStream, stats);
    }
    catch (Exception e) {
        // A closed filesystem or a missing file needs no split-position context in the message.
        boolean closedOrMissing = e instanceof FileNotFoundException
                || nullToEmpty(e.getMessage()).trim().equals("Filesystem closed");
        if (closedOrMissing) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage()), e);
    }
}
// NOTE(review): incomplete fragment — same broken shape as the ParquetFileReader variant above a
// parameter-list tail fused with body statements and an unbalanced try; left unmodified. Reads
// the footer via the (older) ParquetMetadataReader helper instead of ParquetFileReader.
TupleDomain<HiveColumnHandle> effectivePredicate) ParquetDataSource dataSource = buildHdfsParquetDataSource(path, configuration, start, length); try { ParquetMetadata parquetMetadata = ParquetMetadataReader.readFooter(configuration, path);
/**
 * Fills the whole {@code buffer} from {@code position} by delegating to the
 * offset/length overload.
 *
 * @throws IOException if the underlying read fails
 */
@Override
public final void readFully(long position, byte[] buffer)
        throws IOException
{
    int length = buffer.length;
    readFully(position, buffer, 0, length);
}
// Reads bufferLength bytes at the given file position into buffer[bufferOffset..], delegating
// straight to readInternal with no additional bookkeeping.
@Override public final void readFully(long position, byte[] buffer, int bufferOffset, int bufferLength) throws IOException { readInternal(position, buffer, bufferOffset, bufferLength); }
/**
 * Resolves the file system for {@code path} from the Hadoop configuration, determines the
 * file's length, opens it, and wraps the stream in an {@link HdfsParquetDataSource}.
 *
 * @param start  split offset, used only for the error message
 * @param length split length, used only for the error message
 * @throws PrestoException with {@code HIVE_CANNOT_OPEN_SPLIT} if the file cannot be opened
 */
public static HdfsParquetDataSource buildHdfsParquetDataSource(Path path, Configuration configuration, long start, long length)
{
    try {
        FileSystem fileSystem = path.getFileSystem(configuration);
        long fileSize = fileSystem.getFileStatus(path).getLen();
        FSDataInputStream inputStream = fileSystem.open(path);
        return new HdfsParquetDataSource(path, fileSize, inputStream);
    }
    catch (Exception e) {
        // A closed filesystem or a missing file needs no split-position context in the message.
        boolean closedOrMissing = e instanceof FileNotFoundException
                || nullToEmpty(e.getMessage()).trim().equals("Filesystem closed");
        if (closedOrMissing) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage()), e);
    }
}
}
// NOTE(review): statement fragment — enclosing method not visible, so left unmodified. Looks up
// the file length, opens the stream, and constructs the data source inline (the un-wrapped
// variant of the buildHdfsParquetDataSource factory); confirm against the surrounding method.
long size = fileSystem.getFileStatus(path).getLen(); FSDataInputStream inputStream = fileSystem.open(path); ParquetDataSource dataSource = new HdfsParquetDataSource(path, size, inputStream);