/**
 * Builds a {@link FileSystemCachedEntity} snapshot (path + last modification time)
 * from a Hadoop {@link FileStatus}.
 */
protected FileSystemCachedEntity fromFileStatus(FileStatus status) {
  final FileSystemCachedEntity entity = new FileSystemCachedEntity();
  entity.setPath(status.getPath().toString());
  entity.setLastModificationTime(status.getModificationTime());
  return entity;
}
for (int i = 0; i < cachedEntities.size(); ++i) { final FileSystemCachedEntity cachedEntity = cachedEntities.get(i); final Path cachedEntityPath = new Path(cachedEntity.getPath()); try { if(cachedEntity.getLastModificationTime() == 0) { final long updatedModificationTime = updatedFileStatus.getModificationTime(); Preconditions.checkArgument(updatedFileStatus.isDirectory(), "fs based dataset update key must be composed of directories"); if (cachedEntity.getLastModificationTime() < updatedModificationTime) {
/**
 * Serializes every non-null cached entity of the message as repeated field number 1.
 */
public void writeTo(Output output, FileUpdateKey message) throws IOException {
  if (message.cachedEntities == null) {
    return;
  }
  for (FileSystemCachedEntity entity : message.cachedEntities) {
    if (entity != null) {
      output.writeObject(1, entity, FileSystemCachedEntity.getSchema(), true);
    }
  }
}
.setStart(rowGroupInfo.getStart()) .setRowGroupIndex(rowGroupInfo.getRowGroupIndex()) .setUpdateKey(new FileSystemCachedEntity() .setPath(pathString) .setLastModificationTime(rowGroupInfo.getStatus().getModificationTime()) .setLength(length)) .setColumnValueCountsList(columnValueCounts) .setLength(rowGroupInfo.getLength()))));
/** Returns a fresh, empty {@link FileSystemCachedEntity} for this schema. */
public FileSystemCachedEntity newMessage() {
  final FileSystemCachedEntity message = new FileSystemCachedEntity();
  return message;
}
private Collection<FsPermissionTask> getUpdateKeyPermissionTasks(DatasetConfig datasetConfig, FileSystemWrapper userFs) { final FileUpdateKey fileUpdateKey = new FileUpdateKey(); ProtostuffIOUtil.mergeFrom(datasetConfig.getReadDefinition().getReadSignature().toByteArray(), fileUpdateKey, FileUpdateKey.getSchema()); if (fileUpdateKey.getCachedEntitiesList() == null || fileUpdateKey.getCachedEntitiesList().isEmpty()) { return Collections.emptyList(); } final List<FsPermissionTask> fsPermissionTasks = Lists.newArrayList(); final FsAction action; final List<Path> batch = Lists.newArrayList(); //DX-7850 : remove once solution for maprfs is found if (userFs.isMapRfs()) { action = FsAction.READ; } else { action = FsAction.READ_EXECUTE; } for (FileSystemCachedEntity cachedEntity : fileUpdateKey.getCachedEntitiesList()) { batch.add(new Path(cachedEntity.getPath())); if (batch.size() == PERMISSION_CHECK_TASK_BATCH_SIZE) { // make a copy of batch fsPermissionTasks.add(new FsPermissionTask(userFs, Lists.newArrayList(batch), action)); batch.clear(); } } if (!batch.isEmpty()) { fsPermissionTasks.add(new FsPermissionTask(userFs, batch, action)); } return fsPermissionTasks; }
/**
 * Converts a fully-serialized {@link ParquetDatasetSplitXAttr} into the lighter
 * scan-time form, copying only the fields the scanner needs (path, file length,
 * split start/length, row group index).
 *
 * Fix: removed a stray empty statement (double semicolon) after the revert call.
 */
private ByteString convertToScanXAttr(ByteString xattrFullSerialized) {
  ParquetDatasetSplitXAttr fullXAttr = ParquetDatasetXAttrSerDe.PARQUET_DATASET_SPLIT_XATTR_SERIALIZER.revert(xattrFullSerialized.toByteArray());
  ParquetDatasetSplitScanXAttr scanXAttr = new ParquetDatasetSplitScanXAttr();
  scanXAttr.setPath(fullXAttr.getPath());
  // file length comes from the update key; start/length describe the split itself
  scanXAttr.setFileLength(fullXAttr.getUpdateKey().getLength());
  scanXAttr.setStart(fullXAttr.getStart());
  scanXAttr.setLength(fullXAttr.getLength());
  scanXAttr.setRowGroupIndex(fullXAttr.getRowGroupIndex());
  return ByteString.copyFrom(ParquetDatasetXAttrSerDe.PARQUET_DATASET_SPLIT_SCAN_XATTR_SERIALIZER.serialize(scanXAttr));
}
.setStart(completeFileWork.getStart()) .setLength(completeFileWork.getLength()) .setUpdateKey(new FileSystemCachedEntity() .setPath(pathString) .setLastModificationTime(completeFileWork.getStatus().getModificationTime())) )));
/**
 * Merges fields from the input into the message until field number 0 (end of message).
 */
public void mergeFrom(Input input, FileUpdateKey message) throws IOException {
  while (true) {
    final int number = input.readFieldNumber(this);
    switch (number) {
      case 0:
        // end of message
        return;
      case 1:
        // repeated cached-entity field; create the backing list lazily
        if (message.cachedEntities == null) {
          message.cachedEntities = new ArrayList<FileSystemCachedEntity>();
        }
        message.cachedEntities.add(input.mergeObject(null, FileSystemCachedEntity.getSchema()));
        break;
      default:
        input.handleUnknownField(number, this);
        break;
    }
  }
}
/**
 * Writes the non-null fields of the split attributes: path (1), start (2),
 * length (3), and the nested update key (4).
 */
public void writeTo(Output output, EasyDatasetSplitXAttr message) throws IOException {
  if (message.path != null) {
    output.writeString(1, message.path, false);
  }
  if (message.start != null) {
    output.writeInt64(2, message.start, false);
  }
  if (message.length != null) {
    output.writeInt64(3, message.length, false);
  }
  if (message.updateKey != null) {
    output.writeObject(4, message.updateKey, FileSystemCachedEntity.getSchema(), false);
  }
}
break; case 5: message.updateKey = input.mergeObject(message.updateKey, FileSystemCachedEntity.getSchema())
/**
 * Writes the non-null fields of the parquet split attributes: path (1), start (2),
 * length (3), row group index (4), update key (5), and the repeated per-column
 * value counts (6).
 */
public void writeTo(Output output, ParquetDatasetSplitXAttr message) throws IOException {
  if (message.path != null) {
    output.writeString(1, message.path, false);
  }
  if (message.start != null) {
    output.writeInt64(2, message.start, false);
  }
  if (message.length != null) {
    output.writeInt64(3, message.length, false);
  }
  if (message.rowGroupIndex != null) {
    output.writeInt32(4, message.rowGroupIndex, false);
  }
  if (message.updateKey != null) {
    output.writeObject(5, message.updateKey, FileSystemCachedEntity.getSchema(), false);
  }
  if (message.columnValueCounts != null) {
    for (ColumnValueCount count : message.columnValueCounts) {
      if (count != null) {
        output.writeObject(6, count, ColumnValueCount.getSchema(), true);
      }
    }
  }
}
/**
 * Merges fields from the input into the message until field number 0 (end of message).
 */
public void mergeFrom(Input input, EasyDatasetSplitXAttr message) throws IOException {
  while (true) {
    final int number = input.readFieldNumber(this);
    switch (number) {
      case 0:
        // end of message
        return;
      case 1:
        message.path = input.readString();
        break;
      case 2:
        message.start = input.readInt64();
        break;
      case 3:
        message.length = input.readInt64();
        break;
      case 4:
        message.updateKey = input.mergeObject(message.updateKey, FileSystemCachedEntity.getSchema());
        break;
      default:
        input.handleUnknownField(number, this);
        break;
    }
  }
}