public JobResultsStore(final FileSystemPlugin plugin, final IndexedStore<JobId, JobResult> store, final BufferAllocator allocator) throws IOException { this.storageName = plugin.getName(); this.dfs = plugin.getFS(ImpersonationUtil.getProcessUserName()); this.jobStoreLocation = plugin.getConfig().getPath(); this.dfs.mkdirs(jobStoreLocation); this.store = store; this.allocator = allocator; this.jobResults = CacheBuilder.newBuilder() .maximumSize(100) .expireAfterAccess(15, TimeUnit.MINUTES) .build( new CacheLoader<JobId, JobData>() { @Override public JobData load(JobId key) throws Exception { final JobDataImpl jobDataImpl = new JobDataImpl(new LateJobLoader(key), key); return newJobDataReference(jobDataImpl); } }); }
/**
 * Factory hook: builds a {@link FileSystemPlugin} for this MapR configuration.
 *
 * @param context sabot context the plugin runs in
 * @param name source name for the new plugin
 * @param pluginIdProvider provider of the plugin's storage id
 * @return a newly constructed filesystem plugin (idProvider slot passed as {@code null})
 */
@Override
public FileSystemPlugin<MapRConf> newPlugin(SabotContext context, String name,
    Provider<StoragePluginId> pluginIdProvider) {
  return new FileSystemPlugin<>(this, context, name, null, pluginIdProvider);
}
/**
 * Jackson-deserializable description of a CREATE TABLE target on a filesystem source.
 * The live plugin and its format plugin are resolved eagerly from the injected catalog.
 *
 * @throws ExecutionSetupException if the source or format plugin cannot be resolved
 */
@JsonCreator
public FileSystemCreateTableEntry(
    @JsonProperty("userName") String userName,
    @JsonProperty("pluginId") StoragePluginId pluginId,
    @JsonProperty("formatConfig") FormatPluginConfig formatConfig,
    @JsonProperty("location") String location,
    @JsonProperty("options") WriterOptions options,
    @JacksonInject CatalogService catalogService) throws ExecutionSetupException {
  this.userName = userName;
  // Resolve the source first: the format plugin lookup below depends on it.
  this.plugin = catalogService.getSource(pluginId);
  this.formatPlugin = plugin.getFormatPlugin(formatConfig);
  this.location = location;
  this.options = options;
}
/**
 * Reports whether the given table path resolves to an existing regular file,
 * as seen through {@code username}'s filesystem view.
 *
 * @param username user whose filesystem permissions apply
 * @param filePath table path components to resolve
 * @return true iff the resolved path exists and is a file
 * @throws IOException on filesystem access failure
 */
protected boolean fileExists(String username, List<String> filePath) throws IOException {
  final List<String> resolvedComponents = resolveTableNameToValidPath(filePath);
  return getFS(username).isFile(PathUtils.toFSPath(resolvedComponents));
}
// Builds a DatasetDownloadManager backed by the dedicated dataset-download storage plugin,
// wiring in the jobs service, namespace service, and the plugin's configured path/filesystem.
// NOTE(review): the trailing `private void validate(...)` declaration is truncated here — its
// body lies outside this view; preserved verbatim.
private DatasetDownloadManager downloadManager() { final FileSystemPlugin downloadPlugin = catalogService.getSource(DATASET_DOWNLOAD_STORAGE_PLUGIN); return new DatasetDownloadManager(jobsService, namespaceService, downloadPlugin.getConfig().getPath(), downloadPlugin.getFs()); } private void validate(DatasetPath path, VirtualDatasetUI ds) {
// Covariant override: narrows the inherited getter's return type to HomeFileConf
// so callers avoid a cast; delegates entirely to the superclass.
@Override public HomeFileConf getConfig() { return super.getConfig(); }
// NOTE(review): garbled/truncated fragment of a drop-table method — the first UserException
// builder is missing its .build(logger) call, `fileSelection` is used before assignment, and
// the braces are unbalanced. Preserved verbatim; reconstruct from the full source before editing.
// Intent (as far as visible): reject the drop when the schema is immutable for this user,
// then resolve the table path and verify the selection is homogeneous before deleting.
if(!getMutability().hasMutationCapability(MutationType.TABLE, schemaConfig.isSystemUser())) { throw UserException.parseError() .message("Unable to drop table. Schema [%s] is immutable for this user.", this.name) FileSystemWrapper fs = getFS(schemaConfig.getUserName()); List<String> fullPath = resolveTableNameToValidPath(tableSchemaPath); FileSelection fileSelection; try { if (!isHomogeneous(fs, fileSelection)) { throw UserException .validationError()
// NOTE(review): truncated fragment of start() — the loop body's closing braces lie outside this
// view. Visible behavior: iterate configured properties (if any), obtain a filesystem as the
// system user, and ensure required directories exist via createIfNecessary(). Preserved verbatim.
@Override public void start() throws IOException { List<Property> properties = getProperties(); if (properties != null) { for (Property prop : properties) { this.fs = getFS(SYSTEM_USERNAME); createIfNecessary();
/**
 * Fetches a single filesystem entry for {@code path}, as seen by {@code userName}.
 * Dataset paths already registered in the namespace are collected so the returned
 * entity can be classified accordingly.
 *
 * @param path path components of the item to fetch
 * @param userName user whose filesystem/namespace view is consulted
 * @return the schema entity for the path
 * @throws RuntimeException wrapping any IO or namespace failure
 */
public SchemaEntity get(List<String> path, String userName) {
  try {
    final FileStatus fileStatus =
        getFS(userName).getFileStatus(PathUtils.toFSPath(resolveTableNameToValidPath(path)));
    final NamespaceService namespace = context.getNamespaceService(userName);
    final NamespaceKey folderKey = new NamespaceKey(path);
    // Collect resolved paths of datasets the namespace already knows under this folder.
    final Set<List<String>> knownTables = Sets.newHashSet();
    if (namespace.exists(folderKey)) {
      for (NameSpaceContainer container : namespace.list(folderKey)) {
        if (container.getType() == Type.DATASET) {
          knownTables.add(resolveTableNameToValidPath(container.getDataset().getFullPathList()));
        }
      }
    }
    final List<String> statusComponents = PathUtils.toPathComponents(fileStatus.getPath());
    return getSchemaEntity(fileStatus, knownTables, statusComponents);
  } catch (IOException | NamespaceException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Drops a view by deleting its backing file.
 *
 * @param schemaConfig caller's schema configuration (used for mutability and user checks)
 * @param tableSchemaPath path components of the view to drop
 * @throws IOException on filesystem failure
 * @throws UserException (parse error) when this schema is immutable for the user
 */
@Override
public void dropView(SchemaConfig schemaConfig, List<String> tableSchemaPath) throws IOException {
  final boolean mutable =
      getMutability().hasMutationCapability(MutationType.VIEW, schemaConfig.isSystemUser());
  if (!mutable) {
    throw UserException.parseError()
        .message("Unable to drop view. Schema [%s] is immutable for this user.", this.name)
        .build(logger);
  }
  // Views are stored as single files; non-recursive delete is sufficient.
  getFS(schemaConfig.getUserName()).delete(getViewPath(tableSchemaPath), false);
}
// NOTE(review): truncated fragment of createNewTable(...) — the immutability UserException is
// missing its .build(...) call, several closing braces are absent, and the method's tail lies
// outside this view. Preserved verbatim. Visible intent: verify mutation capability, resolve the
// table name, pick (or reject) a format plugin for the requested storage format, build a format
// config from storage options, then resolve the target path and check for an existing file.
@Override public CreateTableEntry createNewTable(SchemaConfig config, NamespaceKey key, WriterOptions writerOptions, Map<String, Object> storageOptions) { if(!getMutability().hasMutationCapability(MutationType.TABLE, config.isSystemUser())) { throw UserException.parseError() .message("Unable to create table. Schema [%s] is immutable for this user.", key.getParent()) final String tableName = getTableName(key); formatPlugin = getFormatPlugin(storage); if (formatPlugin == null) { throw new UnsupportedOperationException(String.format("Unsupported format '%s' in '%s'", storage, key)); final FormatPluginConfig formatConfig = createConfigForTable(tableName, storageOptions); formatPlugin = getFormatPlugin(formatConfig); Path path = resolveTablePathToValidPath(tableName); try { if(fs.exists(path)) {
// NOTE(review): truncated fragment from the middle of a dataset-resolution method — `rootStatus`
// and `dirStatus` are declared outside this view and braces are unbalanced. Preserved verbatim.
// Visible intent: resolve the dataset path against the (possibly impersonated) user's filesystem,
// build a FileSelection, and cache file-status entities for directories encountered.
return null; // not a valid table schema path final List<String> fullPath = resolveTableNameToValidPath(datasetPath.getPathComponents()); try { FileSystemWrapper fs = getFS((user != null) ? user : userName); FileSelection fileSelection = FileSelection.create(fs, fullPath); String tableName = datasetPath.getName(); if (rootStatus.isDirectory()) { cachedEntities.add(fromFileStatus(rootStatus)); cachedEntities.add(fromFileStatus(dirStatus));
@Override public boolean hasAccessPermission(String user, NamespaceKey key, DatasetConfig datasetConfig) { if (config.isImpersonationEnabled()) { if (datasetConfig.getReadDefinition() != null) { // allow accessing partial datasets final FileSystemWrapper userFs = getFS(user); final List<TimedRunnable<Boolean>> permissionCheckTasks = Lists.newArrayList(); permissionCheckTasks.addAll(getUpdateKeyPermissionTasks(datasetConfig, userFs)); permissionCheckTasks.addAll(getSplitPermissiomTasks(datasetConfig, userFs, user)); try { Stopwatch stopwatch = Stopwatch.createStarted(); final List<Boolean> accessPermissions = TimedRunnable.run("check access permission for " + key, logger, permissionCheckTasks, 16); stopwatch.stop(); logger.debug("Checking access permission for {} took {} ms", key, stopwatch.elapsed(TimeUnit.MILLISECONDS)); for (Boolean permission : accessPermissions) { if (!permission) { return false; } } } catch (IOException ioe) { throw UserException.dataReadError(ioe).build(logger); } } } return true; }
/**
 * Probes the source's health. For non-PDFS filesystems, health is verified by listing the
 * configured root path; PDFS sources are reported GOOD without probing.
 *
 * @return {@code SourceState.GOOD} when reachable, otherwise a bad state wrapping the failure
 */
@Override
public SourceState getState() {
  final FileSystemWrapper fs = getFS(ImpersonationUtil.getProcessUserName());
  if (fs.isPdfs()) {
    // PDFS is not probed here; report healthy unconditionally.
    return SourceState.GOOD;
  }
  try {
    fs.listStatus(config.getPath());
    return SourceState.GOOD;
  } catch (Exception e) {
    return SourceState.badState(e);
  }
}
/**
 * Builds a scan operator over an easy-format sub-scan: resolves the plugin and format,
 * optionally sorts the splits by file block order, and lazily wraps each split in a
 * record reader.
 *
 * @throws ExecutionSetupException if the storage plugin cannot be resolved
 */
@Override
public ProducerOperator create(FragmentExecutionContext fragmentExecContext,
    final OperatorContext context, EasySubScan config) throws ExecutionSetupException {
  final FileSystemPlugin plugin = fragmentExecContext.getStoragePlugin(config.getPluginId());
  final FileSystemWrapper fileSystem = plugin.getFs(config.getUserName(), context.getStats());
  final FormatPluginConfig formatPluginConfig =
      PhysicalDatasetUtils.toFormatPlugin(config.getFileConfig(), Collections.<String>emptyList());
  final EasyFormatPlugin<?> easyPlugin = (EasyFormatPlugin<?>) plugin.getFormatPlugin(formatPluginConfig);

  // Pair each split with its extended metadata.
  FluentIterable<SplitAndExtended> pendingWork = FluentIterable.from(config.getSplits())
      .transform(new Function<DatasetSplit, SplitAndExtended>() {
        @Override
        public SplitAndExtended apply(DatasetSplit split) {
          return new SplitAndExtended(split);
        }
      });

  // Optionally order readers by file block so reads are sequential on disk.
  final boolean orderByBlocks = context.getOptions().getOption(ExecConstants.SORT_FILE_BLOCKS);
  final List<SplitAndExtended> orderedWork =
      orderByBlocks ? pendingWork.toSortedList(SPLIT_COMPARATOR) : pendingWork.toList();

  final boolean wantsAllColumns = selectsAllColumns(config.getSchema(), config.getColumns());
  final CompositeReaderConfig readerConfig =
      CompositeReaderConfig.getCompound(config.getSchema(), config.getColumns(), config.getPartitionColumns());
  final List<SchemaPath> projectedFields =
      wantsAllColumns ? ImmutableList.of(ColumnUtils.STAR_COLUMN) : readerConfig.getInnerColumns();

  // Readers are created lazily as the scan iterates.
  FluentIterable<RecordReader> readers = FluentIterable.from(orderedWork)
      .transform(new Function<SplitAndExtended, RecordReader>() {
        @Override
        public RecordReader apply(SplitAndExtended work) {
          try {
            RecordReader inner =
                easyPlugin.getRecordReader(context, fileSystem, work.getExtended(), projectedFields);
            return readerConfig.wrapIfNecessary(context.getAllocator(), inner, work.getSplit());
          } catch (ExecutionSetupException e) {
            throw new RuntimeException(e);
          }
        }
      });

  return new ScanOperator(fragmentExecContext.getSchemaUpdater(), config, context, readers.iterator());
}
// Convenience overload: fetch the user's filesystem view without operator stats
// (delegates to getFs(userName, stats) with stats == null).
public FileSystemWrapper getFS(String userName) { return getFs(userName, null); }
/** * Returns all children of the listingPath for a source */ private List<CatalogItem> getChildrenForSourcePath(String sourceName, List<String> listingPath) { final List<CatalogItem> catalogItems = new ArrayList<>(); final StoragePlugin plugin = getStoragePlugin(sourceName); if (plugin instanceof FileSystemPlugin) { // For file based plugins, use the list method to get the listing. That code will merge in any promoted datasets // that are in the namespace for us. This is in line with what the UI does. final List<SchemaEntity> list = ((FileSystemPlugin) plugin).list(listingPath, context.getUserPrincipal().getName()); for (SchemaEntity entity : list) { final CatalogItem catalogItem = convertSchemaEntityToCatalogItem(entity, listingPath); if (catalogItem != null) { catalogItems.add(catalogItem); } } } else { // for non-file based plugins we can go directly to the namespace catalogItems.addAll(getNamespaceChildrenForPath(new NamespaceKey(listingPath))); } return catalogItems; }
/**
 * Resolves a dataset definition, reusing the format settings from a previous config
 * when one exists so the dataset keeps its established format.
 *
 * @param datasetPath path of the dataset to resolve
 * @param oldConfig previously stored config, or null for a fresh dataset
 * @param retrievalOptions retrieval tuning options
 * @return the resolved table definition
 */
@Override
public SourceTableDefinition getDataset(NamespaceKey datasetPath, DatasetConfig oldConfig,
    DatasetRetrievalOptions retrievalOptions) throws Exception {
  final PhysicalDataset physical = (oldConfig == null) ? null : oldConfig.getPhysicalDataset();
  FormatPluginConfig priorFormat = null;
  if (physical != null && physical.getFormatSettings() != null) {
    // Carry the previously chosen format forward instead of re-detecting it.
    priorFormat =
        PhysicalDatasetUtils.toFormatPlugin(physical.getFormatSettings(), Collections.<String>emptyList());
  }
  return getDatasetWithFormat(datasetPath, oldConfig, priorFormat, retrievalOptions, null);
}
// NOTE(review): truncated fragment from the middle of a getProperties()-style method — the
// enclosing signature and the return lie outside this view. Visible intent: start from the
// superclass's properties (if any) and accumulate into a fresh list. Preserved verbatim.
final List<Property> result = new ArrayList<>(); List<Property> properties = super.getProperties(); if (properties != null) { result.addAll(properties);
// NOTE(review): truncated fragment of getSplits(...) — `length` and `rowGroupInfo` are declared
// outside this view and braces are unbalanced. Preserved verbatim. Visible intent: enumerate
// parquet row groups, locate implicit filesystem columns, skip special handling for the
// "__accelerator" source, and (when the cached-entity option is set) take the file length from
// the row group's cached file status.
private List<DatasetSplit> getSplits(ParquetGroupScanUtils parquetGroupScanUtils, ReadDefinition readDefinition) throws IOException { final List<DatasetSplit> splits = Lists.newArrayList(); final ImplicitFilesystemColumnFinder finder = new ImplicitFilesystemColumnFinder(getFsPlugin().getContext().getOptionManager(), fs, GroupScan.ALL_COLUMNS); List<RowGroupInfo> rowGroups = parquetGroupScanUtils.getRowGroupInfos(); if (!"__accelerator".equals(fsPlugin.getName())) { if (fsPlugin.getContext().getOptionManager().getOption(ExecConstants.PARQUET_CACHED_ENTITY_SET_FILE_SIZE)) { length = rowGroupInfo.getStatus().getLen();