@SuppressWarnings("unchecked") public <T extends StoragePlugin> T getStoragePlugin(StoragePluginId pluginId) throws ExecutionSetupException { StoragePlugin plugin = sources.getSource(pluginId); if(plugin == null){ return null; } return (T) plugin; } }
public static void addClasspathSourceIf(CatalogService catalog) {
  try {
    catalog.createSourceIfMissingWithThrow(cp());
  } catch (ConcurrentModificationException e) {
    // no-op since the signature was changed to throw
  }
}
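A minimal usage sketch for the helper above, assuming a JUnit test base class that exposes getSabotContext(); the method name below is illustrative.

@BeforeClass
public static void setUpClasspathSource() {
  // Register the classpath source once for the test class (assumes getSabotContext() is available here).
  addClasspathSourceIf(getSabotContext().getCatalogService());
}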
private Catalog createCatalog(String userName) {
  return catalogService.getCatalog(SchemaConfig.newBuilder(userName).build());
}
@Test
public void testCheckHasPermission() throws Exception {
  getSabotContext().getCatalogService().refreshSource(new NamespaceKey("hive"), CatalogService.REFRESH_EVERYTHING_NOW, UpdateType.FULL);

  NamespaceService ns = getSabotContext().getNamespaceService(SystemUser.SYSTEM_USERNAME);
  NamespaceKey dataset = new NamespaceKey(PathUtils.parseFullPath("hive.db1.kv_db1"));
  DatasetConfig datasetConfig = ns.getDataset(dataset);
  assertTrue(getSabotContext().getCatalogService().getSource("hive").hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));

  final Path tableFile = new Path(hiveTest.getWhDir() + "/db1.db/kv_db1/000000_0");
  final Path tableDir = new Path(hiveTest.getWhDir() + "/db1.db/kv_db1");
  final FileSystem localFs = FileSystem.getLocal(new Configuration());

  try {
    // no read on file
    localFs.setPermission(tableFile, new FsPermission(FsAction.WRITE_EXECUTE, FsAction.WRITE_EXECUTE, FsAction.WRITE_EXECUTE));
    assertFalse(getSabotContext().getCatalogService().getSource("hive").hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));
  } finally {
    localFs.setPermission(tableFile, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  }

  try {
    // no exec on dir
    localFs.setPermission(tableDir, new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE));
    assertFalse(getSabotContext().getCatalogService().getSource("hive").hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));
  } finally {
    localFs.setPermission(tableDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  }
}
public void refreshNow(String... sources) throws NamespaceException {
  for (String source : sources) {
    context.getCatalogService().refreshSource(new NamespaceKey(source), CatalogService.REFRESH_EVERYTHING_NOW, CatalogService.UpdateType.FULL);
  }
}
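A minimal call-site sketch for refreshNow; the source name "hive" mirrors the tests in this section, and the surrounding test context is assumed.

// Hypothetical call site: force a full metadata refresh before asserting on datasets.
refreshNow("hive");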
    DirectProvider.wrap(fabricService),
    DirectProvider.wrap(reader));
catalogService.start();
public SourceState getSourceState(String sourceName) throws SourceNotFoundException {
  try {
    SourceState state = catalogService.getSourceState(sourceName);
    if (state == null) {
      return SourceState.badState("Unable to find source.");
    }
    return state;
  } catch (Exception e) {
    return SourceState.badState(e);
  }
}
@Test
public void testCheckReadSignature() throws Exception {
  getSabotContext().getCatalogService().refreshSource(new NamespaceKey("hive"), CatalogService.REFRESH_EVERYTHING_NOW, UpdateType.FULL);

  NamespaceService ns = getSabotContext().getNamespaceService(SystemUser.SYSTEM_USERNAME);

  DatasetConfig datasetConfig = ns.getDataset(new NamespaceKey(PathUtils.parseFullPath("hive.db1.kv_db1")));
  assertEquals(UpdateStatus.UNCHANGED,
    getSabotContext().getCatalogService().getSource("hive").checkReadSignature(
      datasetConfig.getReadDefinition().getReadSignature(), datasetConfig, DatasetRetrievalOptions.DEFAULT).getStatus());

  datasetConfig = ns.getDataset(new NamespaceKey(PathUtils.parseFullPath("hive.\"default\".partition_with_few_schemas")));
  assertEquals(UpdateStatus.UNCHANGED,
    getSabotContext().getCatalogService().getSource("hive").checkReadSignature(
      datasetConfig.getReadDefinition().getReadSignature(), datasetConfig, DatasetRetrievalOptions.DEFAULT).getStatus());

  new File(hiveTest.getWhDir() + "/db1.db/kv_db1", "000000_0").setLastModified(System.currentTimeMillis());
  File newFile = new File(hiveTest.getWhDir() + "/partition_with_few_schemas/c=1/d=1/e=1/", "empty_file");
  try {
    newFile.createNewFile();

    datasetConfig = ns.getDataset(new NamespaceKey(PathUtils.parseFullPath("hive.db1.kv_db1")));
    assertEquals(UpdateStatus.CHANGED,
      getSabotContext().getCatalogService().getSource("hive").checkReadSignature(
        datasetConfig.getReadDefinition().getReadSignature(), datasetConfig, DatasetRetrievalOptions.DEFAULT).getStatus());

    datasetConfig = ns.getDataset(new NamespaceKey(PathUtils.parseFullPath("hive.\"default\".partition_with_few_schemas")));
    assertEquals(UpdateStatus.CHANGED,
      getSabotContext().getCatalogService().getSource("hive").checkReadSignature(
        datasetConfig.getReadDefinition().getReadSignature(), datasetConfig, DatasetRetrievalOptions.DEFAULT).getStatus());
  } finally {
    newFile.delete();
  }
}
@Test
public void refreshSourceMetadata_EmptySource() throws Exception {
  doMockDatasets(mockUpPlugin, ImmutableList.<SourceTableDefinition>of());
  catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogService.UpdateType.FULL);

  // make sure the namespace has no datasets under mockUpKey
  List<NamespaceKey> datasets = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey));
  assertEquals(0, datasets.size());

  assertNoDatasetsAfterSourceDeletion();
}
    DirectProvider.wrap(fabricService),
    DirectProvider.wrap(ConnectionReader.of(sabotContext.getClasspathScan(), sabotConfig)));
catalogService.start();
@Override
public FileSystemPlugin get() {
  try {
    CatalogService storagePluginRegistry = registry.provider(CatalogService.class).get();
    return (FileSystemPlugin) storagePluginRegistry.getSource(JOBS_STORAGEPLUGIN_NAME);
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
},
private Catalog createCatalog() {
  return catalogService.getCatalog(SchemaConfig.newBuilder(security.getUserPrincipal().getName()).build());
}
public static SourceConfig addSource(NamespaceService ns, String name) throws Exception {
  final NASConf conf = new NASConf();
  conf.path = Files.createTempDirectory(null).toString();

  final SourceConfig src = new SourceConfig()
    .setName(name)
    .setCtime(100L)
    .setConnectionConf(conf)
    .setAccelerationRefreshPeriod(TimeUnit.HOURS.toMillis(24))
    .setAccelerationGracePeriod(TimeUnit.HOURS.toMillis(48));

  try {
    l(CatalogService.class).createSourceIfMissingWithThrow(src);
  } catch (ConcurrentModificationException e) {
    // noop - changed signature to throw
  }
  return src;
}
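A minimal usage sketch for addSource, assuming a test that already holds a NamespaceService; the source name "temp_nas" is illustrative.

// Registers a temporary NAS-backed source and keeps the returned config for later lookups by name.
SourceConfig src = addSource(ns, "temp_nas");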
@Test
public void refreshSourceMetadata_FirstTime() throws Exception {
  doMockDatasets(mockUpPlugin, mockDatasets);
  catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogService.UpdateType.FULL);

  // make sure the namespace has datasets and folders according to the data supplied by plugin
  List<NamespaceKey> actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey));
  assertEquals(5, actualDatasetKeys.size());

  assertDatasetsAreEqual(mockDatasets, actualDatasetKeys);
  assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld2.fld21"));
  assertDatasetSchemasDefined(actualDatasetKeys);

  assertNoDatasetsAfterSourceDeletion();
}
@VisibleForTesting
public StoragePlugin getStoragePlugin(String sourceName) throws SourceNotFoundException, ExecutionSetupException {
  StoragePlugin plugin = catalogService.getSource(sourceName);
  if (plugin == null) {
    throw new SourceNotFoundException(sourceName);
  }
  return plugin;
}
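A minimal usage sketch; the source name "hive" mirrors the tests above.

// Looks up a registered source by name; throws SourceNotFoundException if it is not registered.
StoragePlugin hivePlugin = getStoragePlugin("hive");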
public RelDataType getRowType(final NamespaceKey path) {
  DremioTable table = catalogService.getCatalog(SchemaConfig.newBuilder(SystemUser.SYSTEM_USERNAME).build()).getTable(path);
  Preconditions.checkNotNull(table, "Unknown dataset %s", path);
  return table.getRowType(JavaTypeFactoryImpl.INSTANCE);
}
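A minimal usage sketch for getRowType; the table path reuses the "hive.db1.kv_db1" dataset from the tests above and is illustrative.

// Resolves the dataset through a SYSTEM-user catalog and returns its Calcite row type.
RelDataType rowType = getRowType(new NamespaceKey(PathUtils.parseFullPath("hive.db1.kv_db1")));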
@Before
public void addWritableSource() throws IOException {
  NASConf writable = new NASConf();
  writable.allowCreateDrop = true;
  File folderWritable = folder.newFolder();

  // add an existing folder.
  new File(folderWritable, "existing").mkdirs();
  writable.path = folderWritable.getCanonicalPath();

  SourceConfig conf = new SourceConfig();
  conf.setConnectionConf(writable);
  conf.setMetadataPolicy(CatalogService.DEFAULT_METADATA_POLICY_WITH_AUTO_PROMOTE);
  conf.setName("writable");
  getSabotContext().getCatalogService().createSourceIfMissingWithThrow(conf);
}
@Test
public void refreshSourceMetadata_FirstTime_UpdateWithNewDatasets() throws Exception {
  doMockDatasets(mockUpPlugin, mockDatasets);
  catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogService.UpdateType.FULL);

  List<NamespaceKey> actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey));
  assertEquals(5, actualDatasetKeys.size());

  List<SourceTableDefinition> testDatasets = Lists.newArrayList(mockDatasets);
  testDatasets.add(newDataset(MOCK_UP + ".ds4"));
  testDatasets.add(newDataset(MOCK_UP + ".fld1.ds13"));
  testDatasets.add(newDataset(MOCK_UP + ".fld2.fld21.ds212"));
  testDatasets.add(newDataset(MOCK_UP + ".fld5.ds51"));

  doMockDatasets(mockUpPlugin, testDatasets);
  catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogService.UpdateType.FULL);

  // make sure the namespace has datasets and folders according to the data supplied by plugin in second request
  actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey));
  assertEquals(9, actualDatasetKeys.size());

  assertDatasetsAreEqual(testDatasets, actualDatasetKeys);
  assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld2.fld21", MOCK_UP + ".fld5"));
  assertDatasetSchemasDefined(actualDatasetKeys);

  assertNoDatasetsAfterSourceDeletion();
}
@JsonCreator
public GenericCreateTableEntry(
    @JsonProperty("userName") String userName,
    @JsonProperty("pluginId") StoragePluginId pluginId,
    @JsonProperty("location") String location,
    @JsonProperty("options") WriterOptions options,
    @JacksonInject CatalogService catalogService) throws ExecutionSetupException {
  this.userName = userName;
  this.plugin = catalogService.getSource(pluginId);
  this.location = location;
  this.options = options;
}
@Override
@RequestScoped
public Catalog provide() {
  return catalogService.getCatalog(SchemaConfig.newBuilder(context.getUserPrincipal().getName()).build());
}