/**
 * Resolves the unique table name of the given data source.
 *
 * @param dataSource the data source to inspect; expected to map to exactly one name
 * @return the single backing name
 * @throws IllegalArgumentException if the data source maps to more than one name
 * @throws java.util.NoSuchElementException if the data source maps to no names at all
 */
private String getDataSourceName(final DataSource dataSource)
{
  return Iterables.getOnlyElement(dataSource.getNames());
}
/**
 * Looks up the segment timeline for the single table backing {@code dataSource}.
 *
 * @param dataSource a data source that must resolve to exactly one table name
 * @return the timeline for that table, or null if none is registered
 */
public VersionedIntervalTimeline<String, SegmentLoadInfo> getTimeline(DataSource dataSource)
{
  final String tableName = Iterables.getOnlyElement(dataSource.getNames());
  // The timelines map is shared; guard the read with the same lock used by writers.
  synchronized (lock) {
    return timelines.get(tableName);
  }
}
/**
 * Derives the metric dimension value for a data source: the bare table name when the
 * data source is backed by a single name, otherwise the list's string rendering
 * (e.g. a union of tables becomes "[ds1, ds2]").
 *
 * @param dataSource the data source of the query being metered
 * @return a string suitable for use as a metric dimension value
 */
public static String getMetricName(DataSource dataSource)
{
  final List<String> names = dataSource.getNames();
  if (names.size() == 1) {
    return names.get(0);
  }
  return names.toString();
}
}
/**
 * Looks up the server-selector timeline for the single table backing {@code dataSource}.
 *
 * @param dataSource a data source that must resolve to exactly one table name
 * @return the timeline for that table, or null if no timeline has been built for it
 */
@Nullable
@Override
public VersionedIntervalTimeline<String, ServerSelector> getTimeline(DataSource dataSource)
{
  final String tableName = Iterables.getOnlyElement(dataSource.getNames());
  // Reads and writes of the timelines map are serialized on the same lock.
  synchronized (lock) {
    return timelines.get(tableName);
  }
}
/**
 * Returns the names of the data source backing the wrapped Druid table.
 * Pure delegation; no caching or copying is performed.
 */
@Override
public List<String> getDataSourceNames()
{
  return druidTable.getDataSource().getNames();
}
/**
 * Returns the names of the wrapped query's underlying data source.
 * Pure delegation; no caching or copying is performed.
 */
@Override
public List<String> getNames()
{
  return query.getDataSource().getNames();
}
/**
 * Authorize the query. Will return an Access object denoting whether the query is
 * authorized or not.
 *
 * @param authenticationResult authentication result indicating the identity of the requester
 *
 * @return authorization result
 */
public Access authorize(final AuthenticationResult authenticationResult)
{
  transition(State.INITIALIZED, State.AUTHORIZING);
  // Every name in the (possibly multi-table) data source becomes a datasource-READ resource action.
  final Access access = AuthorizationUtils.authorizeAllResourceActions(
      authenticationResult,
      Iterables.transform(
          baseQuery.getDataSource().getNames(),
          AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR
      ),
      authorizerMapper
  );
  return doAuthorize(authenticationResult, access);
}
@Override public <T> QueryRunner<T> getQueryRunnerForIntervals(final Query<T> query, Iterable<Interval> intervals) { final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query); final Map<Integer, FireChief> partitionChiefs = chiefs.get(Iterables.getOnlyElement(query.getDataSource() .getNames())); return partitionChiefs == null ? new NoopQueryRunner<T>() : factory.getToolchest().mergeResults( factory.mergeRunners( Execs.directExecutor(), // Chaining query runners which wait on submitted chain query runners can make executor pools deadlock Iterables.transform( partitionChiefs.values(), new Function<FireChief, QueryRunner<T>>() { @Override public QueryRunner<T> apply(FireChief fireChief) { return fireChief.getQueryRunner(query); } } ) ) ); }
/**
 * Authorize the query. Will return an Access object denoting whether the query is
 * authorized or not.
 *
 * @param req HTTP request object of the request. If provided, the auth-related fields in the HTTP request
 *            will be automatically set.
 *
 * @return authorization result
 */
public Access authorize(HttpServletRequest req)
{
  transition(State.INITIALIZED, State.AUTHORIZING);
  // Pull the requester's identity off the request first, then authorize a READ action per datasource name.
  final AuthenticationResult authenticationResult = AuthorizationUtils.authenticationResultFromRequest(req);
  final Access access = AuthorizationUtils.authorizeAllResourceActions(
      req,
      Iterables.transform(
          baseQuery.getDataSource().getNames(),
          AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR
      ),
      authorizerMapper
  );
  return doAuthorize(authenticationResult, access);
}
/**
 * Tracks a running query (by id and the datasource names it touches) and registers a
 * listener that removes that bookkeeping when the query's future completes for any reason.
 */
@Override
public void registerQuery(Query query, final ListenableFuture future)
{
  final String id = query.getId();
  final List<String> datasources = query.getDataSource().getNames();

  queries.put(id, future);
  queryDatasources.putAll(id, datasources);

  // Clean up the maps above once the future finishes (success, failure, or cancellation).
  final Runnable cleanup = new Runnable()
  {
    @Override
    public void run()
    {
      queries.remove(id, future);
      for (String datasource : datasources) {
        queryDatasources.remove(id, datasource);
      }
    }
  };
  future.addListener(cleanup, Execs.directExecutor());
}
/**
 * Builds a merged query runner for specific segment descriptors: each descriptor is routed
 * to the FireChief owning its partition, falling back to a no-op runner when either the
 * data source has no chiefs or a partition has no chief.
 */
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs)
{
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  final String dataSourceName = Iterables.getOnlyElement(query.getDataSource().getNames());
  final Map<Integer, FireChief> partitionChiefs = chiefs.get(dataSourceName);

  if (partitionChiefs == null) {
    return new NoopQueryRunner<T>();
  }

  return factory.getToolchest().mergeResults(
      factory.mergeRunners(
          Execs.directExecutor(),
          Iterables.transform(
              specs,
              new Function<SegmentDescriptor, QueryRunner<T>>()
              {
                @Override
                public QueryRunner<T> apply(SegmentDescriptor spec)
                {
                  final FireChief retVal = partitionChiefs.get(spec.getPartitionNumber());
                  return retVal == null
                         ? new NoopQueryRunner<T>()
                         : retVal.getQueryRunner(query.withQuerySegmentSpec(new SpecificSegmentSpec(spec)));
                }
              }
          )
      )
  );
}
@Override public Sequence run(QueryPlus queryPlus, Map responseContext) { // verify that table datasource is passed to baseQueryRunner Assert.assertTrue(queryPlus.getQuery().getDataSource() instanceof TableDataSource); String dsName = Iterables.getOnlyElement(queryPlus.getQuery().getDataSource().getNames()); if ("ds1".equals(dsName)) { responseContext.put("ds1", "ds1"); return Sequences.simple(Arrays.asList(1, 2, 3)); } else if ("ds2".equals(dsName)) { responseContext.put("ds2", "ds2"); return Sequences.simple(Arrays.asList(4, 5, 6)); } else { throw new AssertionError("Unexpected DataSource"); } } };
final String dataSource = Iterables.getOnlyElement(query.getDataSource().getNames());
.setName(StringUtils.format("%s[%s_%s_%s]", currThreadName, query.getType(), query.getDataSource().getNames(), queryId)); if (log.isDebugEnabled()) { log.debug("Got query [%s]", query);
/**
 * Finds the query runner of the currently running task, if its datasource matches the
 * query's (single-name) datasource; otherwise a no-op runner is used. The chosen runner
 * is wrapped so its query context is set and verified before execution.
 */
private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query)
{
  final String queryDataSource = Iterables.getOnlyElement(query.getDataSource().getNames());
  QueryRunner<T> queryRunner = null;

  if (runningItem != null) {
    final Task task = runningItem.getTask();
    if (task.getDataSource().equals(queryDataSource)) {
      final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);
      if (taskQueryRunner != null) {
        if (queryRunner == null) {
          queryRunner = taskQueryRunner;
        } else {
          // Multiple runners for one datasource indicates a bug; alert rather than fail the query.
          log.makeAlert("Found too many query runners for datasource")
             .addData("dataSource", queryDataSource)
             .emit();
        }
      }
    }
  }

  if (queryRunner == null) {
    queryRunner = new NoopQueryRunner<>();
  }
  return new SetAndVerifyContextQueryRunner<>(serverConfig, queryRunner);
}
@Test
public void testUnionDataSource() throws Exception
{
  // Deserialize a union datasource and check its component tables, its flattened
  // name list, and that it round-trips through serialization unchanged.
  final DataSource dataSource = jsonMapper.readValue(
      "{\"type\":\"union\", \"dataSources\":[\"ds1\", \"ds2\"]}",
      DataSource.class
  );
  Assert.assertTrue(dataSource instanceof UnionDataSource);
  final UnionDataSource unionDataSource = (UnionDataSource) dataSource;

  Assert.assertEquals(
      Lists.newArrayList(new TableDataSource("ds1"), new TableDataSource("ds2")),
      Lists.newArrayList(unionDataSource.getDataSources())
  );
  Assert.assertEquals(
      Lists.newArrayList("ds1", "ds2"),
      Lists.newArrayList(dataSource.getNames())
  );

  final DataSource serde = jsonMapper.readValue(jsonMapper.writeValueAsString(dataSource), DataSource.class);
  Assert.assertEquals(dataSource, serde);
}
List<Rule> rules = ruleManager.getRulesWithDefault(Iterables.getFirst(query.getDataSource().getNames(), null));
@Test public void testSerde() throws Exception { String queryStr = "{\n" + " \"queryType\":\"segmentMetadata\",\n" + " \"dataSource\":\"test_ds\",\n" + " \"intervals\":[\"2013-12-04T00:00:00.000Z/2013-12-05T00:00:00.000Z\"],\n" + " \"analysisTypes\":[\"cardinality\",\"size\"]\n" + "}"; EnumSet<SegmentMetadataQuery.AnalysisType> expectedAnalysisTypes = EnumSet.of( SegmentMetadataQuery.AnalysisType.CARDINALITY, SegmentMetadataQuery.AnalysisType.SIZE ); Query query = MAPPER.readValue(queryStr, Query.class); Assert.assertTrue(query instanceof SegmentMetadataQuery); Assert.assertEquals("test_ds", Iterables.getOnlyElement(query.getDataSource().getNames())); Assert.assertEquals( Intervals.of("2013-12-04T00:00:00.000Z/2013-12-05T00:00:00.000Z"), query.getIntervals().get(0) ); Assert.assertEquals(expectedAnalysisTypes, ((SegmentMetadataQuery) query).getAnalysisTypes()); // test serialize and deserialize Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class)); }
@Test public void testSerdeWithDefaultInterval() throws Exception { String queryStr = "{\n" + " \"queryType\":\"segmentMetadata\",\n" + " \"dataSource\":\"test_ds\"\n" + "}"; Query query = MAPPER.readValue(queryStr, Query.class); Assert.assertTrue(query instanceof SegmentMetadataQuery); Assert.assertEquals("test_ds", Iterables.getOnlyElement(query.getDataSource().getNames())); Assert.assertEquals(Intervals.ETERNITY, query.getIntervals().get(0)); Assert.assertTrue(((SegmentMetadataQuery) query).isUsingDefaultInterval()); // test serialize and deserialize Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class)); // test copy Assert.assertEquals(query, Druids.SegmentMetadataQueryBuilder.copy((SegmentMetadataQuery) query).build()); }