@Override
public int compare(Object o1, Object o2) {
  // Flag flipped by the observer callback if the ORDER BY columns of the two
  // objects turn out to be equal during the second-level comparison.
  final boolean[] columnsMatched = new boolean[] {false};
  QueryObserverHolder.setInstance(new QueryObserverAdapter() {
    @Override
    public void orderByColumnsEqual() {
      columnsMatched[0] = true;
    }
  });
  final int outcome = secondLevelComparator.compare(o1, o2);
  // A nonzero outcome caused only by non-ORDER-BY fields counts as equal.
  return (outcome != 0 && columnsMatched[0]) ? 0 : outcome;
}
if (entriesMap == null || result == null) { if (verifyLimit(result, limit)) { QueryObserver observer = QueryObserverHolder.getInstance(); if (observer != null) { observer.limitAppliedAtIndexLevel(this, limit, result); QueryObserver observer = QueryObserverHolder.getInstance(); if (entriesMap instanceof SortedMap) { Iterator entriesIter = ((Map) entriesMap).entrySet().iterator();
/**
 * Installs (or reuses) a query observer for tracing, when tracing is enabled
 * and a cache is present.
 *
 * @return the observer in effect for this trace, or {@code null} when tracing
 *         is off or no cache is available
 */
public QueryObserver startTrace() {
  QueryObserver queryObserver = null;
  if (this.traceOn && this.cache != null) {
    QueryObserver current = QueryObserverHolder.getInstance();
    if (current instanceof IndexTrackingQueryObserver) {
      // Already the tracker we need; reuse it.
      queryObserver = current;
    } else if (QueryObserverHolder.hasObserver()) {
      // Some other observer is installed; do not replace it.
      queryObserver = current;
    } else {
      // No real observer installed yet: register a fresh tracker.
      queryObserver = new IndexTrackingQueryObserver();
      QueryObserverHolder.setInstance(queryObserver);
    }
  }
  return queryObserver;
}
startTime = NanoTimer.getTime(); queryObserver = new WrappedIndexTrackingQueryObserver(); QueryObserverHolder.setInstance(queryObserver); } finally { if (queryObserver != null) { QueryObserverHolder.reset();
InternalClientMembership.unregisterAllListeners(); LogWrapper.close(); QueryObserverHolder.reset(); QueryTestUtils.setCache(null); RegionTestCase.preSnapshotRegion = null;
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); SelectResults rcw = (SelectResults) r[i][1];
boolean applyOrderBy, boolean asc, long iteratorCreationTime) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { QueryObserver observer = QueryObserverHolder.getInstance(); if (result == null || limit != -1 && result.size() == limit) { return;
String otherObserver = null; if (this.traceOn) { QueryObserver qo = QueryObserverHolder.getInstance(); if (qo instanceof IndexTrackingQueryObserver) { indexObserver = (IndexTrackingQueryObserver) qo; } else if (!QueryObserverHolder.hasObserver()) { indexObserver = new IndexTrackingQueryObserver(); QueryObserverHolder.setInstance(indexObserver); } else { otherObserver = qo.getClass().getName();
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); SelectResults rcw = (SelectResults) r[i][1];
if (entriesMap == null || result == null) return; QueryObserver observer = QueryObserverHolder.getInstance(); if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result);
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); SelectResults rcw = (SelectResults) r[i][1];
/** * Asif : This function is invoked during clear operation on Region. It causes re execution of * Index Initialization query on the region & before doing this it makes theexisting data maps * null. This is needed so that index does not miss any entry being put in the region when the * Region.clear is in progress */ public void rerunIndexCreationQuery() throws QueryException { try { QueryObserver observer = QueryObserverHolder.getInstance(); observer.beforeRerunningIndexCreationQuery(); } catch (Exception e) { // Asif Ignore any exception as this should not hamper normal code flow if (logger.isDebugEnabled()) { logger.debug( "IndexMananger::rerunIndexCreationQuery: Exception in callback beforeRerunningIndexcreationQuery", e); } } if (isIndexMaintenanceTypeSynchronous()) { recreateAllIndexesForRegion(); } else { // System.out.println("Aynchronous update"); updater.addTask(RECREATE_INDEX, null, IndexProtocol.OTHER_OP); } }
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); SelectResults rcw = (SelectResults) r[i][1];
if (entriesMap == null || result == null) return; QueryObserver observer = QueryObserverHolder.getInstance(); if (verifyLimit(result, limit)) { observer.limitAppliedAtIndexLevel(this, limit, result);
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); SelectResults rcw = (SelectResults) r[i][1];
@Override public void hook(int spot) throws RuntimeException { QueryObserver observer = QueryObserverHolder.getInstance(); assertTrue(observer instanceof IndexTrackingQueryObserver); IndexTrackingQueryObserver gfObserver = (IndexTrackingQueryObserver) observer; if (spot == 1) { // before index lookup } else if (spot == 2) { // before key range index lookup } else if (spot == 3) { // End of afterIndexLookup call } else if (spot == 4) { // Before resetting indexInfoMap Map map = gfObserver.getUsedIndexes(); assertEquals(1, map.size()); assertTrue(map.get( INDEX_NAME) instanceof IndexTrackingQueryObserver.IndexInfo); rMap = (IndexTrackingQueryObserver.IndexInfo) map.get( INDEX_NAME); if (this.regn instanceof PartitionedRegion) { assertEquals(1, rMap.getResults().size()); } else if (this.regn instanceof LocalRegion) { assertEquals(1, rMap.getResults().size()); } } }
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); SelectResults rcw = (SelectResults) r[i][1];
private SelectResults doIterationEvaluate(ExecutionContext context, boolean evaluateWhereClause) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { SelectResults results = prepareEmptyResultSet(context, false); // TODO: SELF : Work on limit implementation on bulk get // check for bulk get optimization if (evaluateWhereClause) { List tmpResults = optimizeBulkGet(context); if (tmpResults != null) { // (has only one iterator) RuntimeIterator rIter = (RuntimeIterator) context.getCurrentIterators().get(0); for (Iterator itr = tmpResults.iterator(); itr.hasNext();) { Object currObj = itr.next(); rIter.setCurrent(currObj); QueryObserver observer = QueryObserverHolder.getInstance(); observer.beforeIterationEvaluation(rIter, currObj); applyProjectionAndAddToResultSet(context, results, this.orderByAttrs == null); } return results; } } int numElementsInResult = 0; try { doNestedIterations(0, results, context, evaluateWhereClause, numElementsInResult); } catch (CompiledSelect.NullIteratorException ignore) { return null; } return results; }
CacheUtils.getLogger().info("Executing query: " + queries[i]); QueryObserverImpl observer = new QueryObserverImpl(); QueryObserverHolder.setInstance(observer); r[i][1] = q.execute(); int indexLimit = queries[i].indexOf("limit");
return result; if (QueryObserverHolder.getInstance() != null) { QueryObserverHolder.getInstance().orderByColumnsEqual();