/** @return the number of lines currently stored. */
public int lineCount()
{
    return this.lines.size();
}
/** {@inheritDoc} */
@Override public long recycledPagesCount() throws IgniteCheckedException {
    // One deque entry per recycled page.
    final long recycled = deque.size();

    return recycled;
} }
/**
 * Appends a timestamped entry to the in-memory debug log queue and evicts
 * oldest entries so the queue stays within {@code debugMsgHist}.
 *
 * @param discoMsg Discovery message (unused here; kept for call-site context).
 * @param msg Message text to record.
 */
protected void debugLog(@Nullable TcpDiscoveryAbstractMessage discoMsg, String msg) {
    assert debugMode;

    // Entry format: "[HH:mm:ss,SSS][thread-name][localNodeId-internalOrder] msg".
    String entry = new SimpleDateFormat("[HH:mm:ss,SSS]").format(new Date(System.currentTimeMillis())) +
        '[' + Thread.currentThread().getName() + "][" + getLocalNodeId() +
        "-" + locNode.internalOrder() + "] " + msg;

    debugLogQ.add(entry);

    // Trim the overflow; re-check the live size each step since the queue is shared.
    int overflow = debugLogQ.size() - debugMsgHist;

    for (int i = 0; i < overflow && debugLogQ.size() > debugMsgHist; i++)
        debugLogQ.poll();
}
public void checkContext(String s, boolean front) { if (!front && pendingContext.size() > MAX_PENDING) return; // queue is full if (!harvestedContext.contains(s) && !pendingContext.contains(s)) { if (front) pendingContext.addFirst(s); else pendingContext.addLast(s); } while (pendingContext.size() > MAX_PENDING) pendingContext.removeLast(); if (harvestedContext.size() > MAX_HARVESTED) harvestedContext.clear(); }
/** * Puts exception into queue. * Thread-safe. * * @param msg Message that describe reason why error was suppressed. * @param e Exception. */ public void onException(String msg, Throwable e) { q.offerFirst( new ExceptionInfo( errCnt.incrementAndGet(), e, msg, Thread.currentThread().getId(), Thread.currentThread().getName(), U.currentTimeMillis())); // Remove extra entries. int delta = q.size() - maxSize; for (int i = 0; i < delta && q.size() > maxSize; i++) q.pollLast(); }
es.shutdownNow(); log.warn("Timed out while getting all pages for country-{} at round {}. Next round now has size {}.", country, r, nextRound.size());
throw new RuntimeException(String.format( "Timed out while downloading query data for country-%s at round %d. Next round now has size %d.", _country, r, retries.size()));
@Override public void run() {
    try {
        // Combine the shared dimension filters with this job's page filter.
        final ArrayList<ApiDimensionFilter> filters = new ArrayList<>();
        filters.addAll(_filterMap.values());
        filters.add(GoogleWebmasterFilter.pageFilter(job.getOperator(), job.getPage()));

        LIMITER.acquirePermits(1); // throttle outgoing API calls

        List<String[]> results = _webmaster
            .performSearchAnalyticsQuery(job.getStartDate(), job.getEndDate(), QUERY_LIMIT, _requestedDimensions,
                _requestedMetrics, filters);
        onSuccess(job, results, responseQueue, retries);
        reporter.report(job.getPagesSize(), _country);
    } catch (IOException e) {
        // API failure: hand the job to the retry path.
        onFailure(e.getMessage(), job, retries);
    } catch (InterruptedException e) {
        log.error(String
            .format("Interrupted while trying to get queries for job %s. Current retry size is %d.", job,
                retries.size()));
        // Restore the interrupt flag so callers/executors can observe the interruption
        // (the original swallowed it, hiding shutdown requests from the pool).
        Thread.currentThread().interrupt();
    }
} };
if (toOffload.size() > 0) { log.info("[{}] Going to automatically offload ledgers {}" + ", total size = {}, already offloaded = {}, to offload = {}",
Joiner.on(",").join(jobs), retries.size()));
// NOTE(review): this method appears garbled — statements follow an unconditional
// `return tl.size();` (unreachable code, a compile error in Java), a near-identical
// "starting push to backend" section occurs twice, and braces do not balance.
// Reconstruct from version-control history before changing behavior; comments
// below describe only what the visible tokens show.
public int harvest() {
    String[] backend = DAO.getBackend();
    // Probabilistic gate: ~99% of calls with no pending queries but pending context
    // log a scheduled push and return the message count.
    if (random.nextInt(100) != 0 && hitsOnBackend < HITS_LIMIT_4_QUERIES && pendingQueries.size() == 0 && pendingContext.size() > 0) {
        DAO.log("retrieval of " + tl.size() + " new messages for q = " + q + ", scheduled push; pendingQueries = " + pendingQueries.size() + ", pendingContext = " + pendingContext.size() + ", harvestedContext = " + harvestedContext.size());
        return tl.size();
        // NOTE(review): everything from here on is unreachable as written.
        if (hitsOnBackend == 0) {
            if (pendingContext.size() == 0) {
                // Back off briefly when there is no context to work on.
                if (pendingContext.size() == 0) try {Thread.sleep(10000);} catch (InterruptedException e) {}
                PushThread pushThread = new PushThread(backend, tl);
                DAO.log( "starting push to backend; pendingQueries = " + pendingQueries.size() + ", pendingContext = " + pendingContext.size() + ", harvestedContext = " + harvestedContext.size());
                executor.execute(pushThread);
                return -1;
                // NOTE(review): duplicate of the push section above — likely a bad merge.
                PushThread pushThread = new PushThread(backend, tl);
                DAO.log( "starting push to backend; pendingQueries = " + pendingQueries.size() + ", pendingContext = " + pendingContext.size() + ", harvestedContext = " + harvestedContext.size());
                executor.execute(pushThread);
                return tl.size();
public boolean offer(RequestMetric metric) { queue.offer(metric); //remove earliest entries while (queue.size() > MAX_ENTRIES) { queue.removeFirst(); } StatusCodeGroup statusCode = StatusCodeGroup.valueOf(metric.getStatusCode()); if (!statistics.containsKey(statusCode)) { statistics.putIfAbsent(statusCode, new RequestMetricSummary()); } RequestMetricSummary totals = statistics.get(statusCode); long time = metric.getRequestCompleteTime() - metric.getRequestStartTime(); totals.add(time, time < metric.getUriGroup().getLimit(), metric.getNrOfDatabaseQueries(), metric.getDatabaseQueryTime(), metric.getQueries().stream().filter(q -> q.isIntolerable()).count(), metric.getQueries().stream().filter(q -> q.isIntolerable()).mapToLong(q -> q.getRequestCompleteTime()-q.getRequestStartTime()).sum() ); return true; }
/**
 * Prints errors.
 *
 * @param log Logger.
 */
public void printErrors(IgniteLogger log) {
    int total = q.size();
    Iterator<ExceptionInfo> it = q.descendingIterator();

    // Walk newest-to-oldest, bounded by the size snapshot taken above.
    int idx = 0;

    while (idx < total && it.hasNext()) {
        ExceptionInfo info = it.next();

        idx++;

        String details = "Error: " + idx + U.nl() +
            " Time: " + new Date(info.time()) + U.nl() +
            " Error: " + info.message() + U.nl() +
            " Thread ID: " + info.threadId() + U.nl() +
            " Thread name: " + info.threadName();

        U.error(log, details, info.error());
    }
}
/** Sends 1000 integers across the clustered event bus and verifies they arrive in order. */
@Test
public void sendNoContext() throws Exception {
    final int count = 1000;
    ConcurrentLinkedDeque<Integer> sent = new ConcurrentLinkedDeque<>();
    ConcurrentLinkedDeque<Integer> received = new ConcurrentLinkedDeque<>();
    startNodes(2);
    CountDownLatch registered = new CountDownLatch(1);
    vertices[1].eventBus().<Integer>consumer(ADDRESS1, msg -> {
        received.add(msg.body());
        // Once every sent message has arrived, compare full sequences.
        if (received.size() == sent.size()) {
            assertEquals(new ArrayList<>(sent), new ArrayList<>(received));
            testComplete();
        }
    }).completionHandler(ar -> {
        assertTrue(ar.succeeded());
        registered.countDown();
    });
    // Wait for the consumer to be registered cluster-wide before sending.
    registered.await();
    EventBus bus = vertices[0].eventBus();
    for (int n = 0; n < count; n++) {
        sent.add(n);
        bus.send(ADDRESS1, n);
    }
    await();
}
/** Hammers the queue from five threads and checks the size cap is never exceeded. */
@Test
public void overflow_limit_respected() throws Exception {
    RequestMetric metric = RequestMetric.start("uri", uriGroup, 0);
    metric.addQuery(new QueryMetric("query1", 0, 2, true));
    metric.stop(200, 2);

    // Each worker pushes the same metric ten times.
    Runnable add10Metrics = () -> {
        for (int n = 0; n < 10; n++) {
            queue.offer(metric);
        }
    };

    Thread[] workers = new Thread[5];
    for (int t = 0; t < workers.length; t++) {
        workers[t] = new Thread(add10Metrics);
    }
    // Start all workers first to maximize contention, then join.
    for (Thread w : workers) {
        w.start();
    }
    for (Thread w : workers) {
        w.join();
    }

    assertThat(queue.getLastRequests().size(), Matchers.lessThanOrEqualTo(MetricsQueue.MAX_ENTRIES));
}
/** @return the number of reserved threads currently idle on the stack. */
@ManagedAttribute(value = "available reserved threads", readonly = true)
public int getAvailable()
{
    final int idle = _stack.size();
    return idle;
}
/**
 * Asserts the expected aggregate state of a populated {@link MetricsQueue}:
 * two status-code groups (SUCCESS, SERVER_ERROR) with known totals, and
 * three entries in the recent-requests list.
 *
 * @param queue the queue to validate.
 */
public void validateMetricsQueue(MetricsQueue queue) {
    Map<StatusCodeGroup, RequestMetricSummary> detailed = queue.getDetailed();
    assertNotNull(detailed);
    assertEquals(2, detailed.size());

    // 2xx group: two requests, one of them intolerable.
    RequestMetricSummary success = detailed.get(StatusCodeGroup.SUCCESS);
    assertNotNull(success);
    assertEquals(2, success.getCount());
    assertEquals(1, success.getIntolerableCount());
    assertEquals((double)(MAX_TIME+3) / 2.0, success.getAverageTime(), DELTA);
    assertEquals(MAX_TIME+1, success.getAverageIntolerableTime(), DELTA);
    assertEquals(2, success.getDatabaseQueryCount());
    assertEquals(3.5, success.getAverageDatabaseQueryTime(), DELTA);

    // 5xx group: a single tolerable request.
    RequestMetricSummary serverError = detailed.get(StatusCodeGroup.SERVER_ERROR);
    assertNotNull(serverError);
    assertEquals(1, serverError.getCount());
    assertEquals(0, serverError.getIntolerableCount());
    assertEquals(5, serverError.getAverageTime(), DELTA);
    assertEquals(0, serverError.getAverageIntolerableTime(), DELTA);
    assertEquals(1, serverError.getDatabaseQueryCount());
    assertEquals(2, serverError.getAverageDatabaseQueryTime(), DELTA);
    assertEquals(0, serverError.getDatabaseIntolerableQueryCount());
    assertEquals(0, serverError.getAverageDatabaseIntolerableQueryTime(), DELTA);

    assertEquals(3, queue.getLastRequests().size());
}
meanValues[c] /= currentData.size();
// Sends 1000 integers across a 2-node clustered event bus and verifies they
// all arrive, in order, at the consumer on the other node.
// NOTE(review): this method is byte-identical to an earlier sendNoContext in
// this file's visible chunk — likely a duplicated paste; confirm and deduplicate.
@Test public void sendNoContext() throws Exception { int size = 1000; ConcurrentLinkedDeque<Integer> expected = new ConcurrentLinkedDeque<>(); ConcurrentLinkedDeque<Integer> obtained = new ConcurrentLinkedDeque<>(); startNodes(2); CountDownLatch latch = new CountDownLatch(1); vertices[1].eventBus().<Integer>consumer(ADDRESS1, msg -> { obtained.add(msg.body()); /* once every sent value has arrived, compare full sequences */ if (obtained.size() == expected.size()) { assertEquals(new ArrayList<>(expected), new ArrayList<>(obtained)); testComplete(); } }).completionHandler(ar -> { assertTrue(ar.succeeded()); latch.countDown(); }); /* wait for cluster-wide consumer registration before sending */ latch.await(); EventBus bus = vertices[0].eventBus(); for (int i = 0;i < size;i++) { expected.add(i); bus.send(ADDRESS1, i); } await(); }