/**
 * Builds {@link DocsStats} from the last committed segment infos.
 * Live docs = maxDoc minus hard and soft deletes per segment; deleted docs = hard + soft deletes.
 * A {@code null} commit point yields all-zero stats.
 *
 * @param lastCommittedSegmentInfos segment infos of the last commit, may be {@code null}
 * @return aggregated doc counts and on-disk size in bytes
 * @throws UncheckedIOException if a segment's size cannot be read
 */
private DocsStats docsStats(final SegmentInfos lastCommittedSegmentInfos) {
    long liveDocs = 0;
    long deletedDocs = 0;
    long bytesOnDisk = 0;
    if (lastCommittedSegmentInfos != null) {
        for (final SegmentCommitInfo segment : lastCommittedSegmentInfos) {
            final int hardDeletes = segment.getDelCount();
            final int softDeletes = segment.getSoftDelCount();
            liveDocs += segment.info.maxDoc() - hardDeletes - softDeletes;
            deletedDocs += hardDeletes + softDeletes;
            try {
                bytesOnDisk += segment.sizeInBytes();
            } catch (IOException e) {
                throw new UncheckedIOException("Failed to get size for [" + segment.info.name + "]", e);
            }
        }
    }
    return new DocsStats(liveDocs, deletedDocs, bytesOnDisk);
}
protected final DocsStats docsStats(IndexReader indexReader) { long numDocs = 0; long numDeletedDocs = 0; long sizeInBytes = 0; // we don't wait for a pending refreshes here since it's a stats call instead we mark it as accessed only which will cause // the next scheduled refresh to go through and refresh the stats as well for (LeafReaderContext readerContext : indexReader.leaves()) { // we go on the segment level here to get accurate numbers final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader()); SegmentCommitInfo info = segmentReader.getSegmentInfo(); numDocs += readerContext.reader().numDocs(); numDeletedDocs += readerContext.reader().numDeletedDocs(); try { sizeInBytes += info.sizeInBytes(); } catch (IOException e) { logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } } return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); }
switch (flag) { case Docs: docs = new DocsStats(); break; case Store:
/**
 * Deserializes a {@link DocsStats} instance from the given stream.
 *
 * @param in stream positioned at a serialized DocsStats
 * @return the deserialized stats
 * @throws IOException if reading from the stream fails
 */
public static DocsStats readDocStats(StreamInput in) throws IOException {
    final DocsStats stats = new DocsStats();
    stats.readFrom(in);
    return stats;
}
/**
 * Retrieves document stats for {@code indexName} via the Jest stats action,
 * reading the "count" and "deleted" values from the primaries' "docs" section.
 * Asserts that the stats request succeeded before parsing.
 * NOTE(review): assumes the index exists in the response — a missing index would
 * NPE on the nested JSON lookups; confirm callers guarantee this.
 *
 * @param indexName name of the index to query
 * @return doc count and deleted-doc count for the index's primary shards
 */
@Override
public DocsStats getIndexStats(String indexName) {
    final Action statsAction = new Stats.Builder().addIndex(indexName).build();
    final JestResult result = jestClientHelper.execute(statsAction);
    Assert.isTrue(result.isSucceeded(), result.getErrorMessage());
    final JsonObject primaries = result.getJsonObject()
            .getAsJsonObject("indices")
            .getAsJsonObject(indexName)
            .getAsJsonObject("primaries");
    final JsonObject docs = primaries.getAsJsonObject("docs");
    return new DocsStats(docs.get("count").getAsLong(), docs.get("deleted").getAsLong());
}
/**
 * Retrieves document stats for {@code indexName} via the Jest stats action,
 * reading the "count" and "deleted" values from the primaries' "docs" section.
 * Asserts that the stats request succeeded before parsing.
 * NOTE(review): assumes the index exists in the response — a missing index would
 * NPE on the nested JSON lookups; confirm callers guarantee this.
 *
 * @param indexName name of the index to query
 * @return doc count and deleted-doc count for the index's primary shards
 */
@Override public DocsStats getIndexStats(String indexName) { Action getStats = new Stats.Builder().addIndex(indexName).build(); JestResult jestResult = jestClientHelper.execute(getStats); Assert.isTrue(jestResult.isSucceeded(), jestResult.getErrorMessage()); JsonObject statsJson = jestResult.getJsonObject().getAsJsonObject("indices").getAsJsonObject(indexName).getAsJsonObject("primaries"); JsonObject docsJson = statsJson.getAsJsonObject("docs"); return new DocsStats(docsJson.get("count").getAsLong(), docsJson.get("deleted").getAsLong()); }
public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) { ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); this.fieldData = new FieldDataStats();
/**
 * Computes live and deleted document counts from a freshly acquired searcher.
 * The searcher is released automatically via try-with-resources.
 *
 * @return doc stats for the current reader (no size-in-bytes component)
 */
public DocsStats docStats() {
    try (Engine.Searcher searcher = acquireSearcher("doc_stats")) {
        final long live = searcher.reader().numDocs();
        final long deleted = searcher.reader().numDeletedDocs();
        return new DocsStats(live, deleted);
    }
}
/**
 * Builds {@link DocsStats} from the last committed segment infos.
 * Live docs = maxDoc minus hard and soft deletes per segment; deleted docs = hard + soft deletes.
 * A {@code null} commit point yields all-zero stats.
 *
 * @param lastCommittedSegmentInfos segment infos of the last commit, may be {@code null}
 * @return aggregated doc counts and on-disk size in bytes
 * @throws UncheckedIOException if a segment's size cannot be read
 */
private DocsStats docsStats(final SegmentInfos lastCommittedSegmentInfos) { long numDocs = 0; long numDeletedDocs = 0; long sizeInBytes = 0; if (lastCommittedSegmentInfos != null) { for (SegmentCommitInfo segmentCommitInfo : lastCommittedSegmentInfos) { numDocs += segmentCommitInfo.info.maxDoc() - segmentCommitInfo.getDelCount() - segmentCommitInfo.getSoftDelCount(); numDeletedDocs += segmentCommitInfo.getDelCount() + segmentCommitInfo.getSoftDelCount(); try { sizeInBytes += segmentCommitInfo.sizeInBytes(); } catch (IOException e) { throw new UncheckedIOException("Failed to get size for [" + segmentCommitInfo.info.name + "]", e); } } } return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); }
/**
 * Computes live and deleted document counts from a freshly acquired searcher.
 * The searcher is released automatically via try-with-resources.
 *
 * @return doc stats for the current reader (no size-in-bytes component)
 */
public DocsStats docStats() { try (Engine.Searcher searcher = acquireSearcher("doc_stats")) { return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs()); } }
/**
 * Builds {@link DocsStats} from the last committed segment infos.
 * Live docs = maxDoc minus hard and soft deletes per segment; deleted docs = hard + soft deletes.
 * A {@code null} commit point yields all-zero stats.
 *
 * @param lastCommittedSegmentInfos segment infos of the last commit, may be {@code null}
 * @return aggregated doc counts and on-disk size in bytes
 * @throws UncheckedIOException if a segment's size cannot be read
 */
private DocsStats docsStats(final SegmentInfos lastCommittedSegmentInfos) { long numDocs = 0; long numDeletedDocs = 0; long sizeInBytes = 0; if (lastCommittedSegmentInfos != null) { for (SegmentCommitInfo segmentCommitInfo : lastCommittedSegmentInfos) { numDocs += segmentCommitInfo.info.maxDoc() - segmentCommitInfo.getDelCount() - segmentCommitInfo.getSoftDelCount(); numDeletedDocs += segmentCommitInfo.getDelCount() + segmentCommitInfo.getSoftDelCount(); try { sizeInBytes += segmentCommitInfo.sizeInBytes(); } catch (IOException e) { throw new UncheckedIOException("Failed to get size for [" + segmentCommitInfo.info.name + "]", e); } } } return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); }
/**
 * Builds {@link DocsStats} by walking the segment-level leaves of the given reader.
 * Segment size lookups that fail are logged at trace and skipped (best-effort),
 * so the byte total may undercount on I/O errors while doc counts stay exact.
 *
 * @param indexReader reader whose leaves are inspected per segment
 * @return aggregated live/deleted doc counts and on-disk size in bytes
 */
protected final DocsStats docsStats(IndexReader indexReader) { long numDocs = 0; long numDeletedDocs = 0; long sizeInBytes = 0; // we don't wait for a pending refreshes here since it's a stats call instead we mark it as accessed only which will cause // the next scheduled refresh to go through and refresh the stats as well for (LeafReaderContext readerContext : indexReader.leaves()) { // we go on the segment level here to get accurate numbers final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader()); SegmentCommitInfo info = segmentReader.getSegmentInfo(); numDocs += readerContext.reader().numDocs(); numDeletedDocs += readerContext.reader().numDeletedDocs(); try { sizeInBytes += info.sizeInBytes(); } catch (IOException e) { logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } } return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); }
public void add(CommonStats stats) { if (docs == null) { if (stats.getDocs() != null) { docs = new DocsStats(); docs.add(stats.getDocs());
switch (flag) { case Docs: docs = new DocsStats(); break; case Store:
switch (flag) { case Docs: docs = new DocsStats(); break; case Store:
switch (flag) { case Docs: docs = new DocsStats(); break; case Store:
public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) { ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); this.fieldData = new FieldDataStats();
public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) { ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); this.fieldData = new FieldDataStats();
public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) { ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); this.fieldData = new FieldDataStats();
public ClusterStatsIndices(ClusterStatsNodeResponse[] nodeResponses) { ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); this.store = new StoreStats(); this.fieldData = new FieldDataStats();