/**
 * Log a summary of how the hash partitions were handled: how many were
 * processed entirely in memory and how many were spilled to disk.
 */
public void dumpStats() {
  int numPartitionsInMem = 0;
  int numPartitionsOnDisk = 0;
  for (HashPartition hp : hashPartitions) {
    if (hp.isHashMapOnDisk()) {
      numPartitionsOnDisk++;
    } else {
      numPartitionsInMem++;
    }
  }
  LOG.info(numPartitionsInMem + " partitions in memory have been processed successfully; "
      + numPartitionsOnDisk + " partitions have been spilled to disk and will be processed next.");
}
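For hypothetical counts of 12 resident and 4 spilled partitions, the emitted INFO line would read:

    12 partitions in memory have been processed successfully; 4 partitions have been spilled to disk and will be processed next.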
// Reconstructed from a truncated excerpt: both if-blocks were cut off mid-statement;
// the completions below (and the continueProcess helper) are assumptions, not verbatim source.
for (int i = 0; i < hashPartitions.length; i++) {
  if (!hashPartitions[i].isHashMapOnDisk()) {
    // Release each in-memory partition and deduct its rows from the running total.
    hybridHtContainer.setTotalInMemRowCount(
        hybridHtContainer.getTotalInMemRowCount()
            - hashPartitions[i].getHashMapFromMemory().getNumValues());
    hashPartitions[i].getHashMapFromMemory().clear();
  }
}
for (int i = 0; i < hashPartitions.length; i++) {
  if (hashPartitions[i].isHashMapOnDisk()) {
    try {
      continueProcess(hashPartitions[i]); // reload the spilled hashmap and re-run the join
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }
}
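The design point here is ordering: every resident partition is freed before any spilled partition is reloaded, so each on-disk hashmap gets the whole reclaimed budget to itself. A minimal standalone sketch of that two-pass pattern, using mock types rather than Hive's API (Partition and the row counts are illustrative):

import java.util.Arrays;
import java.util.List;

class TwoPassSketch {
  static final class Partition {
    final boolean onDisk;
    int inMemRows;
    Partition(boolean onDisk, int inMemRows) { this.onDisk = onDisk; this.inMemRows = inMemRows; }
  }

  public static void main(String[] args) {
    List<Partition> parts = Arrays.asList(
        new Partition(false, 500), new Partition(true, 0), new Partition(false, 300));
    int totalInMemRows = 800;

    // Pass 1: release the in-memory partitions, deducting their rows from the total.
    for (Partition p : parts) {
      if (!p.onDisk) {
        totalInMemRows -= p.inMemRows;
        p.inMemRows = 0; // plays the role of hashMap.clear()
      }
    }
    System.out.println("resident rows left: " + totalInMemRows); // 0: all accounted for

    // Pass 2: only now revisit the spilled partitions, one at a time.
    for (Partition p : parts) {
      if (p.onDisk) {
        System.out.println("reprocessing a spilled partition");
      }
    }
  }
}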
/**
 * Check if the memory threshold is about to be reached.
 * Since the write buffers in BytesBytesMultiHashMap are lazily allocated, we need to
 * account for those as well. We also need to account for the next 1024 rows
 * (one memoryCheckFrequency batch) to be loaded.
 * @return true if memory is full, false if not
 */
private boolean isMemoryFull() {
  int numPartitionsInMem = 0;
  for (HashPartition hp : hashPartitions) {
    if (!hp.isHashMapOnDisk()) {
      numPartitionsInMem++;
    }
  }
  return refreshMemoryUsed()
      + this.memoryCheckFrequency * getTableRowSize()
      + writeBufferSize * numPartitionsInMem >= memoryThreshold;
}
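To make the estimate concrete, here is the inequality evaluated standalone with hypothetical numbers (memoryCheckFrequency = 1024 follows the comment above; every other value is assumed for illustration):

public class MemoryCheckSketch {
  public static void main(String[] args) {
    long memoryUsed = 900L << 20;        // 900 MiB currently in use (assumed)
    int memoryCheckFrequency = 1024;     // rows loaded between checks
    long tableRowSize = 256;             // estimated bytes per row (assumed)
    long writeBufferSize = 8L << 20;     // one lazily allocated 8 MiB write buffer (assumed)
    int numPartitionsInMem = 4;          // partitions not yet spilled (assumed)
    long memoryThreshold = 1024L << 20;  // 1 GiB budget (assumed)

    // current usage + the next batch of rows + a pending write buffer per resident partition
    long projected = memoryUsed
        + memoryCheckFrequency * tableRowSize
        + writeBufferSize * numPartitionsInMem;

    // 900 MiB + 0.25 MiB + 32 MiB = 932.25 MiB, below the 1 GiB threshold
    System.out.println("full = " + (projected >= memoryThreshold)); // full = false
  }
}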
public int size() {
  if (isHashMapOnDisk()) {
    // Rows are in a combination of the on-disk hashmap and the sidefile
    return rowsOnDisk + (sidefileKVContainer != null ? sidefileKVContainer.size() : 0);
  } else {
    // All rows should be in the in-memory hashmap
    return hashMap.size();
  }
}
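A standalone sketch of the same accounting, showing that a spilled partition's row count is the sum of its on-disk rows and its sidefile overflow (the surrounding class and field values are illustrative):

import java.util.ArrayList;
import java.util.List;

class PartitionSizeSketch {
  boolean hashMapOnDisk = true;
  int rowsOnDisk = 1000;                                // rows already written to the on-disk hashmap
  List<Object> sidefileKVContainer = new ArrayList<>(); // rows that arrived after the spill

  int size() {
    if (hashMapOnDisk) {
      return rowsOnDisk + (sidefileKVContainer != null ? sidefileKVContainer.size() : 0);
    }
    return 0; // in-memory case elided; the real method returns hashMap.size()
  }

  public static void main(String[] args) {
    PartitionSizeSketch p = new PartitionSizeSketch();
    p.sidefileKVContainer.add(new Object()); // one overflow row
    System.out.println(p.size());            // 1001 = 1000 on disk + 1 in the sidefile
  }
}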