/**
 * Rebuilds the hash table in place, keeping the current number of bucket segments.
 *
 * <p>This reinitializes the bucket segments, then reads every record from the record
 * segments sequentially (without following the existing pointers or buckets) and
 * reinserts it, which also compacts away any holes in the record area.
 *
 * @throws IOException if the rebuild fails while reading or writing records
 */
private void rebuild() throws IOException {
	// Delegate to the parameterized rebuild with the bucket segment count unchanged.
	final int currentNumBucketSegments = bucketSegments.length;
	rebuild(currentNumBucketSegments);
}
private void resizeTableIfNecessary() throws IOException { if (enableResize && numElements > numBuckets) { final long newNumBucketSegments = 2L * bucketSegments.length; // Checks: // - we can't handle more than Integer.MAX_VALUE buckets // - don't take more memory than the free memory we have left // - the buckets shouldn't occupy more than half of all our memory if (newNumBucketSegments * numBucketsPerSegment < Integer.MAX_VALUE && newNumBucketSegments - bucketSegments.length < freeMemorySegments.size() && newNumBucketSegments < numAllMemorySegments / 2) { // do the resize rebuild(newNumBucketSegments); } } }
private void resizeTableIfNecessary() throws IOException { if (enableResize && numElements > numBuckets) { final long newNumBucketSegments = 2L * bucketSegments.length; // Checks: // - we can't handle more than Integer.MAX_VALUE buckets // - don't take more memory than the free memory we have left // - the buckets shouldn't occupy more than half of all our memory if (newNumBucketSegments * numBucketsPerSegment < Integer.MAX_VALUE && newNumBucketSegments - bucketSegments.length < freeMemorySegments.size() && newNumBucketSegments < numAllMemorySegments / 2) { // do the resize rebuild(newNumBucketSegments); } } }
private void resizeTableIfNecessary() throws IOException { if (enableResize && numElements > numBuckets) { final long newNumBucketSegments = 2L * bucketSegments.length; // Checks: // - we can't handle more than Integer.MAX_VALUE buckets // - don't take more memory than the free memory we have left // - the buckets shouldn't occupy more than half of all our memory if (newNumBucketSegments * numBucketsPerSegment < Integer.MAX_VALUE && newNumBucketSegments - bucketSegments.length < freeMemorySegments.size() && newNumBucketSegments < numAllMemorySegments / 2) { // do the resize rebuild(newNumBucketSegments); } } }
private void resizeTableIfNecessary() throws IOException { if (enableResize && numElements > numBuckets) { final long newNumBucketSegments = 2L * bucketSegments.length; // Checks: // - we can't handle more than Integer.MAX_VALUE buckets // - don't take more memory than the free memory we have left // - the buckets shouldn't occupy more than half of all our memory if (newNumBucketSegments * numBucketsPerSegment < Integer.MAX_VALUE && newNumBucketSegments - bucketSegments.length < freeMemorySegments.size() && newNumBucketSegments < numAllMemorySegments / 2) { // do the resize rebuild(newNumBucketSegments); } } }
/**
 * Attempts to reclaim wasted space before giving up on memory.
 *
 * <p>If more than 5% of the record area is wasted (holes left behind by updated
 * records that no longer fit in their old places), a compaction is performed by
 * rebuilding the table. Otherwise an {@link EOFException} is thrown to signal that
 * the table has genuinely run out of memory.
 *
 * @throws EOFException if there is not enough wasted space to make compaction worthwhile
 * @throws IOException if the compacting rebuild fails
 */
private void compactOrThrow() throws IOException {
	// Compaction only pays off when at least 5% of the record area is holes.
	final double wasteThreshold = recordArea.getTotalSize() * 0.05;
	if (holes <= wasteThreshold) {
		throw new EOFException("InPlaceMutableHashTable memory ran out. " + getMemoryConsumptionString());
	}
	rebuild();
}