/**
 * Creates a {@link HashedArray} of the given length, filled with random bytes from the given context's RNG.
 */
private HashedArray createRandomData(int length, TestContext context) {
    val buffer = new byte[length];
    context.random.nextBytes(buffer);
    return new HashedArray(buffer);
}
/**
 * Generates a {@link HashedArray} of exactly the requested length, populated with random bytes.
 */
private HashedArray generateData(int length, Random rnd) {
    val buffer = new byte[length];
    rnd.nextBytes(buffer);
    return new HashedArray(buffer);
}
/**
 * Generates a {@link HashedArray} with a randomly chosen size: a value below maxLength, clamped up to at
 * least minLength. The resulting bytes are random.
 */
private HashedArray generateData(int minLength, int maxLength, Random rnd) {
    int size = Math.max(minLength, rnd.nextInt(maxLength));
    val buffer = new byte[size];
    rnd.nextBytes(buffer);
    return new HashedArray(buffer);
}
/**
 * Creates a new random key. Its length is random in [1, MAX_KEY_LENGTH); zero-length keys are never produced.
 */
private HashedArray newKey(Random rnd) {
    int length = Math.max(1, rnd.nextInt(MAX_KEY_LENGTH));
    val keyBytes = new byte[length];
    rnd.nextBytes(keyBytes);
    return new HashedArray(keyBytes);
}
/**
 * Determines whether the two given keys have identical contents, by relying on
 * {@link HashedArray}'s content-based equals().
 */
private boolean keyMatches(ArrayView k1, ArrayView k2) {
    HashedArray first = new HashedArray(k1);
    HashedArray second = new HashedArray(k2);
    return first.equals(second);
}
/** * Locates all {@link ResultT} instances in a TableBucket. * * @param bucketOffset The current segment offset of the Table Bucket we are looking into. * @param timer A {@link TimeoutTimer} for the operation. * @return A CompletableFuture that, when completed, will contain a List with the desired result items. This list * will exclude all {@link ResultT} items that are marked as deleted. */ CompletableFuture<List<ResultT>> findAllExisting(long bucketOffset, TimeoutTimer timer) { val result = new HashMap<HashedArray, ResultT>(); // This handler ensures that items are only added once (per key) and only if they are not deleted. Since the items // are processed in descending version order, the first time we encounter its key is its latest value. Consumer<ResultT> handler = item -> { TableKey key = getKey(item); HashedArray indexedKey = new HashedArray(key.getKey()); if (!result.containsKey(indexedKey)) { result.put(indexedKey, key.getVersion() == TableKey.NOT_EXISTS ? null : item); } }; return findAll(bucketOffset, handler, timer) .thenApply(v -> result.values().stream().filter(Objects::nonNull).collect(Collectors.toList())); }
/**
 * Retrieves the entries for the given keys, in request order. A key with no entry yields a null element.
 */
synchronized List<TableEntry> get(List<ArrayView> keys) {
    val result = new ArrayList<TableEntry>(keys.size());
    for (ArrayView key : keys) {
        result.add(this.entries.get(new HashedArray(key)));
    }
    return result;
}
/**
 * Marks every removed key's tracked {@link EntryData} as deleted. The segment names (the outer map's keys)
 * are not needed here; only the key sets matter.
 */
private void acceptRemovals(HashMap<String, ArrayList<TableKey>> removals, HashMap<HashedArray, EntryData> keyInfo) {
    removals.values().forEach(removedKeys ->
            removedKeys.forEach(removedKey ->
                    keyInfo.get(new HashedArray(removedKey.getKey())).deleteValue()));
}
/**
 * Removes the given keys. Versioned keys are first validated against the currently stored versions;
 * a mismatch aborts the whole operation before anything is deleted.
 */
synchronized void remove(Collection<TableKey> keys) {
    validateKeys(keys, k -> k);
    for (TableKey key : keys) {
        this.entries.remove(new HashedArray(key.getKey()));
    }
}
/**
 * Drains the given {@link AsyncIterator} into a single list, asserting along the way that every item
 * carries a unique state token.
 */
private <T> List<T> collectIteratorItems(AsyncIterator<IteratorItem<T>> iterator) throws Exception {
    val collected = new ArrayList<T>();
    val seenStates = new HashSet<HashedArray>();
    iterator.forEachRemaining(
            item -> {
                Assert.assertTrue("Duplicate IteratorItem.getState().",
                        seenStates.add(new HashedArray(item.getState())));
                collected.addAll(item.getEntries());
            },
            executorService())
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    return collected;
}
/**
 * Applies the given updates (with their assigned versions) to the tracked key info.
 * The two maps must be parallel structures: same segments, and per-segment lists of equal size.
 */
private void acceptUpdates(Map<String, ArrayList<TableEntry>> updatesBySegment, Map<String, List<Long>> versionsBySegment, Map<HashedArray, EntryData> keyInfo) {
    Assert.assertEquals(updatesBySegment.size(), versionsBySegment.size());
    updatesBySegment.forEach((segmentName, segmentUpdates) -> {
        val segmentVersions = versionsBySegment.get(segmentName);
        Assert.assertEquals(segmentUpdates.size(), segmentVersions.size());
        for (int idx = 0; idx < segmentUpdates.size(); idx++) {
            val update = segmentUpdates.get(idx);
            val tracked = keyInfo.get(new HashedArray(update.getKey().getKey()));
            tracked.setValue(update.getValue(), segmentVersions.get(idx));
        }
    });
}
/** * Fetches the existing keys for the given {@link BucketUpdate}. * * @param bucketUpdate The BucketUpdate to fetch keys for. Upon completion of this method, this will be updated with * the existing keys. * @param segment The segment to operate on. * @param timer Timer for the operation. * @return A CompletableFuture that, when completed, will indicate the operation is done. */ private CompletableFuture<Void> fetchExistingKeys(BucketUpdate bucketUpdate, DirectSegmentAccess segment, TimeoutTimer timer) { // Get all Key locations, using the bucket's last offset and backpointers. return TableBucketReader .key(segment, this.indexWriter::getBackpointerOffset, this.executor) .findAll(bucketUpdate.getBucket().getSegmentOffset(), k -> bucketUpdate.withExistingKey(new BucketUpdate.KeyInfo(new HashedArray(k.getKey()), k.getVersion())), timer); }
private void check(Map<HashedArray, HashedArray> expectedEntries, Collection<ArrayView> nonExistentKeys, ContainerTableExtension ext) throws Exception { // Verify that non-existing keys are not returned by accident. val nonExistingResult = ext.get(SEGMENT_NAME, new ArrayList<>(nonExistentKeys), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); Assert.assertEquals("Unexpected result size for non-existing key search.", nonExistentKeys.size(), nonExistingResult.size()); Assert.assertTrue("Unexpected result for non-existing key search.", nonExistingResult.stream().allMatch(Objects::isNull)); // Verify existing Keys. val expectedResult = new ArrayList<HashedArray>(); val existingKeys = new ArrayList<ArrayView>(); expectedEntries.forEach((k, v) -> { existingKeys.add(k); expectedResult.add(v); }); val existingResult = ext.get(SEGMENT_NAME, existingKeys, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); Assert.assertEquals("Unexpected result size for existing key search.", expectedResult.size(), existingResult.size()); for (int i = 0; i < expectedResult.size(); i++) { val expectedValue = expectedResult.get(i); val expectedKey = existingKeys.get(i); val actualEntry = existingResult.get(i); Assert.assertEquals("Unexpected key at position " + i, expectedKey, new HashedArray(actualEntry.getKey().getKey())); Assert.assertEquals("Unexpected value at position " + i, expectedValue, new HashedArray(actualEntry.getValue())); } }
/**
 * Inserts or updates the given entries, assigning each a fresh, monotonically increasing version.
 * Versioned (conditional) entries are validated first; a mismatch aborts before anything is stored.
 * Key and value bytes are copied, so callers may reuse their buffers.
 *
 * @return The versions assigned to the entries, in the same order as the input.
 */
synchronized List<Long> put(List<TableEntry> entries) {
    validateKeys(entries, TableEntry::getKey);
    val assignedVersions = new ArrayList<Long>(entries.size());
    for (TableEntry e : entries) {
        long version = this.nextVersion.incrementAndGet();
        val key = new HashedArray(e.getKey().getKey().getCopy());
        this.entries.put(key, TableEntry.versioned(key, new ByteArraySegment(e.getValue().getCopy()), version));
        assignedVersions.add(version);
    }
    return assignedVersions;
}
byte[] valueData = new byte[context.random.nextInt(MAX_VALUE_LENGTH)]; context.random.nextBytes(valueData); val key = new HashedArray(keyData); val offset = context.metadata.getLength(); val entry = TableEntry.versioned(key, new ByteArraySegment(valueData), offset);
/**
 * Validates the versions of all conditional (versioned) keys in the given collection against the
 * currently stored entries. Keys without a version are skipped (unconditional updates/removals).
 *
 * @param items  The items to validate.
 * @param getKey Extracts the {@link TableKey} from each item.
 * @throws CompletionException Wrapping a {@link KeyNotExistsException} if a versioned key is absent but its
 *                             version is not NOT_EXISTS, or a {@link BadKeyVersionException} if the stored
 *                             version differs from the expected one.
 */
@GuardedBy("this")
private <T> void validateKeys(Collection<T> items, Function<T, TableKey> getKey) {
    items.stream()
         .map(getKey)
         .filter(TableKey::hasVersion) // Unversioned keys need no validation.
         .forEach(k -> {
             TableEntry e = this.entries.get(new HashedArray(k.getKey()));
             if (e == null) {
                 // Key not present: only acceptable if the caller expected it not to exist.
                 if (k.getVersion() != TableKey.NOT_EXISTS) {
                     throw new CompletionException(new KeyNotExistsException(this.segmentName, k.getKey()));
                 }
             } else if (k.getVersion() != e.getKey().getVersion()) {
                 // Key exists, but at a different version than the caller expected.
                 throw new CompletionException(new BadKeyVersionException(this.segmentName, Collections.emptyMap()));
             }
         });
}
}
bu.withExistingKey(new BucketUpdate.KeyInfo(new HashedArray(new byte[]{(byte) i}), i)); bu.withKeyUpdate(new BucketUpdate.KeyUpdate(new HashedArray(new byte[]{(byte) -i}), i, i % 2 == 0)); bu.isKeyUpdated(new HashedArray(new byte[]{(byte) -1}))); Assert.assertFalse("Unexpected result from isKeyUpdated for non-updated key.", bu.isKeyUpdated(new HashedArray(new byte[]{(byte) -count})));
keyUpdates.add(new BucketUpdate.KeyUpdate(new HashedArray(key), i * hashesPerBucket + j, true)); rnd.nextBytes(key); hashToBuckets.put(KeyHashers.DEFAULT_HASHER.hash(key), bucket);
/**
 * Tests equals() and hashCode().
 */
@Test
public void testEqualsHashCode() {
    val data1 = generate();
    val data2 = copy(data1);
    HashedArray previous = null;
    for (int i = 0; i < data1.size(); i++) {
        val first = new HashedArray(data1.get(i));
        val second = new HashedArray(data2.get(i));

        // Identical contents must produce identical hashes and be mutually equal (symmetry).
        Assert.assertEquals("Expecting hashCode() to be the same for the same array contents.", first.hashCode(), second.hashCode());
        Assert.assertTrue("Expecting equals() to return true for the same array contents.", first.equals(second) && second.equals(first));

        // Compare against the previous (different) array: hashes and equality must both differ.
        if (previous != null) {
            Assert.assertNotEquals("Expecting hashCode() to be different for different arrays.", previous.hashCode(), first.hashCode());
            Assert.assertFalse("Expecting equals() to return false for different array contents.", previous.equals(first) || first.equals(previous));
        }
        previous = first;
    }
}
/** * Indexes a single Key for a Table Entry that begins with the first byte of the given InputStream. * * @param input The InputStream that contains the Table Entry to index. * @param entryOffset The offset within the Segment where this Table Entry begins. * @param keyUpdateCollection A Map where to add the result. * @return The number of bytes processed from the given InputStream. * @throws IOException If an IOException occurred. */ private int indexSingleKey(InputStream input, long entryOffset, KeyUpdateCollection keyUpdateCollection) throws IOException { // Retrieve the next entry, get its Key and hash it. EntrySerializer.Header h = this.connector.getSerializer().readHeader(input); HashedArray key = new HashedArray(StreamHelpers.readAll(input, h.getKeyLength())); // Index the Key. If it was used before, it must have had a lower offset, so this supersedes it. keyUpdateCollection.add(new BucketUpdate.KeyUpdate(key, entryOffset, h.isDeletion()), h.getTotalLength()); // We don't care about the value; so skip over it. if (h.getValueLength() > 0) { IOUtils.skipFully(input, h.getValueLength()); } return h.getTotalLength(); }