@Override public InsertionIds getInsertionIds( final MultiDimensionalNumericData indexedData, final int maxDuplicateInsertionIds) { // return a single empty sort key as the ID return new InsertionIds(null, Collections.singletonList(new byte[0])); }
public FitToIndexPersistenceEncoding(
    final byte[] dataId,
    final PersistentDataset<CommonIndexValue> commonData,
    final PersistentDataset<Object> adapterExtendedData,
    final byte[] partitionKey,
    final byte[] sortKey) {
  super(dataId, commonData, adapterExtendedData);
  // A null sort key is forwarded as a null sort-key list (rather than a
  // singleton list containing null) so InsertionIds treats it as absent.
  if (sortKey == null) {
    insertionIds = new InsertionIds(partitionKey, null);
  } else {
    insertionIds = new InsertionIds(partitionKey, Collections.singletonList(sortKey));
  }
}
@Override
public InsertionIds getInsertionIds(final Date indexedData) {
  // A date maps to exactly one sort key: its binary index encoding.
  final byte[] sortKey = toIndexByte(indexedData);
  return new InsertionIds(Collections.singletonList(sortKey));
}
@Override
public InsertionIds getInsertionIds(final Number indexedData) {
  // A number maps to exactly one sort key (no partition key).
  final byte[] sortKey = toIndexByte(indexedData);
  return new InsertionIds(null, Collections.singletonList(sortKey));
}
@Override
public InsertionIds getInsertionIds(final String indexedData) {
  // The string's binary form is used directly as the single sort key.
  final byte[] sortKey = StringUtils.stringToBinary(indexedData);
  return new InsertionIds(Collections.singletonList(sortKey));
}
private InsertionIds internalGetInsertionIds( final MultiDimensionalNumericData indexedData, final BigInteger maxDuplicateInsertionIds) { final List<BinnedNumericDataset> ranges = BinnedNumericDataset.applyBins(indexedData, baseDefinitions); // place each of these indices into a single row ID at a tier that will // fit its min and max final Set<SinglePartitionInsertionIds> retVal = new HashSet<>(ranges.size()); for (final BinnedNumericDataset range : ranges) { retVal.add(getRowIds(range, maxDuplicateInsertionIds)); } return new InsertionIds(retVal); }
/**
 * Writes the entry through every configured index writer and merges the
 * per-index insertion IDs from all writers into a single {@link WriteResults}.
 *
 * @param entry the entry being written (carried for the caller's context; the
 *        actual write is performed by {@code internalWriter})
 * @param internalWriter applies one index writer and reports what was written
 * @return the union of insertion IDs written, keyed by index name
 */
protected WriteResults internalWrite(
    final T entry,
    final Function<Writer<T>, WriteResults> internalWriter) {
  final Map<String, List<SinglePartitionInsertionIds>> insertionIdsPerIndex = new HashMap<>();
  for (final Writer<T> indexWriter : writers) {
    final WriteResults ids = internalWriter.apply(indexWriter);
    for (final String indexName : ids.getWrittenIndexNames()) {
      // computeIfAbsent replaces the verbose get/null-check/put idiom.
      insertionIdsPerIndex
          .computeIfAbsent(indexName, k -> new ArrayList<>())
          .addAll(ids.getInsertionIdsWritten(indexName).getPartitionKeys());
    }
  }
  // Lazily wrap each accumulated partition-key list back into InsertionIds.
  return new WriteResults(Maps.transformValues(insertionIdsPerIndex, InsertionIds::new));
}
/**
 * Converts the entry into its indexed form(s) and writes each one, merging the
 * per-index insertion IDs from every indexed entry into one {@link WriteResults}.
 *
 * @param entry the original entry; {@code adapter.convertToIndex} may expand it
 *        into multiple indexed entries
 * @param internalWriter writes a single indexed entry and reports what was written
 * @return the union of insertion IDs written, keyed by index name
 */
private WriteResults internalWrite(
    final T entry,
    final Function<T, WriteResults> internalWriter) {
  final Iterator<T> indexedEntries = adapter.convertToIndex(index, entry);
  final Map<String, List<SinglePartitionInsertionIds>> insertionIdsPerIndex = new HashMap<>();
  while (indexedEntries.hasNext()) {
    final WriteResults ids = internalWriter.apply(indexedEntries.next());
    for (final String indexName : ids.getWrittenIndexNames()) {
      // computeIfAbsent replaces the verbose get/null-check/put idiom.
      insertionIdsPerIndex
          .computeIfAbsent(indexName, k -> new ArrayList<>())
          .addAll(ids.getInsertionIdsWritten(indexName).getPartitionKeys());
    }
  }
  // Lazily wrap each accumulated partition-key list back into InsertionIds.
  return new WriteResults(Maps.transformValues(insertionIdsPerIndex, InsertionIds::new));
}
/**
 * Converts raw GeoWave keys back into {@link InsertionIds}, grouping sort keys
 * by partition key.
 *
 * @param geoWaveKeys keys to convert; keys sharing a partition are merged into
 *        one {@link SinglePartitionInsertionIds}
 * @return the grouped insertion IDs
 */
public static InsertionIds keysToInsertionIds(final GeoWaveKey... geoWaveKeys) {
  // Wrap partition bytes in ByteArray so equals/hashCode compare contents,
  // not array identity.
  final Map<ByteArray, List<byte[]>> sortKeysPerPartition = new HashMap<>();
  for (final GeoWaveKey key : geoWaveKeys) {
    // computeIfAbsent replaces the verbose get/null-check/put idiom.
    sortKeysPerPartition
        .computeIfAbsent(new ByteArray(key.getPartitionKey()), k -> new ArrayList<>())
        .add(key.getSortKey());
  }
  final Set<SinglePartitionInsertionIds> insertionIds = new HashSet<>();
  for (final Entry<ByteArray, List<byte[]>> e : sortKeysPerPartition.entrySet()) {
    insertionIds.add(new SinglePartitionInsertionIds(e.getKey().getBytes(), e.getValue()));
  }
  return new InsertionIds(insertionIds);
}
@Override public InsertionIds getInsertionIds( final MultiDimensionalNumericData indexedData, final int maxDuplicateInsertionIds) { // we need to duplicate per bin so we can't adhere to max duplication // anyways final List<BinnedNumericDataset> ranges = BinnedNumericDataset.applyBins(indexedData, baseDefinitions); final Set<SinglePartitionInsertionIds> retVal = new HashSet<>(ranges.size()); for (final BinnedNumericDataset range : ranges) { final SinglePartitionInsertionIds binRowIds = TieredSFCIndexStrategy.getRowIdsAtTier(range, tier, sfc, null, tier); if (binRowIds != null) { retVal.add(binRowIds); } } return new InsertionIds(retVal); }
/**
 * Returns all of the insertion ids for the range. Since this index strategy doesn't use
 * binning, it will return the ByteArrayId of every value in the range (i.e. if you are storing a
 * range using this index strategy, your data will be replicated for every integer value in the
 * range).
 *
 * <p> {@inheritDoc}
 */
@Override
public InsertionIds getInsertionIds(
    final MultiDimensionalNumericData indexedData,
    final int maxEstimatedDuplicateIds) {
  // Truncate the minimum and round the maximum up so the whole range is covered.
  final long lo = (long) indexedData.getMinValuesPerDimension()[0];
  final long hi = (long) Math.ceil(indexedData.getMaxValuesPerDimension()[0]);
  final List<byte[]> sortKeys = new ArrayList<>((int) (hi - lo) + 1);
  for (long value = lo; value <= hi; value++) {
    sortKeys.add(lexicoder.toByteArray(cast(value)));
  }
  return new InsertionIds(sortKeys);
}
/**
 * Recomputes the insertion IDs of an existing row at a different tier.
 *
 * @param insertId the original insertion ID whose range is re-projected
 * @param reprojectTierId the target tier
 * @param maxDuplicates cap on duplicate insertion IDs per bin
 * @return the insertion IDs of the same range expressed at the target tier
 */
public InsertionIds reprojectToTier(
    final byte[] insertId,
    final Byte reprojectTierId,
    final BigInteger maxDuplicates) {
  final MultiDimensionalNumericData originalRange = getRangeForId(insertId, null);
  final List<BinnedNumericDataset> ranges =
      BinnedNumericDataset.applyBins(originalRange, baseDefinitions);
  final int sfcIndex = orderedSfcIndexToTierId.inverse().get(reprojectTierId);
  final Set<SinglePartitionInsertionIds> retVal = new HashSet<>(ranges.size());
  for (final BinnedNumericDataset reprojectRange : ranges) {
    final SinglePartitionInsertionIds tierIds =
        TieredSFCIndexStrategy.getRowIdsAtTier(
            reprojectRange, reprojectTierId, orderedSfcs[sfcIndex], maxDuplicates, sfcIndex);
    // getRowIdsAtTier may return null for a bin that yields no row IDs
    // (callers elsewhere guard for this); skip it so a null element never
    // enters the result set.
    if (tierIds != null) {
      retVal.add(tierIds);
    }
  }
  return new InsertionIds(retVal);
}
private InsertionIds trimPartitionForSubstrategy(final InsertionIds insertionIds) {
  // Strip this substrategy's portion from each compound partition key.
  final List<SinglePartitionInsertionIds> trimmed = new ArrayList<>();
  for (final SinglePartitionInsertionIds partitionIds : insertionIds.getPartitionKeys()) {
    final byte[] trimmedPartitionId =
        CompoundIndexStrategy.trimPartitionForSubstrategy(
            partition1Length, index == 0, partitionIds.getPartitionKey());
    // A null result means the key cannot be trimmed; fall back to the
    // untouched input as a whole.
    if (trimmedPartitionId == null) {
      return insertionIds;
    }
    trimmed.add(new SinglePartitionInsertionIds(trimmedPartitionId, partitionIds.getSortKeys()));
  }
  return new InsertionIds(trimmed);
}
|| insertionIds.getPartitionKeys().isEmpty()) { if (partitionKeysEmpty) { return new InsertionIds(); } else { return new InsertionIds( Arrays.stream(partitionKeys).map( input -> new SinglePartitionInsertionIds(input)).collect(Collectors.toList())); return new InsertionIds(permutations);
/**
 * Constructs a query constrained to a single insertion ID (one partition key
 * plus one sort key) for one adapter on one index.
 */
public BaseInsertionIdQuery(
    final InternalDataAdapter<?> adapter,
    final Index index,
    final InsertionIdQuery query,
    final ScanCallback<T, ?> scanCallback,
    final DedupeFilter dedupeFilter,
    final DifferingFieldVisibilityEntryCount differingVisibilityCounts,
    final FieldVisibilityCount visibilityCounts,
    final DataIndexRetrieval dataIndexRetrieval,
    final String[] authorizations) {
  // The four null positional arguments are optional constraints in the
  // superclass signature that this query type does not use.
  // NOTE(review): confirm their exact meaning against the super constructor,
  // which is not visible here.
  super(
      new short[] {adapter.getAdapterId()},
      index,
      query,
      dedupeFilter,
      scanCallback,
      null,
      null,
      null,
      null,
      differingVisibilityCounts,
      visibilityCounts,
      dataIndexRetrieval,
      authorizations);
  // Convert the query's single (partition, sort) key pair into the row
  // ranges that will actually be scanned.
  this.ranges =
      new InsertionIds(query.getPartitionKey(), Lists.newArrayList(query.getSortKey())).asQueryRanges();
}
return new InsertionIds(partitionIds);