/** * Creates a new accumulator. * * @param basename the basename (usually a global filename followed by the field name, * separated by a dash). * @param payload the payload stored by this accumulator. * @param field the name of the accumulated field. * @param indexingType the type of indexing procedure. * @param documentsPerBatch the number of documents in each batch. * @param batchDir a directory for batch files; batch names will be relativised to this * directory if it is not <code>null</code>. */ public PayloadAccumulator( final String basename, final Payload payload, final String field, final IndexingType indexingType, final int documentsPerBatch, final File batchDir ) { this.basename = basename; this.payload = payload; this.field = field; this.indexingType = indexingType; if ( indexingType != IndexingType.STANDARD && indexingType != IndexingType.REMAPPED ) throw new UnsupportedOperationException( "Non-standard payload-based indices support only standard and remapped indexing" ); if ( indexingType == IndexingType.REMAPPED ) position = new long[ documentsPerBatch ]; this.batchDir = batchDir; this.cutPoints = new IntArrayList(); this.cutPoints.add( 0 ); flags = new EnumMap<Component, Coding>( CompressionFlags.DEFAULT_PAYLOAD_INDEX ); accumulatorStream = new FastByteArrayOutputStream(); accumulator = new OutputBitStream( accumulatorStream ); }
// NOTE(review): this fragment is four concatenated copies of the same setup
// snippet; the repeated local declarations (bs, values, oa, obs) would not
// compile in a single scope — presumably each copy was extracted from a
// different method. TODO: confirm against the original source file.
int[] bs = new int[ n ];
int[] values = new int[ n ];
// Bit stream writing into a growable in-memory byte array (buffer size 0 —
// presumably unbuffered; confirm against the OutputBitStream API).
FastByteArrayOutputStream oa = new FastByteArrayOutputStream();
OutputBitStream obs = new OutputBitStream( oa, 0 );
oa.trim(); // drop unused capacity of the backing array
FastByteArrayOutputStream oa = new FastByteArrayOutputStream();
OutputBitStream obs = new OutputBitStream( oa, 0 );
oa.trim();
int[] bs = new int[ n ];
int[] values = new int[ n ];
FastByteArrayOutputStream oa = new FastByteArrayOutputStream();
OutputBitStream obs = new OutputBitStream( oa, 0 );
oa.trim();
int[] bs = new int[ n ];
int[] values = new int[ n ];
FastByteArrayOutputStream oa = new FastByteArrayOutputStream();
OutputBitStream obs = new OutputBitStream( oa, 0 );
oa.trim();
// NOTE(review): fragment appears truncated — the two `{` below have no
// matching `}` in the visible code, and `nendpoints` is never declared here.
// Indentation reflects the braces exactly as given. TODO: recover the tail.
FastByteArrayOutputStream oa = new FastByteArrayOutputStream();
OutputBitStream obs = new OutputBitStream( oa, 0 );
LongArrayList endpoints = new LongArrayList();
while( oa.length() % 4 != 0 ) { // pad to int for FastInputBitStream
	oa.write( 0 );
	oa.trim();
	// Second pass: rebuild the bitstream into a new byte array.
	FastByteArrayOutputStream noa = new FastByteArrayOutputStream();
	OutputBitStream nobs = new OutputBitStream( noa, 0 );
	pl.expectedUpdates = numWords;
	while( noa.length() % 4 != 0 ) { // pad to int for FastInputBitStream
		noa.write( 0 );
		noa.trim();
		// Swap the rebuilt structures in place of the originals.
		endpoints = nendpoints;
		oa = noa;
// Start a new batch: empty the in-memory accumulator and reset counters.
accumulatorStream.reset();
accumulator.writtenBits( 0 ); // keep the bit count consistent with the emptied stream
documentCount = 0;
// Start a new batch: empty the in-memory accumulator and reset counters.
accumulatorStream.reset();
accumulator.writtenBits( 0 ); // keep the bit count consistent with the emptied stream
documentCount = 0;
/** * Creates a new accumulator. * * @param basename the basename (usually a global filename followed by the field name, * separated by a dash). * @param payload the payload stored by this accumulator. * @param field the name of the accumulated field. * @param indexingType the type of indexing procedure. * @param documentsPerBatch the number of documents in each batch. * @param batchDir a directory for batch files; batch names will be relativised to this * directory if it is not <code>null</code>. */ public PayloadAccumulator( final String basename, final Payload payload, final String field, final IndexingType indexingType, final int documentsPerBatch, final File batchDir ) { this.basename = basename; this.payload = payload; this.field = field; this.indexingType = indexingType; if ( indexingType != IndexingType.STANDARD && indexingType != IndexingType.REMAPPED ) throw new UnsupportedOperationException( "Non-standard payload-based indices support only standard and remapped indexing" ); if ( indexingType == IndexingType.REMAPPED ) position = new long[ documentsPerBatch ]; this.batchDir = batchDir; this.cutPoints = new LongArrayList(); this.cutPoints.add( 0 ); flags = new EnumMap<Component, Coding>( CompressionFlags.DEFAULT_PAYLOAD_INDEX ); accumulatorStream = new FastByteArrayOutputStream(); accumulator = new OutputBitStream( accumulatorStream ); }
pl.itemsName = "keys";
// Build a partial trie over the keys, then serialise it to an in-memory bitstream.
final PartialTrie<T> immutableBinaryTrie = new PartialTrie<>(elements, log2BucketSize, transformationStrategy, pl);
final FastByteArrayOutputStream fbStream = new FastByteArrayOutputStream();
final OutputBitStream trie = new OutputBitStream(fbStream, 0);
pl.start("Converting to bitstream...");
fbStream.trim(); // drop unused capacity before exposing the backing array
// Keep the raw backing array as the serialised trie representation.
this.trie = fbStream.array;
FastByteArrayOutputStream oa = new FastByteArrayOutputStream();
OutputBitStream obs = new OutputBitStream( oa, 0 );
final LongArrayList endpoints = new LongArrayList();
// NOTE(review): the `{` below has no matching `}` in the visible fragment;
// indentation reflects the brace exactly as given. TODO: recover the tail.
while( oa.length() % 4 != 0 ) { // pad to int for FastInputBitStream
	oa.write( 0 );
	oa.trim();
// Start a new batch: empty the in-memory accumulator and reset counters.
accumulatorStream.reset();
accumulator.writtenBits( 0 ); // keep the bit count consistent with the emptied stream
documentCount = 0;
/** * Creates a new accumulator. * * @param ioFactory the factory that will be used to perform I/O. * @param basename the basename (usually a global filename followed by the field name, * separated by a dash). * @param payload the payload stored by this accumulator. * @param field the name of the accumulated field. * @param indexingType the type of indexing procedure. * @param documentsPerBatch the number of documents in each batch. * @param batchDir a directory for batch files; batch names will be relativised to this * directory if it is not {@code null}. */ public PayloadAccumulator( final IOFactory ioFactory, final String basename, final Payload payload, final String field, final IndexingType indexingType, final int documentsPerBatch, final File batchDir ) { this.basename = basename; this.ioFactory = ioFactory; this.payload = payload; this.field = field; this.indexingType = indexingType; if ( indexingType != IndexingType.STANDARD && indexingType != IndexingType.REMAPPED ) throw new UnsupportedOperationException( "Non-standard payload-based indices support only standard and remapped indexing" ); if ( indexingType == IndexingType.REMAPPED ) position = new long[ documentsPerBatch ]; this.batchDir = batchDir; this.cutPoints = new LongArrayList(); this.cutPoints.add( 0 ); flags = new EnumMap<Component, Coding>( CompressionFlags.DEFAULT_PAYLOAD_INDEX ); accumulatorStream = new FastByteArrayOutputStream(); accumulator = new OutputBitStream( accumulatorStream ); }
pl.itemsName = "keys";
// Build a partial trie over the keys, then serialise it to an in-memory bitstream.
final PartialTrie<T> immutableBinaryTrie = new PartialTrie<>(elements, size, bucketSize, transformationStrategy, pl);
final FastByteArrayOutputStream fbStream = new FastByteArrayOutputStream();
final OutputBitStream trie = new OutputBitStream(fbStream, 0);
pl.expectedUpdates = immutableBinaryTrie.size;
fbStream.trim(); // drop unused capacity before exposing the backing array
// Keep the raw backing array as the serialised trie representation.
this.trie = fbStream.array;
// Start a new batch: empty the in-memory accumulator and reset counters.
accumulatorStream.reset();
accumulator.writtenBits( 0 ); // keep the bit count consistent with the emptied stream
documentCount = 0;
/** * Creates a new accumulator. * * @param ioFactory the factory that will be used to perform I/O. * @param basename the basename (usually a global filename followed by the field name, * separated by a dash). * @param payload the payload stored by this accumulator. * @param field the name of the accumulated field. * @param indexingType the type of indexing procedure. * @param documentsPerBatch the number of documents in each batch. * @param batchDir a directory for batch files; batch names will be relativised to this * directory if it is not <code>null</code>. */ public PayloadAccumulator( final IOFactory ioFactory, final String basename, final Payload payload, final String field, final IndexingType indexingType, final int documentsPerBatch, final File batchDir ) { this.ioFactory = ioFactory; this.basename = basename; this.payload = payload; this.field = field; this.indexingType = indexingType; if ( indexingType != IndexingType.STANDARD && indexingType != IndexingType.REMAPPED ) throw new UnsupportedOperationException( "Non-standard payload-based indices support only standard and remapped indexing" ); if ( indexingType == IndexingType.REMAPPED ) position = new long[ documentsPerBatch ]; this.batchDir = batchDir; this.cutPoints = new IntArrayList(); this.cutPoints.add( 0 ); flags = new EnumMap<Component, Coding>( CompressionFlags.DEFAULT_PAYLOAD_INDEX ); accumulatorStream = new FastByteArrayOutputStream(); accumulator = new OutputBitStream( accumulatorStream ); }
// Per-slot caches: bit streams backed by fresh in-memory byte arrays (the
// inner assignments also store the backing arrays for later direct access).
cachePointer[ i ] = new OutputBitStream( cachePointerByte[ i ] = new FastByteArrayOutputStream(), 0 );
cacheSkip[ i ] = new OutputBitStream( cacheSkipByte[ i ] = new FastByteArrayOutputStream(), 0 );
// Writes to a discarding sink — presumably used only to count how many bits
// the skip structure would occupy. TODO confirm against callers.
cacheSkipBitCount[ i ] = new OutputBitStream( NullOutputStream.getInstance(), 0 );
// Per-slot caches: bit streams backed by fresh in-memory byte arrays (the
// inner assignments also store the backing arrays for later direct access).
cachePointer[ i ] = new OutputBitStream( cachePointerByte[ i ] = new FastByteArrayOutputStream(), 0 );
cacheSkip[ i ] = new OutputBitStream( cacheSkipByte[ i ] = new FastByteArrayOutputStream(), 0 );
// Writes to a discarding sink — presumably used only to count how many bits
// the skip structure would occupy. TODO confirm against callers.
cacheSkipBitCount[ i ] = new OutputBitStream( NullOutputStream.getInstance(), 0 );
// Per-slot caches: bit streams backed by fresh in-memory byte arrays (the
// inner assignments also store the backing arrays for later direct access).
cachePointer[ i ] = new OutputBitStream( cachePointerByte[ i ] = new FastByteArrayOutputStream(), 0 );
cacheSkip[ i ] = new OutputBitStream( cacheSkipByte[ i ] = new FastByteArrayOutputStream(), 0 );
// Writes to a discarding sink — presumably used only to count how many bits
// the skip structure would occupy. TODO confirm against callers.
cacheSkipBitCount[ i ] = new OutputBitStream( NullOutputStream.getInstance(), 0 );