/** Returns the integer value associated with the given enum key, using the key's
 * lower-cased {@link Enum#name()} as the property name.
 *
 * @param key the property key; its name is lower-cased for the lookup.
 * @param arg presumably the default returned when the key is absent — confirm against the String overload.
 * @return the integer value associated with {@code key}, or {@code arg} if missing.
 */
public int getInt( final Enum<?> key, int arg ) {
	// Lower-case with Locale.ROOT so the enum-name-to-property-name mapping is
	// locale-independent (the default locale, e.g. Turkish, maps 'I' to dotless 'ı',
	// which would silently break lookups of keys containing 'I').
	return getInt( key.name().toLowerCase( java.util.Locale.ROOT ), arg );
}
/** Returns the integer value associated with the given enum key, using the key's
 * lower-cased {@link Enum#name()} as the property name.
 *
 * @param key the property key; its name is lower-cased for the lookup.
 * @return the integer value associated with {@code key}.
 */
public int getInt( final Enum<?> key ) {
	// Locale.ROOT keeps the lower-casing locale-independent; the no-arg toLowerCase()
	// uses the platform default locale and can mangle ASCII enum names (Turkish 'I').
	return getInt( key.name().toLowerCase( java.util.Locale.ROOT ) );
}
/** Returns the integer value associated with the given enum key, using the key's
 * lower-cased {@link Enum#name()} as the property name.
 *
 * @param key the property key; its name is lower-cased for the lookup.
 * @return the integer value associated with {@code key}.
 */
public int getInt( final Enum<?> key ) {
	// Locale.ROOT keeps the lower-casing locale-independent; the no-arg toLowerCase()
	// uses the platform default locale and can mangle ASCII enum names (Turkish 'I').
	return getInt( key.name().toLowerCase( java.util.Locale.ROOT ) );
}
/** Returns the integer value associated with the given enum key, using the key's
 * lower-cased {@link Enum#name()} as the property name.
 *
 * @param key the property key; its name is lower-cased for the lookup.
 * @return the integer value associated with {@code key}.
 */
public int getInt(final Enum<?> key) {
	// Locale.ROOT keeps the lower-casing locale-independent; the no-arg toLowerCase()
	// uses the platform default locale and can mangle ASCII enum names (Turkish 'I').
	return getInt(key.name().toLowerCase(java.util.Locale.ROOT));
}
/** Returns the integer value associated with the given enum key, using the key's
 * lower-cased {@link Enum#name()} as the property name.
 *
 * @param key the property key; its name is lower-cased for the lookup.
 * @param arg presumably the default returned when the key is absent — confirm against the String overload.
 * @return the integer value associated with {@code key}, or {@code arg} if missing.
 */
public int getInt(final Enum<?> key, int arg) {
	// Locale.ROOT keeps the lower-casing locale-independent; the no-arg toLowerCase()
	// uses the platform default locale and can mangle ASCII enum names (Turkish 'I').
	return getInt(key.name().toLowerCase(java.util.Locale.ROOT), arg);
}
/** Returns the integer value associated with the given enum key, using the key's
 * lower-cased {@link Enum#name()} as the property name.
 *
 * @param key the property key; its name is lower-cased for the lookup.
 * @param arg presumably the default returned when the key is absent — confirm against the String overload.
 * @return the integer value associated with {@code key}, or {@code arg} if missing.
 */
public int getInt( final Enum<?> key, int arg ) {
	// Locale.ROOT keeps the lower-casing locale-independent; the no-arg toLowerCase()
	// uses the platform default locale and can mangle ASCII enum names (Turkish 'I').
	return getInt( key.name().toLowerCase( java.util.Locale.ROOT ), arg );
}
// Loads core index statistics from the property file. DOCUMENTS, TERMS and POSTINGS are
// mandatory (no default supplied); OCCURRENCES, MAXCOUNT, skip-quantum and skip-height
// fall back to -1 — presumably a "not available" sentinel, confirm against consumers.
// FIELD defaults to the basename's file name, and BUFFERSIZE to the library default.
final int numberOfDocuments = properties.getInt( Index.PropertyKeys.DOCUMENTS ); final int numberOfTerms = properties.getInt( Index.PropertyKeys.TERMS ); final long numberOfPostings= properties.getLong( Index.PropertyKeys.POSTINGS ); final long numberOfOccurrences = properties.getLong( Index.PropertyKeys.OCCURRENCES, -1 ); final int maxCount = properties.getInt( Index.PropertyKeys.MAXCOUNT, -1 ); final String field = properties.getString( Index.PropertyKeys.FIELD, new File( basename.toString() ).getName() ); final int quantum = properties.getInt( BitStreamIndex.PropertyKeys.SKIPQUANTUM, -1 ); final int height = properties.getInt( BitStreamIndex.PropertyKeys.SKIPHEIGHT, -1 ); final int bufferSize = properties.getInt( BitStreamIndex.PropertyKeys.BUFFERSIZE, BitStreamIndex.DEFAULT_BUFFER_SIZE );
// Variant of the sequence above without DOCUMENTS/TERMS; additionally resolves OFFSETSTEP
// from the query properties (URI options), falling back to DEFAULT_OFFSET_STEP when the
// query properties are absent or the key is unset.
final long numberOfPostings= properties.getLong( Index.PropertyKeys.POSTINGS ); final long numberOfOccurrences = properties.getLong( Index.PropertyKeys.OCCURRENCES, -1 ); final int maxCount = properties.getInt( Index.PropertyKeys.MAXCOUNT, -1 ); final String field = properties.getString( Index.PropertyKeys.FIELD, new File( basename.toString() ).getName() ); final int skipQuantum = properties.getInt( BitStreamIndex.PropertyKeys.SKIPQUANTUM, -1 ); final int bufferSize = properties.getInt( BitStreamIndex.PropertyKeys.BUFFERSIZE, BitStreamIndex.DEFAULT_BUFFER_SIZE ); final int offsetStep = queryProperties != null && queryProperties.get( UriKeys.OFFSETSTEP ) != null ? Integer.parseInt( queryProperties.get( UriKeys.OFFSETSTEP ) ) : DEFAULT_OFFSET_STEP; final int height = properties.getInt( BitStreamIndex.PropertyKeys.SKIPHEIGHT, -1 );
// Full variant: mandatory DOCUMENTS/TERMS/POSTINGS plus the optional statistics and the
// OFFSETSTEP query override, as in the two fragments above.
final int numberOfDocuments = properties.getInt( Index.PropertyKeys.DOCUMENTS ); final int numberOfTerms = properties.getInt( Index.PropertyKeys.TERMS ); final long numberOfPostings= properties.getLong( Index.PropertyKeys.POSTINGS ); final long numberOfOccurrences = properties.getLong( Index.PropertyKeys.OCCURRENCES, -1 ); final int maxCount = properties.getInt( Index.PropertyKeys.MAXCOUNT, -1 ); final String field = properties.getString( Index.PropertyKeys.FIELD, new File( basename.toString() ).getName() ); final int skipQuantum = properties.getInt( BitStreamIndex.PropertyKeys.SKIPQUANTUM, -1 ); final int bufferSize = properties.getInt( BitStreamIndex.PropertyKeys.BUFFERSIZE, BitStreamIndex.DEFAULT_BUFFER_SIZE ); final int offsetStep = queryProperties != null && queryProperties.get( UriKeys.OFFSETSTEP ) != null ? Integer.parseInt( queryProperties.get( UriKeys.OFFSETSTEP ) ) : DEFAULT_OFFSET_STEP; final int height = properties.getInt( BitStreamIndex.PropertyKeys.SKIPHEIGHT, -1 );
// Four near-identical truncated fragments: each reads the document count, optionally loads
// per-document sizes when a SIZES URI key is present (IntBigList vs. IntList variants), and
// begins constructing a LexicalCluster. The fragments are cut off mid-argument-list.
// NOTE(review): the argument group "termFilter, numberOfDocuments, TERMS, POSTINGS,
// OCCURRENCES, MAXCOUNT, payload, hasCounts, hasPositions" appears THREE times in each
// constructor call — looks like accidental duplication; verify against the LexicalCluster
// constructor signature before trusting these fragments.
final int numberOfDocuments = properties.getInt( Index.PropertyKeys.DOCUMENTS ); final IntBigList sizes = queryProperties != null && queryProperties.containsKey( Index.UriKeys.SIZES ) ? DiskBasedIndex.readSizes( queryProperties.get( Index.UriKeys.SIZES ), numberOfDocuments ) : null; return new LexicalCluster( localIndex, (LexicalClusteringStrategy)strategy, termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions,
final int numberOfDocuments = properties.getInt( Index.PropertyKeys.DOCUMENTS ); final IntList sizes = queryProperties != null && queryProperties.containsKey( Index.UriKeys.SIZES ) ? DiskBasedIndex.readSizes( queryProperties.get( Index.UriKeys.SIZES ), numberOfDocuments ) : null; return new LexicalCluster( localIndex, (LexicalClusteringStrategy)strategy, termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions,
final int numberOfDocuments = properties.getInt( Index.PropertyKeys.DOCUMENTS ); final IntList sizes = queryProperties != null && queryProperties.containsKey( Index.UriKeys.SIZES ) ? DiskBasedIndex.readSizes( queryProperties.get( Index.UriKeys.SIZES ), numberOfDocuments ) : null; return new LexicalCluster( localIndex, (LexicalClusteringStrategy)strategy, termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions,
final int numberOfDocuments = properties.getInt( Index.PropertyKeys.DOCUMENTS ); final IntBigList sizes = queryProperties != null && queryProperties.containsKey( Index.UriKeys.SIZES ) ? DiskBasedIndex.readSizes( queryProperties.get( Index.UriKeys.SIZES ), numberOfDocuments ) : null; return new LexicalCluster( localIndex, (LexicalClusteringStrategy)strategy, termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions, Index.getTermProcessor( properties ), termFilter, numberOfDocuments, properties.getInt( Index.PropertyKeys.TERMS ), properties.getLong( Index.PropertyKeys.POSTINGS ), properties.getLong( Index.PropertyKeys.OCCURRENCES ), properties.getInt( Index.PropertyKeys.MAXCOUNT ), payload, hasCounts, hasPositions,
// Loads posting-level index statistics: POSTINGS is mandatory; OCCURRENCES, MAXCOUNT,
// skip-quantum and skip-height default to -1 (presumably "not available" — confirm against
// consumers); FIELD defaults to the basename's file name, BUFFERSIZE to the library default.
final long numberOfPostings= properties.getLong( Index.PropertyKeys.POSTINGS ); final long numberOfOccurrences = properties.getLong( Index.PropertyKeys.OCCURRENCES, -1 ); final int maxCount = properties.getInt( Index.PropertyKeys.MAXCOUNT, -1 ); final String field = properties.getString( Index.PropertyKeys.FIELD, new File( basename.toString() ).getName() ); final int quantum = properties.getInt( BitStreamIndex.PropertyKeys.SKIPQUANTUM, -1 ); final int height = properties.getInt( BitStreamIndex.PropertyKeys.SKIPHEIGHT, -1 ); final int bufferSize = properties.getInt( BitStreamIndex.PropertyKeys.BUFFERSIZE, BitStreamIndex.DEFAULT_BUFFER_SIZE );
// Four truncated variants (the opening brace is never closed in view) that, for VIRTUAL
// fields, read the number of batches from the field's batch property file and build the
// per-batch input basenames via Scan.batchBasename. They differ only in how the property
// file is located/loaded: via a File rooted at batchDir, via an IOFactory, or directly
// from the field basename — presumably successive revisions of the same code; confirm.
final int batches; if ( factory.fieldType( indexedField[ i ] ) == DocumentFactory.FieldType.VIRTUAL ) { batches = new Properties( new File( batchDir, basenameField[ i ] ) + DiskBasedIndex.PROPERTIES_EXTENSION ).getInt( Index.PropertyKeys.BATCHES ); final String[] inputBasename = new String[ batches ]; for( int j = 0; j < inputBasename.length; j++ ) inputBasename[ j ] = Scan.batchBasename( j, basenameField[ i ], batchDir );
final int batches; if ( factory.fieldType( indexedField[ i ] ) == DocumentFactory.FieldType.VIRTUAL ) { batches = IOFactories.loadProperties( ioFactory, basenameField[ i ] + DiskBasedIndex.PROPERTIES_EXTENSION ).getInt( Index.PropertyKeys.BATCHES ); final String[] inputBasename = new String[ batches ]; for( int j = 0; j < inputBasename.length; j++ ) inputBasename[ j ] = Scan.batchBasename( j, basenameField[ i ], batchDir );
final int batches; if ( factory.fieldType( indexedField[ i ] ) == DocumentFactory.FieldType.VIRTUAL ) { batches = IOFactories.loadProperties( ioFactory, basenameField[ i ] + DiskBasedIndex.PROPERTIES_EXTENSION ).getInt( Index.PropertyKeys.BATCHES ); final String[] inputBasename = new String[ batches ]; for( int j = 0; j < inputBasename.length; j++ ) inputBasename[ j ] = Scan.batchBasename( j, basenameField[ i ], batchDir );
final int batches; if ( factory.fieldType( indexedField[ i ] ) == DocumentFactory.FieldType.VIRTUAL ) { batches = new Properties( basenameField[ i ] + DiskBasedIndex.PROPERTIES_EXTENSION ).getInt( Index.PropertyKeys.BATCHES ); final String[] inputBasename = new String[ batches ]; for( int j = 0; j < inputBasename.length; j++ ) inputBasename[ j ] = Scan.batchBasename( j, basenameField[ i ], batchDir );
// Records cluster-level metadata in the global property set: BLOOM is true when a Bloom
// filter precision was requested (nonzero), FLAT is true when the input index has at most
// one term, and MAXCOUNT/MAXDOCSIZE are copied verbatim from the input properties.
globalProperties.setProperty( DocumentalCluster.PropertyKeys.BLOOM, bloomFilterPrecision != 0 ); globalProperties.setProperty( DocumentalCluster.PropertyKeys.FLAT, inputProperties.getInt( Index.PropertyKeys.TERMS ) <= 1 ); globalProperties.setProperty( Index.PropertyKeys.MAXCOUNT, inputProperties.getProperty( Index.PropertyKeys.MAXCOUNT ) ); globalProperties.setProperty( Index.PropertyKeys.MAXDOCSIZE, inputProperties.getProperty( Index.PropertyKeys.MAXDOCSIZE ) );
// Duplicate of the fragment above (same four property writes).
globalProperties.setProperty( DocumentalCluster.PropertyKeys.BLOOM, bloomFilterPrecision != 0 ); globalProperties.setProperty( DocumentalCluster.PropertyKeys.FLAT, inputProperties.getInt( Index.PropertyKeys.TERMS ) <= 1 ); globalProperties.setProperty( Index.PropertyKeys.MAXCOUNT, inputProperties.getProperty( Index.PropertyKeys.MAXCOUNT ) ); globalProperties.setProperty( Index.PropertyKeys.MAXDOCSIZE, inputProperties.getProperty( Index.PropertyKeys.MAXDOCSIZE ) );