/**
 * Appends every value in {@code vs}, delegating to the ranged overload
 * with the full array bounds.
 */
@Override
public void append( long[] vs )
{
    append( vs, 0, vs.length );
}
/**
 * Records a gap by appending the sentinel value {@code -1} to {@code data}.
 */
@Override
public void addGap( )
{
    data.append( -1 );
}
/**
 * Records a gap by appending the sentinel value {@code -1} to {@code data}.
 */
@Override
public void addGap( )
{
    data.append( -1 );
}
}
/**
 * Reads a single {@code long} from the stream and appends it to {@code data}.
 *
 * @throws IOException if reading from {@code in} fails
 */
@Override
public void addData( DataInputStream in ) throws IOException
{
    long value = in.readLong( );
    data.append( value );
}
}
/**
 * Appends the remaining values of {@code vs}, delegating to the
 * count-taking overload.
 */
@Override
public void append( LongBuffer vs )
{
    append( vs, vs.remaining( ) );
}
/**
 * Appends all values in {@code vs}, delegating to the ranged overload
 * with the full bounds {@code [0, vs.n())}.
 */
@Override
public void append( Longs vs )
{
    append( vs, 0, vs.n( ) );
}
/**
 * Parses {@code token} with {@code dateFormat} and appends the resulting
 * epoch-millis value. An empty token is recorded as a gap. On a parse
 * failure, behavior depends on the parser mode: fail-on-error throws,
 * otherwise a warning is logged and a gap is recorded instead.
 *
 * @throws DsplException if parsing fails and the parser is in fail-on-error mode
 */
@Override
public void addData( String token ) throws DsplException
{
    if ( token.isEmpty( ) )
    {
        addGap( );
        return;
    }

    try
    {
        data.append( dateFormat.parseMillis( token ) );
    }
    catch ( IllegalArgumentException e )
    {
        if ( dsplParser.isFailOnErrorMode( ) )
        {
            throw new DsplException( "Problem parsing: %s", e, token );
        }

        logWarning( logger, "Trouble parsing date: %s. Adding gap instead.", token );
        addGap( );
    }
}
/**
 * Parses {@code token} via {@code parse} and appends the resulting value.
 * An empty token is recorded as a gap. On a parse failure, behavior depends
 * on the parser mode: fail-on-error throws, otherwise a warning is logged
 * and a gap is recorded instead.
 *
 * @throws DsplException if parsing fails and the parser is in fail-on-error mode
 */
@Override
public void addData( String token ) throws DsplException
{
    if ( token.isEmpty( ) )
    {
        addGap( );
        return;
    }

    try
    {
        data.append( parse( token ) );
    }
    catch ( Exception e )
    {
        if ( dsplParser.isFailOnErrorMode( ) )
        {
            throw new DsplException( "Problem parsing: %s", e, token );
        }

        logWarning( logger, "Trouble parsing date: %s. Adding gap instead.", token );
        addGap( );
    }
}
/** * If {@code x(v)} or {@code y(v)} returns {@code NaN}, this method returns * immediately without adding {@code v} to the tree. */ public void add( long v ) { float x = x( v ); if ( Float.isNaN( x ) ) return; float y = y( v ); if ( Float.isNaN( y ) ) return; LeafNode<Bucket> leaf = leaf( x, y ); Bucket bucket = leaf.bucket; // The default return value for bucket.dupes is set to bucket.singles, // so iff bucket.dupes does not contain xyKey, we will end up appending // to bucket.singles -- which is exactly what we want. // // This is confusing to read, but it avoids double-checking the "contains // key?" condition. The map is already doing the check for us -- not worth // doing the same check again. // long xyKey = xyToKey( x, y ); bucket.dupes.get( xyKey ).append( v ); if ( bucketSize( bucket ) > maxBucketSize ) { compactBucket( bucket ); if ( bucketSize( bucket ) > 0.9 * maxBucketSize ) splitLeaf( leaf ); } }
/**
 * Splits {@code bucket} into four new buckets, one per quadrant around
 * ({@code xDivider}, {@code yDivider}). Singles are routed by the (x, y)
 * position of each value; dupes entries are routed by the coordinates
 * packed into their keys, with each {@code LongsArray} moved as a whole.
 */
@Override
protected Bucket[] splitBucket( Bucket bucket, float xDivider, float yDivider )
{
    Bucket[] children = new Bucket[4];
    for ( int q = 0; q < children.length; q++ )
    {
        children[q] = new Bucket( );
    }

    // Route each single value to the quadrant of its own position
    long[] singles = bucket.singles.a;
    int singlesCount = bucket.singles.n;
    for ( int i = 0; i < singlesCount; i++ )
    {
        long v = singles[i];
        int quad = quadrant( xDivider, yDivider, x( v ), y( v ) );
        children[quad].singles.append( v );
    }

    // Route each dupes entry to the quadrant of the position in its key
    for ( Entry<LongsArray> entry : bucket.dupes.long2ObjectEntrySet( ) )
    {
        long xyKey = entry.getLongKey( );
        int quad = quadrant( xDivider, yDivider, xFromKey( xyKey ), yFromKey( xyKey ) );
        children[quad].dupes.put( xyKey, entry.getValue( ) );
    }

    return children;
}
/**
 * Splits {@code bucket} into four new buckets, one per quadrant around
 * ({@code xDivider}, {@code yDivider}). Singles are stored as consecutive
 * (v1, v2) pairs, so the array is walked with a stride of 2 and each pair
 * is appended to its quadrant's bucket as a unit. Dupes entries are routed
 * by the coordinates packed into their keys.
 */
@Override
protected Bucket[] splitBucket( Bucket bucket, float xDivider, float yDivider )
{
    Bucket[] children = new Bucket[4];
    for ( int q = 0; q < children.length; q++ )
    {
        children[q] = new Bucket( );
    }

    // Route each (v1, v2) pair to the quadrant of the pair's position
    long[] singles = bucket.singles.a;
    int singlesCount = bucket.singles.n;
    for ( int i = 0; i < singlesCount; i += 2 )
    {
        long v1 = singles[i];
        long v2 = singles[i + 1];
        int quad = quadrant( xDivider, yDivider, x( v1, v2 ), y( v1, v2 ) );
        children[quad].singles.append( singles, i, i + 2 );
    }

    // Route each dupes entry to the quadrant of the position in its key
    for ( Entry<LongsArray> entry : bucket.dupes.long2ObjectEntrySet( ) )
    {
        long xyKey = entry.getLongKey( );
        int quad = quadrant( xDivider, yDivider, xFromKey( xyKey ), yFromKey( xyKey ) );
        children[quad].dupes.put( xyKey, entry.getValue( ) );
    }

    return children;
}