/**
 * Two instances are equal iff they wrap equal underlying buffers.
 * Uses getClass() (not instanceof) so a subclass never compares equal to this type.
 */
@Override
public boolean equals(Object obj) {
    if (obj == this) {
        return true;
    }
    if (obj == null || obj.getClass() != getClass()) {
        return false;
    }
    ReadOnlyRandomAccessBuffer other = (ReadOnlyRandomAccessBuffer) obj;
    return underlying.equals(other.underlying);
}
/**
 * Hash code combining the wrapped RAF's hash with the folded 64-bit size,
 * using the conventional 31-based accumulation (same value as the classic
 * two-step prime*result form).
 */
@Override
public int hashCode() {
    final int prime = 31;
    // Fold the long size into 32 bits exactly as Long.hashCode() would.
    final int sizeHash = (int) (realSize ^ (realSize >>> 32));
    return prime * (prime + raf.hashCode()) + sizeHash;
}
// Parse the fixed-size footer at the end of the storage file. Layout, reading
// backwards from EOF:
//   [basicSettingsLength:4][checksum:checksumLength][flags:4][checksumType:2][version:4][END_MAGIC:8]
rafLength = raf.size();
if (rafLength < 8 /* FIXME more! */)
    throw new StorageFormatException("Too short");
// The trailing 8 bytes must be the end-of-file magic.
raf.pread(rafLength - 8, buf, 0, 8);
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(buf));
if (dis.readLong() != END_MAGIC)
    // FIX: a magic mismatch previously did NOT abort — the versionBuf pread below
    // was accidentally guarded by this condition, so on a *valid* magic the version
    // bytes were never read at all. A mismatch must be fatal.
    throw new StorageFormatException("Wrong magic bytes");
raf.pread(rafLength - 12, versionBuf, 0, 4);
dis = new DataInputStream(new ByteArrayInputStream(versionBuf));
int version = dis.readInt();
raf.pread(rafLength - 14, checksumTypeBuf, 0, 2);
dis = new DataInputStream(new ByteArrayInputStream(checksumTypeBuf));
int checksumType = dis.readShort();
raf.pread(rafLength - 18, flagsBuf, 0, 4);
dis = new DataInputStream(new ByteArrayInputStream(flagsBuf));
int flags = dis.readInt();
// basicSettingsLength sits immediately before the checksum region.
raf.pread(rafLength - (22 + checksumLength), buf, 0, 4);
byte[] checksum = new byte[checksumLength];
raf.pread(rafLength - (18 + checksumLength), checksum, 0, checksumLength);
// Re-assemble [length:4][flags:4][checksumType:2] contiguously in buf so the
// whole header region can be processed as one unit.
System.arraycopy(flagsBuf, 0, buf, 4, 4);
System.arraycopy(checksumTypeBuf, 0, buf, 8, 2);
dis = new DataInputStream(new ByteArrayInputStream(buf));
int basicSettingsLength = dis.readInt();
/**
 * Migrate from one underlying LockableRandomAccessBuffer to another.
 * <p>
 * Builds the successor via {@link #innerMigrate}, transfers any open-lock we
 * hold onto the successor, then closes and frees the old buffer and swaps in
 * the new one. No-op if already closed; fails if already freed.
 *
 * @throws IOException if the buffer was already freed, or if locking the
 *         successor open fails — in that case the successor is closed and
 *         freed and the old buffer is left untouched.
 */
protected final void migrate() throws IOException {
    // FIX: acquire the lock *before* the try block. With lock() inside try, a
    // failed acquire would reach the finally and call unlock() on a lock we do
    // not hold, throwing IllegalMonitorStateException and masking the real error.
    lock.writeLock().lock();
    try {
        if (closed) return;
        if (underlying == null) throw new IOException("Already freed");
        LockableRandomAccessBuffer successor = innerMigrate(underlying);
        if (successor == null) throw new NullPointerException();
        RAFLock newLock = null;
        if (lockOpenCount > 0) {
            try {
                newLock = successor.lockOpen();
            } catch (IOException e) {
                // Roll back: dispose of the half-built successor, keep the old buffer.
                successor.close();
                successor.free();
                throw e;
            }
        }
        if (lockOpenCount > 0) underlyingLock.unlock();
        underlying.close();
        underlying.free();
        underlying = successor;
        underlyingLock = newLock;
    } finally {
        lock.writeLock().unlock();
    }
    afterFreeUnderlying();
}
/**
 * Read length bytes at fileOffset into buf[offset..offset+length), followed by
 * the stored checksum, then verify. The RAF is locked open across both preads
 * so the pooled fd cannot be closed between them.
 *
 * @throws ChecksumFailedException if verification fails; the freshly read
 *         region of buf is zeroed first so corrupt data cannot leak to callers
 */
void preadChecksummed(long fileOffset, byte[] buf, int offset, int length)
        throws IOException, ChecksumFailedException {
    final byte[] storedChecksum = new byte[checksumLength];
    final RAFLock openLock = raf.lockOpen();
    try {
        raf.pread(fileOffset, buf, offset, length);
        raf.pread(fileOffset + length, storedChecksum, 0, checksumLength);
    } finally {
        openLock.unlock();
    }
    if (checksumChecker.checkChecksum(buf, offset, length, storedChecksum)) {
        return;
    }
    // Corrupt: wipe what we just read before reporting the failure.
    Arrays.fill(buf, offset, offset + length, (byte) 0);
    throw new ChecksumFailedException();
}
/**
 * Write one check block for the given segment at its fixed on-disk slot.
 *
 * @param segNo        segment index
 * @param checkBlockNo check-block index within that segment
 * @param buf          exactly CHKBlock.DATA_LENGTH bytes of block data
 */
public void writeSegmentCheckBlock(int segNo, int checkBlockNo, byte[] buf) throws IOException {
    assert (segNo >= 0 && segNo < segments.length);
    assert (checkBlockNo >= 0 && checkBlockNo < segments[segNo].checkBlockCount);
    assert (buf.length == CHKBlock.DATA_LENGTH);
    // FIX: widen before multiplying — checkBlockNo * DATA_LENGTH was int*int and
    // could overflow before being added to the long base offset.
    long offset = offsetSegmentCheckBlocks[segNo] + (long) checkBlockNo * CHKBlock.DATA_LENGTH;
    raf.pwrite(offset, buf, 0, buf.length);
}
/**
 * Lock the main RAF open to avoid the pooled fd being closed while we are in
 * the middle of a major I/O operation involving many reads/writes.
 *
 * @return the open-lock; the caller must unlock() it when the bulk I/O is done
 * @throws IOException if the underlying RAF cannot be locked open
 */
RAFLock lockRAF() throws IOException {
    final RAFLock openLock = raf.lockOpen();
    return openLock;
}
/**
 * Wrap an underlying buffer as a temp RAB; its current size becomes ours.
 *
 * @param underlying   the backing buffer
 * @param creationTime timestamp recorded for this temp buffer
 * @param migrated     initial value for both hasMigrated and hasFreedRAM
 * @param tempBucket   the TempBucket this was converted from, if any
 */
public TempRandomAccessBuffer(LockableRandomAccessBuffer underlying, long creationTime,
        boolean migrated, TempBucket tempBucket) throws IOException {
    super(underlying, underlying.size());
    this.creationTime = creationTime;
    this.hasMigrated = migrated;
    this.hasFreedRAM = migrated;
    this.original = tempBucket;
    // Record an allocation stack trace only when leak tracing is enabled.
    if (TRACE_BUCKET_LEAKS) {
        tracer = new Throwable();
    } else {
        tracer = null;
    }
}
/** Status. Generally depends on the status of the individual segments...
 * Not persisted: Can be deduced from the state of the segments, except for the last 3 states,
 * which are only used during completion (we don't keep the storage around once we're
 * finished). */
enum Status {
    NOT_STARTED,
    STARTED,
    ENCODED_CROSS_SEGMENTS,
    ENCODED,
    GENERATING_METADATA, // completion-only: not deducible from segment state
    SUCCEEDED,           // completion-only
    FAILED               // completion-only
}
// NOTE(review): this span appears truncated by extraction — the
// SplitFileInserterStorage constructor call below is cut off mid-argument-list
// (no closing paren) and the matching try { is not visible here. Left
// byte-identical; only comments added.
raf.onResume(context);
originalData.onResume(context);
this.storage = new SplitFileInserterStorage(raf, originalData, this, context.fastWeakRandom,
        context.memoryLimitedJobRunner, context.getJobRunner(true), context.ticker,
// All three handlers below perform identical cleanup: close+free the storage
// RAF, close the original data (freeing it only when freeData is set), then
// rethrow as an InsertException BUCKET_ERROR.
// NOTE(review): a multi-catch (IOException | StorageFormatException |
// ChecksumFailedException e) would remove the triplication.
} catch (IOException e) {
    Logger.error(this, "Resume failed: "+e, e);
    raf.close();
    raf.free();
    originalData.close();
    if(freeData) originalData.free();
    throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
} catch (StorageFormatException e) {
    Logger.error(this, "Resume failed: "+e, e);
    raf.close();
    raf.free();
    originalData.close();
    if(freeData) originalData.free();
    throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
} catch (ChecksumFailedException e) {
    Logger.error(this, "Resume failed: "+e, e);
    raf.close();
    raf.free();
    originalData.close();
    if(freeData) originalData.free();
    throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
/**
 * Read one check block for the given segment from its fixed on-disk slot.
 *
 * @return a freshly allocated CHKBlock.DATA_LENGTH-byte array with the block data
 */
public byte[] readSegmentCheckBlock(int segNo, int checkBlockNo) throws IOException {
    assert (segNo >= 0 && segNo < segments.length);
    assert (checkBlockNo >= 0 && checkBlockNo < segments[segNo].checkBlockCount);
    byte[] buf = new byte[CHKBlock.DATA_LENGTH];
    // FIX: widen before multiplying — checkBlockNo * DATA_LENGTH was int*int and
    // could overflow before being added to the long base offset.
    long offset = offsetSegmentCheckBlocks[segNo] + (long) checkBlockNo * CHKBlock.DATA_LENGTH;
    raf.pread(offset, buf, 0, buf.length);
    return buf;
}
/** Delegates the actual free to the wrapped buffer. */
@Override
public void realFree() {
    underlying.free();
}
/**
 * Read one CHK-sized block from the on-disk slot assigned to it within the
 * given segment.
 *
 * NOTE(review): the guard tests logDEBUG but logs via Logger.minor — confirm
 * which log level is intended.
 */
byte[] readBlock(SplitFileFetcherSegmentStorage segment, int slotNumber) throws IOException {
    final long offset = segment.blockOffset(slotNumber);
    if (logDEBUG) {
        Logger.minor(this, "Reading block " + slotNumber + " for " + segment.segNo + "/"
                + segments.length + " from " + offset + " RAF length is " + raf.size());
    }
    final byte[] block = new byte[CHKBlock.DATA_LENGTH];
    raf.pread(offset, block, 0, block.length);
    return block;
}
/** Close the wrapped buffer exactly once; subsequent calls are no-ops. */
@Override
public void close() {
    if (isClosed) {
        return;
    }
    isClosed = true;
    underlyingBuffer.close();
}
// NOTE(review): this span appears garbled by extraction — the "} else {" below
// has no visible matching if, and the DataInputStream built here is not read
// within the visible range. Left byte-identical; only comments added.
this.random = random;
this.raf = raf;
rafLength = raf.size();
InputStream ois = new RAFInputStream(raf, 0, rafLength);
DataInputStream dis = new DataInputStream(ois);
} else {
    // Sanity checks: the restored metadata must agree with the actual data bucket.
    if(!originalData.equals(rafOrig))
        throw new StorageFormatException("Original data restored from different filename! Expected "+originalData+" but restored "+rafOrig);
    this.originalData = originalData;
    if(dataLength != originalData.size())
        throw new ResumeFailedException("Original data size is "+originalData.size()+" should be "+dataLength);
    // Block count must match ceil(dataLength / CHKBlock.DATA_LENGTH).
    if(((dataLength + CHKBlock.DATA_LENGTH - 1) / CHKBlock.DATA_LENGTH) != totalDataBlocks)
        throw new StorageFormatException("Data blocks "+totalDataBlocks+" not compatible with size "+dataLength);
/**
 * Write the trivial progress record. If the fetch already finished (failed or
 * succeeded) writes a single false and stops; otherwise writes true, then
 * either the truncation-file path + RAF size or the serialized RAF, then the
 * token.
 *
 * NOTE(review): the branch tests callbackCompleteViaTruncation for null but
 * serializes fileCompleteViaTruncation — assumes the two are set together;
 * confirm that invariant.
 *
 * @return true iff progress was written (the fetch is still live)
 */
public boolean writeTrivialProgress(DataOutputStream dos) throws IOException {
    final boolean finished;
    synchronized (this) {
        finished = failed || succeeded;
    }
    if (finished) {
        dos.writeBoolean(false);
        return false;
    }
    dos.writeBoolean(true);
    if (callbackCompleteViaTruncation != null) {
        dos.writeBoolean(true);
        dos.writeUTF(fileCompleteViaTruncation.toString());
        dos.writeLong(raf.size());
    } else {
        dos.writeBoolean(false);
        raf.storeTo(dos);
    }
    dos.writeLong(token);
    return true;
}
// NOTE(review): this span appears truncated — the final catch block is not
// closed within the visible range, and 'salter' is never used here (confirm
// against the full method). Left byte-identical; only comments added.
try {
    KeySalter salter = getSalter();
    raf.onResume(context);
    this.storage = new SplitFileFetcherStorage(raf, realTimeFlag, this, blockFetchContext,
            context.random, context.jobRunner,
            callbackCompleteViaTruncation != null);
// Every handler frees the RAF before translating into a FetchException.
} catch (ResumeFailedException e) {
    raf.free();
    Logger.error(this, "Failed to resume storage file: "+e+" for "+raf, e);
    throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
} catch (IOException e) {
    raf.free();
    Logger.error(this, "Failed to resume due to I/O error: "+e+" raf = "+raf, e);
    throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
} catch (StorageFormatException e) {
    raf.free();
    Logger.error(this, "Failed to resume due to storage error: "+e+" raf = "+raf, e);
    throw new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Resume failed: "+e, e);
} catch (FetchException e) {
    // Already a FetchException: free the RAF and rethrow unchanged.
    raf.free();
    throw e;
/**
 * Serialize this wrapper: the MAGIC tag first, then the wrapped buffer's own
 * serialization via its storeTo().
 */
@Override
public void storeTo(DataOutputStream dos) throws IOException {
    dos.writeInt(MAGIC);
    underlying.storeTo(dos);
}
/**
 * Restore transient state after resume: re-acquire the persistent bucket
 * factory from the context, then resume the wrapped buffer.
 */
@Override
public void onResume(ClientContext context) throws ResumeFailedException {
    this.factory = context.persistentBucketFactory;
    underlying.onResume(context);
}