/**
 * Upon construction, this method will be called with the region to be governed. It will be
 * called once and only once.
 *
 * @param region the region this policy applies to; may only be set once
 */
protected void configureForRegion(HRegion region) {
  // BUGFIX: Guava Preconditions uses %s-style templates, not SLF4J-style {} placeholders;
  // with "{}" the already-configured region was never substituted into the failure message.
  Preconditions.checkState(this.region == null, "Policy already configured for region %s",
    this.region);
  this.region = region;
}
/**
 * Sanity check run before every control operation: the invoking thread must be the one
 * currently driving onNext/onHeartbeat, and the controller must still be INITIALIZED.
 */
private void preCheck() {
  Thread current = Thread.currentThread();
  Preconditions.checkState(current == callerThread,
    "The current thread is %s, expected thread is %s, "
      + "you should not call this method outside onNext or onHeartbeat",
    current, callerThread);
  Preconditions.checkState(state.equals(ScanControllerState.INITIALIZED),
    "Invalid Stopper state %s", state);
}
/**
 * remove the specified dead server
 *
 * @param deadServerName the dead server name
 * @return true if this server was removed
 */
public synchronized boolean removeDeadServer(final ServerName deadServerName) {
  // Varargs template defers message construction to the failure path; the original built the
  // concatenated string on every call, even when the check passed.
  Preconditions.checkState(!processingServers.contains(deadServerName),
    "Asked to remove server still in processingServers set %s (numProcessing=%s)",
    deadServerName, processingServers.size());
  // Map.remove returns the previous value; non-null means the server was present and removed.
  return deadServers.remove(deadServerName) != null;
}
}
public void closeStreamReaders(boolean evictOnClose) throws IOException { synchronized (this) { for (StoreFileReader entry : streamReaders) { //closing the reader will remove itself from streamReaders thanks to the Listener entry.close(evictOnClose); } int size = streamReaders.size(); Preconditions.checkState(size == 0, "There are still streamReaders post close: " + size); } }
/**
 * Shuts down this writer. The internal buffer must already be empty (drained by a prior
 * flush) — otherwise buffered data would be silently dropped, so we fail fast instead.
 *
 * @throws IOException if closing the underlying output stream fails
 */
@Override
public void close() throws IOException {
  Preconditions.checkState(buffer.size() == 0, "should call flush first before calling close");
  // Stop accepting new background work before closing the underlying stream.
  executor.shutdown();
  out.close();
}
/** * Transitions the block writer from the "writing" state to the "block * ready" state. Does nothing if a block is already finished. */ void ensureBlockReady() throws IOException { Preconditions.checkState(state != State.INIT, "Unexpected state: " + state); if (state == State.BLOCK_READY) { return; } // This will set state to BLOCK_READY. finishBlock(); }
/**
 * Launches the background pause-monitoring daemon thread. May be called at most once.
 */
public void start() {
  Preconditions.checkState(monitorThread == null, "Already started");
  Thread t = new Thread(new Monitor(), "JvmPauseMonitor");
  t.setDaemon(true);
  monitorThread = t;
  t.start();
}
/**
 * Actually claim the memory for this chunk. This should only be called from the thread that
 * constructed the chunk. It is thread-safe against other threads calling alloc(), who will block
 * until the allocation is complete.
 */
public void init() {
  assert nextFreeOffset.get() == UNINITIALIZED;
  try {
    allocateDataBuffer();
  } catch (OutOfMemoryError e) {
    // Publish the failure so any alloc() callers blocked on this chunk observe OOM
    // rather than waiting forever on an initialization that will never complete.
    boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
    assert failInit; // should be true.
    throw e;
  }
  // Mark that it's ready for use
  // Move 4 bytes since the first 4 bytes are having the chunkid in it
  boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, Bytes.SIZEOF_INT);
  // We should always succeed the above CAS since only one thread
  // calls init()!
  Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
}
@Override public byte[][] split(int n) { Preconditions.checkArgument(lastRowInt.compareTo(firstRowInt) > 0, "last row (%s) is configured less than first row (%s)", lastRow, firstRow); // +1 to range because the last row is inclusive BigInteger range = lastRowInt.subtract(firstRowInt).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(n)) >= 0, "split granularity (%s) is greater than the range (%s)", n, range); BigInteger[] splits = new BigInteger[n - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(n)); for (int i = 1; i < n; i++) { // NOTE: this means the last region gets all the slop. // This is not a big deal if we're assuming n << MAXHEX splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger .valueOf(i))); } return convertToBytes(splits); }
// Fail fast if close/cleanup has already completed — this operation must not run afterwards.
// NOTE(review): enclosing method lies outside this view; confirm intent against its callers.
Preconditions.checkState(!closeAndCleanCompleted);
@Override public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive) { BigInteger s = convertToBigInteger(start); BigInteger e = convertToBigInteger(end); Preconditions.checkArgument(e.compareTo(s) > 0, "last row (%s) is configured less than first row (%s)", rowToStr(end), end); // +1 to range because the last row is inclusive BigInteger range = e.subtract(s).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(numSplits)) >= 0, "split granularity (%s) is greater than the range (%s)", numSplits, range); BigInteger[] splits = new BigInteger[numSplits - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(numSplits)); for (int i = 1; i < numSplits; i++) { // NOTE: this means the last region gets all the slop. // This is not a big deal if we're assuming n << MAXHEX splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger .valueOf(i))); } if (inclusive) { BigInteger[] inclusiveSplitPoints = new BigInteger[numSplits + 1]; inclusiveSplitPoints[0] = convertToBigInteger(start); inclusiveSplitPoints[numSplits] = convertToBigInteger(end); System.arraycopy(splits, 0, inclusiveSplitPoints, 1, splits.length); return convertToBytes(inclusiveSplitPoints); } else { return convertToBytes(splits); } }
@Override public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive) { if (Arrays.equals(start, HConstants.EMPTY_BYTE_ARRAY)) { start = firstRowBytes; } if (Arrays.equals(end, HConstants.EMPTY_BYTE_ARRAY)) { end = lastRowBytes; } Preconditions.checkArgument( Bytes.compareTo(end, start) > 0, "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(end), Bytes.toStringBinary(start)); byte[][] splits = Bytes.split(start, end, true, numSplits - 1); Preconditions.checkState(splits != null, "Could not calculate input splits with given user input: " + this); if (inclusive) { return splits; } else { // remove endpoints, which are included in the splits list return Arrays.copyOfRange(splits, 1, splits.length - 1); } }
@Override public byte[][] split(int numRegions) { Preconditions.checkArgument( Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(lastRowBytes), Bytes.toStringBinary(firstRowBytes)); byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, numRegions - 1); Preconditions.checkState(splits != null, "Could not split region with given user input: " + this); // remove endpoints, which are included in the splits list return splits == null? null: Arrays.copyOfRange(splits, 1, splits.length - 1); }
Context context) throws IOException { Preconditions.checkState(values != null, "values passed to the map is null"); // NOTE(review): this guards a caller-supplied argument — checkNotNull/checkArgument is more conventional than checkState; confirm no caller depends on IllegalStateException before changing.
/**
 * Finalizes the current block: sends the trailing end-of-block packet header to every datanode
 * in the pipeline and waits for their acknowledgement.
 * <p>
 * Precondition: all outstanding data packets must already be acked (flush first), and the
 * stream must still be in STREAMING state.
 *
 * @throws IOException if the stream is already broken or a datanode fails the final ack
 */
private void endBlock() throws IOException {
  Preconditions.checkState(waitingAckQueue.isEmpty(),
    "should call flush first before calling close");
  if (state != State.STREAMING) {
    throw new IOException("stream already broken");
  }
  state = State.CLOSING;
  long finalizedLength = ackedBlockLength;
  // NOTE(review): the boolean args appear to mark this header as the last packet in the block
  // with no payload — confirm against the PacketHeader constructor signature.
  PacketHeader header = new PacketHeader(4, finalizedLength, nextPacketSeqno, true, 0, false);
  // The data buffer is no longer needed; release it before the final round-trip.
  buf.release();
  buf = null;
  int headerLen = header.getSerializedSize();
  ByteBuf headerBuf = alloc.directBuffer(headerLen);
  header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
  headerBuf.writerIndex(headerLen);
  CompletableFuture<Long> future = new CompletableFuture<>();
  // Register the callback before writing so the ack cannot race past us.
  waitingAckQueue.add(new Callback(future, finalizedLength, datanodeList));
  // Each datanode channel gets its own retained duplicate; release our original ref afterwards.
  datanodeList.forEach(ch -> ch.writeAndFlush(headerBuf.retainedDuplicate()));
  headerBuf.release();
  try {
    future.get();
  } catch (InterruptedException e) {
    // Convert to the IO-flavored interrupted exception expected by stream callers.
    throw (IOException) new InterruptedIOException().initCause(e);
  } catch (ExecutionException e) {
    // Unwrap: rethrow IOExceptions as-is, wrap anything else.
    Throwable cause = e.getCause();
    Throwables.propagateIfPossible(cause, IOException.class);
    throw new IOException(cause);
  }
}
/**
 * Wraps the given stream so that data written to it is encrypted with the key and IV supplied
 * by the context.
 *
 * @param out the underlying stream to write ciphertext to
 * @param context the crypto context; must carry a key
 * @param iv the initialization vector
 * @return the encrypting stream
 * @throws IOException if the encryption stream cannot be created
 */
@Override
public OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv)
  throws IOException {
  // Message-less checkNotNull produces an anonymous NPE; name the offending argument.
  Preconditions.checkNotNull(context, "context");
  Preconditions.checkState(context.getKey() != null, "Context does not have a key");
  Preconditions.checkNotNull(iv, "iv");
  Encryptor e = getEncryptor();
  e.setKey(context.getKey());
  e.setIv(iv);
  return e.createEncryptionStream(out);
}
/**
 * Wraps the given stream so that data written to it is encrypted with the key and IV supplied
 * by the context.
 *
 * @param out the underlying stream to write ciphertext to
 * @param context the crypto context; must carry a key
 * @param iv the initialization vector
 * @return the encrypting stream
 * @throws IOException if the encryption stream cannot be created
 */
@Override
public OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv)
  throws IOException {
  // Message-less checkNotNull produces an anonymous NPE; name the offending argument.
  Preconditions.checkNotNull(context, "context");
  Preconditions.checkState(context.getKey() != null, "Context does not have a key");
  Preconditions.checkNotNull(iv, "iv");
  Encryptor e = getEncryptor();
  e.setKey(context.getKey());
  e.setIv(iv);
  return e.createEncryptionStream(out);
}
/**
 * Wraps the given stream so that data read from it is decrypted with the key and IV supplied
 * by the context.
 *
 * @param in the underlying stream to read ciphertext from
 * @param context the crypto context; must carry a key
 * @param iv the initialization vector
 * @return the decrypting stream
 * @throws IOException if the decryption stream cannot be created
 */
@Override
public InputStream createDecryptionStream(InputStream in, Context context, byte[] iv)
  throws IOException {
  // Message-less checkNotNull produces an anonymous NPE; name the offending argument.
  Preconditions.checkNotNull(context, "context");
  Preconditions.checkState(context.getKey() != null, "Context does not have a key");
  Preconditions.checkNotNull(iv, "iv");
  Decryptor d = getDecryptor();
  d.setKey(context.getKey());
  d.setIv(iv);
  return d.createDecryptionStream(in);
}
/**
 * Wraps the given stream so that data read from it is decrypted with the key and IV supplied
 * by the context.
 *
 * @param in the underlying stream to read ciphertext from
 * @param context the crypto context; must carry a key
 * @param iv the initialization vector
 * @return the decrypting stream
 * @throws IOException if the decryption stream cannot be created
 */
@Override
public InputStream createDecryptionStream(InputStream in, Context context, byte[] iv)
  throws IOException {
  // Message-less checkNotNull produces an anonymous NPE; name the offending argument.
  Preconditions.checkNotNull(context, "context");
  Preconditions.checkState(context.getKey() != null, "Context does not have a key");
  Preconditions.checkNotNull(iv, "iv");
  Decryptor d = getDecryptor();
  d.setKey(context.getKey());
  d.setIv(iv);
  return d.createDecryptionStream(in);
}
// Both endpoint keys are required below; their absence indicates an empty or unreadable file,
// so fail fast rather than propagate an empty Optional.
Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
Optional<Cell> lk = reader.getLastKey();
Preconditions.checkState(lk.isPresent(), "Last key can not be null");
// Extract the row portion of the last key as an independent byte[].
byte[] lastKey = CellUtil.cloneRow(lk.get());