private void emitHeader(String name, String value) throws HpackException {
    // Header names are forced to lower case
    if ("cookie".equals(name)) {
        // Only count the cookie header once since HTTP/2 splits it into
        // multiple headers to aid compression
        if (!countedCookie) {
            headerCount++;
            countedCookie = true;
        }
    } else {
        headerCount++;
    }
    // Overhead will vary. The main concern is that lots of small headers
    // trigger the limiting mechanism correctly. Therefore, use an overhead
    // estimate of 3 which is the worst case for small headers.
    int inc = 3 + name.length() + value.length();
    headerSize += inc;

    if (!isHeaderCountExceeded() && !isHeaderSizeExceeded(0)) {
        headerEmitter.emitHeader(name, value);
    }
}
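// For reference, a minimal sketch of the two limit checks used above. The
// maxHeaderCount / maxHeaderSize fields and the "negative means unlimited"
// convention are assumptions for illustration, not necessarily the exact
// implementation. isHeaderSizeExceeded() takes the number of bytes still
// unread so a partially read header block can be rejected early.
private boolean isHeaderCountExceeded() {
    if (maxHeaderCount < 0) {
        return false;
    }
    return headerCount > maxHeaderCount;
}

private boolean isHeaderSizeExceeded(int unreadSize) {
    if (maxHeaderSize < 0) {
        return false;
    }
    return (headerSize + unreadSize) > maxHeaderSize;
}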
"http2Parser.headerLimitCount", connectionId, Integer.valueOf(streamId)), Http2Error.ENHANCE_YOUR_CALM, streamId); hpackDecoder.getHeaderEmitter().setHeaderException(headerException); "http2Parser.headerLimitSize", connectionId, Integer.valueOf(streamId)), Http2Error.ENHANCE_YOUR_CALM, streamId); hpackDecoder.getHeaderEmitter().setHeaderException(headerException);
protected void onHeadersComplete(int streamId) throws Http2Exception {
    // Any left over data is a compression error
    if (headerReadBuffer.position() > 0) {
        throw new ConnectionException(
                sm.getString("http2Parser.processFrameHeaders.decodingDataLeft"),
                Http2Error.COMPRESSION_ERROR);
    }

    // Delay validation (and triggering any exception) until this point
    // since all the headers still have to be read if a StreamException is
    // going to be thrown.
    hpackDecoder.getHeaderEmitter().validateHeaders();

    output.headersEnd(streamId);

    if (headersEndStream) {
        output.receivedEndOfStream(streamId);
        headersEndStream = false;
    }

    // Reset size for new request if the buffer was previously expanded
    if (headerReadBuffer.capacity() > Constants.DEFAULT_HEADER_READ_BUFFER_SIZE) {
        headerReadBuffer = ByteBuffer.allocate(Constants.DEFAULT_HEADER_READ_BUFFER_SIZE);
    }
}
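// For context, a hypothetical sketch of the expansion that the reset above
// undoes. When a header block does not fit in the default-sized buffer the
// parser grows it; shrinking back once the headers complete avoids pinning
// the larger allocation for the connection's lifetime. The method name and
// doubling strategy are assumptions for illustration.
private void expandHeaderReadBuffer(int required) {
    int newSize = Math.max(headerReadBuffer.capacity() * 2,
            headerReadBuffer.position() + required);
    ByteBuffer expanded = ByteBuffer.allocate(newSize);
    // Copy any bytes already buffered into the larger buffer, leaving it in
    // write mode ready for the next read.
    headerReadBuffer.flip();
    expanded.put(headerReadBuffer);
    headerReadBuffer = expanded;
}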