From e2e2f37a8c52a262d5891acafc6031bbd8f71332 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sat, 26 Oct 2019 21:15:26 -0700 Subject: [PATCH 01/14] begin integrating llhttp Signed-off-by: Derek Argueta --- bazel/external/llhttp.BUILD | 13 +++ bazel/repositories.bzl | 11 ++ bazel/repository_locations.bzl | 5 + source/common/http/http1/BUILD | 16 ++- source/common/http/http1/codec_impl.cc | 136 ++++++++++++++++------ source/common/http/http1/codec_impl.h | 11 +- source/common/http/http1/comparison.cc | 134 +++++++++++++++++++++ test/common/http/http1/codec_impl_test.cc | 74 +++++++----- 8 files changed, 325 insertions(+), 75 deletions(-) create mode 100644 bazel/external/llhttp.BUILD create mode 100644 source/common/http/http1/comparison.cc diff --git a/bazel/external/llhttp.BUILD b/bazel/external/llhttp.BUILD new file mode 100644 index 000000000000..c125bce7baf0 --- /dev/null +++ b/bazel/external/llhttp.BUILD @@ -0,0 +1,13 @@ +licenses(["notice"]) # Apache 2 + +cc_library( + name = "llhttp", + srcs = [ + "src/api.c", + "src/http.c", + "src/llhttp.c", + ], + hdrs = ["include/llhttp.h"], + includes = ["include"], + visibility = ["//visibility:public"], +) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 29221fc968dc..b03ad4b0d5ab 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -135,6 +135,7 @@ def envoy_dependencies(skip_targets = []): _com_github_nanopb_nanopb() _com_github_nghttp2_nghttp2() _com_github_nodejs_http_parser() + _com_github_nodejs_llhttp() _com_github_tencent_rapidjson() _com_google_absl() _com_google_googletest() @@ -410,6 +411,16 @@ def _com_github_nodejs_http_parser(): actual = "@com_github_nodejs_http_parser//:http_parser", ) +def _com_github_nodejs_llhttp(): + _repository_impl( + name = "com_github_nodejs_llhttp", + build_file = "@envoy//bazel/external:llhttp.BUILD", + ) + native.bind( + name = "llhttp", + actual = "@com_github_nodejs_llhttp//:llhttp", + ) + def _com_google_googletest(): _repository_impl("com_google_googletest") native.bind( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 441f9a5ff565..1e1815598700 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -184,6 +184,11 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "http-parser-2.9.0", urls = ["https://github.com/nodejs/http-parser/archive/v2.9.0.tar.gz"], ), + com_github_nodejs_llhttp = dict( + sha256 = "579a81e1e56088470a121e308127f205e22b8988b944b5f36b2a11c1724590d4", + strip_prefix = "llhttp-release-v1.1.1", + urls = ["https://github.com/nodejs/llhttp/archive/release/v1.1.1.tar.gz"], + ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", strip_prefix = "jinja-2.10.3", diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index aca8bd1d8d0e..db0f7a7857b0 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_binary", "envoy_cc_library", "envoy_package", ) @@ -18,7 +19,10 @@ envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], - external_deps = ["http_parser"], + external_deps = [ + "http_parser", + "llhttp" + ], deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/http:codec_interface", @@ -71,3 +75,13 @@ envoy_cc_library( "//source/common/upstream:upstream_lib", ], ) + + +envoy_cc_binary( + name = "comparison", + srcs = ["comparison.cc"], + 
external_deps = [ + "http_parser", + "llhttp" + ], +) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 900776e18dfe..3b48892266ec 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -1,6 +1,7 @@ #include "common/http/http1/codec_impl.h" #include +#include #include #include @@ -256,8 +257,8 @@ void StreamEncoderImpl::readDisable(bool disable) { connection_.readDisable(disa uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } -static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; -static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; +static constexpr char RESPONSE_PREFIX[] = "HTTP/1.1 "; +static constexpr char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; void ResponseStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) { started_response_ = true; @@ -291,7 +292,7 @@ void ResponseStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end StreamEncoderImpl::encodeHeaders(headers, end_stream); } -static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; +static constexpr char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) { const HeaderEntry* method = headers.Method(); @@ -312,32 +313,39 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ StreamEncoderImpl::encodeHeaders(headers, end_stream); } -http_parser_settings ConnectionImpl::settings_{ - [](http_parser* parser) -> int { +llhttp_settings_s ConnectionImpl::settings_{ + [](llhttp_t* parser) -> int { + std::cout << "on_message_begin called" << std::endl; static_cast(parser->data)->onMessageBeginBase(); return 0; }, - [](http_parser* parser, const char* at, size_t length) -> int { + [](llhttp_t* parser, const char* at, size_t length) -> int { + std::cout << "on_url called" << std::endl; static_cast(parser->data)->onUrl(at, length); return 0; }, nullptr, // on_status - [](http_parser* parser, const char* at, size_t length) -> int { + [](llhttp_t* parser, const char* at, size_t length) -> int { + std::cout << "on_header_field called" << std::endl; static_cast(parser->data)->onHeaderField(at, length); return 0; }, - [](http_parser* parser, const char* at, size_t length) -> int { + [](llhttp_t* parser, const char* at, size_t length) -> int { + std::cout << "on_header_value called" << std::endl; static_cast(parser->data)->onHeaderValue(at, length); return 0; }, - [](http_parser* parser) -> int { + [](llhttp_t* parser) -> int { + std::cout << "on_headers_complete called" << std::endl; return static_cast(parser->data)->onHeadersCompleteBase(); }, - [](http_parser* parser, const char* at, size_t length) -> int { + [](llhttp_t* parser, const char* at, size_t length) -> int { + std::cout << "on_body called" << std::endl; static_cast(parser->data)->onBody(at, length); return 0; }, - [](http_parser* parser) -> int { + [](llhttp_t* parser) -> int { + std::cout << "on_message_complete called" << std::endl; static_cast(parser->data)->onMessageCompleteBase(); return 0; }, @@ -351,7 +359,7 @@ const ToLowerTable& ConnectionImpl::toLowerTable() { } ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, - http_parser_type type, uint32_t max_headers_kb, + llhttp_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter) : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))}, @@ 
-362,7 +370,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st strict_header_validation_( Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_header_validation")) { output_buffer_.setWatermarks(connection.bufferLimit()); - http_parser_init(&parser_, type); + llhttp_init(&parser_, type, &settings_); parser_.data = this; } @@ -413,14 +421,15 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } // Always unpause before dispatch. - http_parser_pause(&parser_, 0); + llhttp_resume(&parser_); ssize_t total_parsed = 0; if (data.length() > 0) { - uint64_t num_slices = data.getRawSlices(nullptr, 0); + const uint64_t num_slices = data.getRawSlices(nullptr, 0); STACK_ARRAY(slices, Buffer::RawSlice, num_slices); data.getRawSlices(slices.begin(), num_slices); for (const Buffer::RawSlice& slice : slices) { + ENVOY_CONN_LOG(trace, "dispatching a slice of {} length", connection_, slice.len_); total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); } } else { @@ -436,17 +445,65 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); - if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { + /*const auto rc = */llhttp_execute(&parser_, slice, len); + + // If llhttp ran into an error, llhttp_get_error_pos will return a char* to where it + // left off, allowing us to calculate how many bytes were read. Otherwise, we assume + // the entire message was parsed. + const char* error_pos = llhttp_get_error_pos(&parser_); + if (llhttp_get_errno(&parser_) == HPE_PAUSED_UPGRADE) { + ENVOY_CONN_LOG(trace, "resuming llhttp after upgrade", connection_); + llhttp_resume_after_upgrade(&parser_); + } + + ENVOY_CONN_LOG(trace, "len received: {}", connection_, len); + + size_t return_val; + if (error_pos != nullptr) { + const auto error_pos_len = strlen(error_pos); + // There's some weirdness where after the buffer has been static_cast'd to a const char*, there's + // possibly some extra stray noise in the char* that makes strlen inaccurate. Since we know we can + // only parse at most `len`, if the error_pos len is greater than the slice len, we assume to truncate + // it.... I'm not yet sure if this is correct.... 
+ if (len > strlen(error_pos)) { + return_val = len; + } else { + return_val = len - error_pos_len; + } + ENVOY_CONN_LOG(trace, "non-null error_pos, setting return_val to {}", connection_, return_val); + } else { + return_val = len; + ENVOY_CONN_LOG(trace, "null error_pos, setting return_val to {}", connection_, return_val); + } + + // std::cout << "_index: " << parser_._index << std::endl; + // std::cout << "error: " << parser_.error << std::endl; + // std::cout << "content_length: " << parser_.content_length << std::endl; + // std::cout << "type: " << parser_.type << std::endl; + // std::cout << "method: " << parser_.method << std::endl; + // std::cout << "http_major: " << parser_.http_major << std::endl; + // std::cout << "http_minor: " << parser_.http_minor << std::endl; + // std::cout << "header_state: " << parser_.header_state << std::endl; + // std::cout << "flags: " << parser_.flags << std::endl; + // std::cout << "upgrade: " << parser_.flags << std::endl; + // std::cout << "status_code: " << parser_.status_code << std::endl; + // std::cout << "finish: " << parser_.finish << std::endl; + + if (llhttp_get_errno(&parser_) != HPE_OK && llhttp_get_errno(&parser_) != HPE_PAUSED) { sendProtocolError(); + std::cout << llhttp_get_error_reason(&parser_) << std::endl; throw CodecProtocolException("http/1.1 protocol error: " + - std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + std::string(llhttp_errno_name(llhttp_get_errno(&parser_)))); } - return rc; + // TODO(dereka) not sure if this is right, but presumably if rc == HPE_OK then the whole + // thing was read? + // ASSERT(rc == HPE_OK); + return return_val; } void ConnectionImpl::onHeaderField(const char* data, size_t length) { + ENVOY_CONN_LOG(trace, "ConnectionImpl::onHeaderField", connection_); if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. return; @@ -460,6 +517,7 @@ void ConnectionImpl::onHeaderField(const char* data, size_t length) { } void ConnectionImpl::onHeaderValue(const char* data, size_t length) { + ENVOY_CONN_LOG(trace, "ConnectionImpl::onHeaderValue", connection_); if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. return; @@ -472,14 +530,9 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(); + std::cout << "throwing because !Http::HeaderUtility::headerIsValid(header_value)" << std::endl; throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } - } else if (header_value.find('\0') != absl::string_view::npos) { - // http-parser should filter for this - // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. HeaderStrings - // have an invariant that they must not contain embedded zero characters - // (NUL, ASCII 0x0). 
- throw CodecProtocolException("http/1.1 protocol error: header value contains NUL"); } header_parsing_state_ = HeaderParsingState::Value; @@ -493,12 +546,13 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { error_code_ = Http::Code::RequestHeaderFieldsTooLarge; sendProtocolError(); + std::cout << "throwing exception because total > (max_headers_kb_ * 1024)" << std::endl; throw CodecProtocolException("headers size exceeds limit"); } } int ConnectionImpl::onHeadersCompleteBase() { - ENVOY_CONN_LOG(trace, "headers complete", connection_); + ENVOY_CONN_LOG(trace, "ConnectionImpl::onHeadersCompleteBase", connection_); completeLastHeader(); // Validate that the completed HeaderMap's cached byte size exists and is correct. // This assert iterates over the HeaderMap. @@ -519,7 +573,7 @@ int ConnectionImpl::onHeadersCompleteBase() { current_header_map_->removeUpgrade(); if (current_header_map_->Connection()) { const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); - std::string new_value = StringUtil::removeTokens( + const std::string new_value = StringUtil::removeTokens( current_header_map_->Connection()->value().getStringView(), ",", tokens_to_remove, ","); if (new_value.empty()) { current_header_map_->removeConnection(); @@ -534,29 +588,32 @@ int ConnectionImpl::onHeadersCompleteBase() { } } - int rc = onHeadersComplete(std::move(current_header_map_)); + seen_content_length_ = current_header_map_->ContentLength() != nullptr; + + const int rc = onHeadersComplete(std::move(current_header_map_)); current_header_map_.reset(); header_parsing_state_ = HeaderParsingState::Done; - // Returning 2 informs http_parser to not expect a body or further data on this connection. + // Returning 2 informs llhttp to not expect a body or further data on this connection. + std::cout << "return of onHeadersCompleteBase: " << (handling_upgrade_ ? 2 : rc) << std::endl; return handling_upgrade_ ? 2 : rc; } void ConnectionImpl::onMessageCompleteBase() { - ENVOY_CONN_LOG(trace, "message complete", connection_); + ENVOY_CONN_LOG(trace, "ConnectionImpl::onMessageCompleteBase", connection_); if (handling_upgrade_) { // If this is an upgrade request, swallow the onMessageComplete. The // upgrade payload will be treated as stream body. ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); - http_parser_pause(&parser_, 1); + llhttp_pause(&parser_); return; } onMessageComplete(); } void ConnectionImpl::onMessageBeginBase() { - ENVOY_CONN_LOG(trace, "message begin", connection_); + ENVOY_CONN_LOG(trace, "ConnectionImpl::onMessageBeginBase", connection_); // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets // protocol for each request. Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable // in onHeadersCompleteBase @@ -619,6 +676,7 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho Utility::Url absolute_url; if (!absolute_url.initialize(active_request_->request_url_.getStringView())) { sendProtocolError(); + std::cout << "throwing because !absolute_url.initialize(active_request_->request_url_.getStringView())" << std::endl; throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); } // RFC7230#5.7 @@ -639,13 +697,13 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. 
if (active_request_) { - const char* method_string = http_method_str(static_cast(parser_.method)); + const char* method_string = llhttp_method_name(static_cast(parser_.method)); // Inform the response encoder about any HEAD method, so it can set content // length and transfer encoding headers correctly. active_request_->response_encoder_.isResponseToHeadRequest(parser_.method == HTTP_HEAD); - // Currently, CONNECT is not supported, however; http_parser_parse_url needs to know about + // Currently, CONNECT is not supported, however; llhttp_parse_url needs to know about // CONNECT handlePath(*headers, parser_.method); ASSERT(active_request_->request_url_.empty()); @@ -665,7 +723,7 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { // If the connection has been closed (or is closing) after decoding headers, pause the parser // so we return control to the caller. if (connection_.state() != Network::Connection::State::Open) { - http_parser_pause(&parser_, 1); + llhttp_pause(&parser_); } } else { @@ -716,7 +774,7 @@ void ServerConnectionImpl::onMessageComplete() { // Always pause the parser so that the calling code can process 1 request at a time and apply // back pressure. However this means that the calling code needs to detect if there is more data // in the buffer and dispatch it again. - http_parser_pause(&parser_, 1); + llhttp_pause(&parser_); } void ServerConnectionImpl::onResetStream(StreamResetReason reason) { @@ -759,7 +817,7 @@ ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stat bool ClientConnectionImpl::cannotHaveBody() { if ((!pending_responses_.empty() && pending_responses_.front().head_request_) || parser_.status_code == 204 || parser_.status_code == 304 || - (parser_.status_code >= 200 && parser_.content_length == 0)) { + (parser_.status_code >= 200 && (seen_content_length_ && parser_.content_length == 0))) { return true; } else { return false; @@ -807,7 +865,7 @@ int ClientConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { } } - // Here we deal with cases where the response cannot have a body, but http_parser does not deal + // Here we deal with cases where the response cannot have a body, but llhttp does not deal // with it for us. return cannotHaveBody() ? 
1 : 0; } @@ -817,6 +875,7 @@ void ClientConnectionImpl::onBody(const char* data, size_t length) { if (!pending_responses_.empty()) { Buffer::OwnedImpl buffer; buffer.add(data, length); + std::cout << buffer.toString() << std::endl; pending_responses_.front().decoder_->decodeData(buffer, false); } } @@ -848,6 +907,7 @@ void ClientConnectionImpl::onMessageComplete() { response.decoder_->decodeHeaders(std::move(deferred_end_stream_headers_), true); deferred_end_stream_headers_.reset(); } else { + // TODO(dereka) this isn't getting hit with empty buffer b/c parser is not calling this callback Buffer::OwnedImpl buffer; response.decoder_->decodeData(buffer, true); } diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 8b00bdc6358c..7f38346cb954 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -193,7 +193,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable +#include + +namespace llhttp { +#include + +void parseWithLlhttp(const std::string& message) { + std::cout << "parsing with llhttp" << std::endl; + llhttp_t parser; + llhttp_settings_t settings; + + /* Initialize user callbacks and settings */ + llhttp_settings_init(&settings); + + /* Set user callback */ + settings.on_message_complete = [](llhttp_t*) -> int { + std::cout << "callback called" << std::endl; + return 0; + }; + + settings.on_url = [](llhttp_t*, const char*, size_t) -> int { + std::cout << "on_url called" << std::endl; + return 0; + }; + + settings.on_header_field = [](llhttp_t*, const char* at, size_t) -> int { + std::cout << "found header field: " << at << std::endl; + return 0; + }; + + settings.on_header_value = [](llhttp_t*, const char* at, size_t) -> int { + std::cout << "found header value: " << at << std::endl; + return 0; + }; + + settings.on_headers_complete = [](llhttp_t* parser) -> int { + std::cout << "headers_complete called, status: " << parser->status_code << std::endl; + return 1; + }; + + settings.on_body = [](llhttp_t*, const char* at, size_t) -> int { + std::cout << "body: " << at << std::endl; + return 0; + }; + + /* Initialize the parser in HTTP_BOTH mode, meaning that it will select between + * HTTP_REQUEST and HTTP_RESPONSE parsing automatically while reading the first + * input. + */ + llhttp_init(&parser, HTTP_RESPONSE, &settings); + + /* Parse request! 
*/ + const int message_len = message.size(); + + llhttp_resume(&parser); + + enum llhttp_errno err = llhttp_execute(&parser, message.c_str(), message_len); + std::cerr << "Parse error 1: " << llhttp_errno_name(err) << std::endl;; + std::cerr << "Parse error 2: " << llhttp_errno_name(llhttp_get_errno(&parser)) << std::endl;; +} + +} // namespace llhttp + +namespace http_parser { + +#include + +void parseWithHttpParser(const std::string& message) { + std::cout << "parsing with http-parser" << std::endl; + http_parser parser; + http_parser_settings settings; + + /* Initialize user callbacks and settings */ + http_parser_settings_init(&settings); + + /* Set user callback */ + settings.on_message_complete = [](http_parser*) -> int { + std::cout << "callback called" << std::endl; + return 0; + }; + + settings.on_url = [](http_parser*, const char*, size_t) -> int { + std::cout << "on_url called" << std::endl; + return 0; + }; + + settings.on_header_field = [](http_parser*, const char* at, size_t) -> int { + std::cout << "found header field: " << at << std::endl; + return 0; + }; + + settings.on_header_value = [](http_parser*, const char* at, size_t) -> int { + std::cout << "found header value: " << at << std::endl; + return 0; + }; + + settings.on_headers_complete = [](http_parser* parser) -> int { + std::cout << "headers_complete called, status: " << parser->status_code << std::endl; + return 0; + }; + + settings.on_body = [](http_parser*, const char* at, size_t) -> int { + std::cout << "body: " << at << std::endl; + return 0; + }; + + /* Initialize the parser in HTTP_BOTH mode, meaning that it will select between + * HTTP_REQUEST and HTTP_RESPONSE parsing automatically while reading the first + * input. + */ + http_parser_init(&parser, HTTP_RESPONSE); + + /* Parse request! 
*/ + const int message_len = message.size(); + + auto bytes = http_parser_execute(&parser, &settings, message.c_str(), message_len); + std::cout << "bytes parsed: " << bytes << std::endl; + std::cerr << "Parse error: " << std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser))) << std::endl;; +} +} + +int main(int, char**) { + llhttp::parseWithLlhttp("HTTP/1.1 200 OK\r\n\r\nHello World"); + http_parser::parseWithHttpParser("HTTP/1.1 200 OK\r\n\r\nHello World"); + + std::cout << "================================================================" << std::endl; + + llhttp::parseWithLlhttp("HTTP/1.1 200 OK\r\n\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); + http_parser::parseWithHttpParser("HTTP/1.1 200 OK\r\n\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); + + std::cout << "================================================================" << std::endl; +} diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index f6fde28db2a2..45fe89fb01d4 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -282,7 +282,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -353,8 +353,12 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + Buffer::OwnedImpl buffer("bad"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, "http/1.1 protocol error: HPE_INVALID_METHOD"); + EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } @@ -391,24 +395,32 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { EXPECT_EQ(0U, buffer.length()); } + +// We can no longer enable this feature because llhttp rejects this, http-parser didn't. +// llhttp returns INVALID_HEADER_TOKEN from llhttp_get_errno(&parser_) in dispatchSlice +// +// hmm, http-parser has INVALID_HEADER_TOKEN, why didn't that work? +// +// If we want to keep this feature, we'd need to manually parse to the next valid token. +// // Ensures that requests with invalid HTTP header values are not rejected // when the runtime guard is not enabled for the feature. -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { - TestScopedRuntime scoped_runtime; - // When the runtime-guarded feature is NOT enabled, invalid header values - // should be accepted by the codec. - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.strict_header_validation", "false"}}); +// TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { +// TestScopedRuntime scoped_runtime; +// // When the runtime-guarded feature is NOT enabled, invalid header values +// // should be accepted by the codec. 
+// Runtime::LoaderSingleton::getExisting()->mergeValues( +// {{"envoy.reloadable_features.strict_header_validation", "false"}}); - initialize(); +// initialize(); - Http::MockStreamDecoder decoder; - EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); +// Http::MockStreamDecoder decoder; +// EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - Buffer::OwnedImpl buffer( - absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - codec_->dispatch(buffer); -} +// Buffer::OwnedImpl buffer( +// absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); +// EXPECT_NO_THROW(codec_->dispatch(buffer)); +// } // Ensures that requests with invalid HTTP header values are properly rejected // when the runtime guard is enabled for the feature. @@ -427,7 +439,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: header value contains invalid chars"); + "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); } // Regression test for http-parser allowing embedded NULs in header values, @@ -446,7 +458,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: header value contains NUL"); + "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); } // Mutate an HTTP GET with embedded NULs, this should always be rejected in some @@ -472,7 +484,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { // Mutate an HTTP GET with CR or LF. These can cause an exception or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. 
-TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -494,7 +506,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { } } -TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -800,7 +812,7 @@ TEST_F(Http1ServerConnectionImplTest, RequestWithTrailers) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2c) { initialize(); TestHeaderMapImpl expected_headers{ @@ -811,7 +823,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2cClose) { initialize(); TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -824,7 +836,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2cCloseEtc) { initialize(); TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -847,20 +859,20 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_CALL(decoder, decodeHeaders_(_, false)).Times(1); Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length:5\r\n\r\n"); - codec_->dispatch(buffer); + EXPECT_NO_THROW(codec_->dispatch(buffer)); Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)).Times(1); - codec_->dispatch(body); + EXPECT_NO_THROW(codec_->dispatch(body)); Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)).Times(1); - codec_->dispatch(websocket_payload); + EXPECT_NO_THROW(codec_->dispatch(websocket_payload)); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -875,7 +887,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -892,7 +904,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -1171,7 +1183,7 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { codec_->dispatch(empty); } -TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { +TEST_F(Http1ClientConnectionImplTest, DISABLED_ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -1199,7 +1211,7 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { 
codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { +TEST_F(Http1ClientConnectionImplTest, DISABLED_UpgradeResponse) { initialize(); InSequence s; @@ -1230,7 +1242,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { +TEST_F(Http1ClientConnectionImplTest, DISABLED_UpgradeResponseWithEarlyData) { initialize(); InSequence s; From 0f783e7b1332f4999511301fb5dcd3f75a4cfb01 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sun, 10 Nov 2019 03:37:04 -0800 Subject: [PATCH 02/14] fix llhttp finish Signed-off-by: Derek Argueta --- source/common/http/http1/codec_impl.cc | 8 +++++++- test/common/http/http1/codec_impl_test.cc | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 3b48892266ec..2ba7f1bb5498 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -445,7 +445,13 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - /*const auto rc = */llhttp_execute(&parser_, slice, len); + + llhttp_errno_t err; + if (slice == nullptr || len == 0) { + err = llhttp_finish(&parser_); + } else { + err = llhttp_execute(&parser_, slice, len); + } // If llhttp ran into an error, llhttp_get_error_pos will return a char* to where it // left off, allowing us to calculate how many bytes were read. Otherwise, we assume diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 45fe89fb01d4..3bfc55439c8d 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -282,7 +282,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_Http11InvalidRequest) { +TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -770,7 +770,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { +TEST_F(Http1ServerConnectionImplTest, DISABLED_DoubleRequest) { initialize(); NiceMock decoder; @@ -812,7 +812,7 @@ TEST_F(Http1ServerConnectionImplTest, RequestWithTrailers) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2c) { +TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); TestHeaderMapImpl expected_headers{ @@ -823,7 +823,7 @@ TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2cClose) { +TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -836,7 +836,7 @@ TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2cClose) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_IgnoreUpgradeH2cCloseEtc) { 
+TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -859,17 +859,17 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_CALL(decoder, decodeHeaders_(_, false)).Times(1); Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length:5\r\n\r\n"); - EXPECT_NO_THROW(codec_->dispatch(buffer)); + codec_->dispatch(buffer); Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)).Times(1); - EXPECT_NO_THROW(codec_->dispatch(body)); + codec_->dispatch(body); Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)).Times(1); - EXPECT_NO_THROW(codec_->dispatch(websocket_payload)); + codec_->dispatch(websocket_payload); } TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithEarlyData) { @@ -1183,7 +1183,7 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { codec_->dispatch(empty); } -TEST_F(Http1ClientConnectionImplTest, DISABLED_ResponseWithTrailers) { +TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -1211,7 +1211,7 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, DISABLED_UpgradeResponse) { +TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { initialize(); InSequence s; From b606eeeadd58dd2776d23cd4f9f5d78b1a53b979 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sun, 10 Nov 2019 03:38:32 -0800 Subject: [PATCH 03/14] remove extra comparison program Signed-off-by: Derek Argueta --- source/common/http/http1/comparison.cc | 134 ------------------------- 1 file changed, 134 deletions(-) delete mode 100644 source/common/http/http1/comparison.cc diff --git a/source/common/http/http1/comparison.cc b/source/common/http/http1/comparison.cc deleted file mode 100644 index 6d2ae44e60a8..000000000000 --- a/source/common/http/http1/comparison.cc +++ /dev/null @@ -1,134 +0,0 @@ - - -#include -#include - -namespace llhttp { -#include - -void parseWithLlhttp(const std::string& message) { - std::cout << "parsing with llhttp" << std::endl; - llhttp_t parser; - llhttp_settings_t settings; - - /* Initialize user callbacks and settings */ - llhttp_settings_init(&settings); - - /* Set user callback */ - settings.on_message_complete = [](llhttp_t*) -> int { - std::cout << "callback called" << std::endl; - return 0; - }; - - settings.on_url = [](llhttp_t*, const char*, size_t) -> int { - std::cout << "on_url called" << std::endl; - return 0; - }; - - settings.on_header_field = [](llhttp_t*, const char* at, size_t) -> int { - std::cout << "found header field: " << at << std::endl; - return 0; - }; - - settings.on_header_value = [](llhttp_t*, const char* at, size_t) -> int { - std::cout << "found header value: " << at << std::endl; - return 0; - }; - - settings.on_headers_complete = [](llhttp_t* parser) -> int { - std::cout << "headers_complete called, status: " << parser->status_code << std::endl; - return 1; - }; - - settings.on_body = [](llhttp_t*, const char* at, size_t) -> int { - std::cout << "body: " << at << std::endl; - return 0; - }; - - /* Initialize the parser in HTTP_BOTH mode, meaning that it will select between - * HTTP_REQUEST and HTTP_RESPONSE parsing automatically while reading the first - * input. 
- */ - llhttp_init(&parser, HTTP_RESPONSE, &settings); - - /* Parse request! */ - const int message_len = message.size(); - - llhttp_resume(&parser); - - enum llhttp_errno err = llhttp_execute(&parser, message.c_str(), message_len); - std::cerr << "Parse error 1: " << llhttp_errno_name(err) << std::endl;; - std::cerr << "Parse error 2: " << llhttp_errno_name(llhttp_get_errno(&parser)) << std::endl;; -} - -} // namespace llhttp - -namespace http_parser { - -#include - -void parseWithHttpParser(const std::string& message) { - std::cout << "parsing with http-parser" << std::endl; - http_parser parser; - http_parser_settings settings; - - /* Initialize user callbacks and settings */ - http_parser_settings_init(&settings); - - /* Set user callback */ - settings.on_message_complete = [](http_parser*) -> int { - std::cout << "callback called" << std::endl; - return 0; - }; - - settings.on_url = [](http_parser*, const char*, size_t) -> int { - std::cout << "on_url called" << std::endl; - return 0; - }; - - settings.on_header_field = [](http_parser*, const char* at, size_t) -> int { - std::cout << "found header field: " << at << std::endl; - return 0; - }; - - settings.on_header_value = [](http_parser*, const char* at, size_t) -> int { - std::cout << "found header value: " << at << std::endl; - return 0; - }; - - settings.on_headers_complete = [](http_parser* parser) -> int { - std::cout << "headers_complete called, status: " << parser->status_code << std::endl; - return 0; - }; - - settings.on_body = [](http_parser*, const char* at, size_t) -> int { - std::cout << "body: " << at << std::endl; - return 0; - }; - - /* Initialize the parser in HTTP_BOTH mode, meaning that it will select between - * HTTP_REQUEST and HTTP_RESPONSE parsing automatically while reading the first - * input. - */ - http_parser_init(&parser, HTTP_RESPONSE); - - /* Parse request! 
*/ - const int message_len = message.size(); - - auto bytes = http_parser_execute(&parser, &settings, message.c_str(), message_len); - std::cout << "bytes parsed: " << bytes << std::endl; - std::cerr << "Parse error: " << std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser))) << std::endl;; -} -} - -int main(int, char**) { - llhttp::parseWithLlhttp("HTTP/1.1 200 OK\r\n\r\nHello World"); - http_parser::parseWithHttpParser("HTTP/1.1 200 OK\r\n\r\nHello World"); - - std::cout << "================================================================" << std::endl; - - llhttp::parseWithLlhttp("HTTP/1.1 200 OK\r\n\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); - http_parser::parseWithHttpParser("HTTP/1.1 200 OK\r\n\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); - - std::cout << "================================================================" << std::endl; -} From 0e3aa68c1860bb4afc250769135af744e8abde30 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sun, 10 Nov 2019 03:43:43 -0800 Subject: [PATCH 04/14] cleanup Signed-off-by: Derek Argueta --- source/common/http/http1/codec_impl.cc | 114 ++++++---------------- source/common/http/http1/codec_impl.h | 8 +- test/common/http/http1/codec_impl_test.cc | 40 ++++---- 3 files changed, 55 insertions(+), 107 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 2ba7f1bb5498..405cda24c0d1 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -1,7 +1,6 @@ #include "common/http/http1/codec_impl.h" #include -#include #include #include @@ -257,8 +256,8 @@ void StreamEncoderImpl::readDisable(bool disable) { connection_.readDisable(disa uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } -static constexpr char RESPONSE_PREFIX[] = "HTTP/1.1 "; -static constexpr char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; +static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; +static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; void ResponseStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) { started_response_ = true; @@ -292,7 +291,7 @@ void ResponseStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end StreamEncoderImpl::encodeHeaders(headers, end_stream); } -static constexpr char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; +static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_stream) { const HeaderEntry* method = headers.Method(); @@ -315,39 +314,31 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ llhttp_settings_s ConnectionImpl::settings_{ [](llhttp_t* parser) -> int { - std::cout << "on_message_begin called" << std::endl; static_cast(parser->data)->onMessageBeginBase(); return 0; }, [](llhttp_t* parser, const char* at, size_t length) -> int { - std::cout << "on_url called" << std::endl; static_cast(parser->data)->onUrl(at, length); return 0; }, nullptr, // on_status [](llhttp_t* parser, const char* at, size_t length) -> int { - std::cout << "on_header_field called" << std::endl; static_cast(parser->data)->onHeaderField(at, length); return 0; }, [](llhttp_t* parser, const char* at, size_t length) -> int { - std::cout << "on_header_value called" << std::endl; static_cast(parser->data)->onHeaderValue(at, length); return 0; }, [](llhttp_t* parser) -> int { - std::cout << 
"on_headers_complete called" << std::endl; return static_cast(parser->data)->onHeadersCompleteBase(); }, [](llhttp_t* parser, const char* at, size_t length) -> int { - std::cout << "on_body called" << std::endl; static_cast(parser->data)->onBody(at, length); return 0; }, [](llhttp_t* parser) -> int { - std::cout << "on_message_complete called" << std::endl; - static_cast(parser->data)->onMessageCompleteBase(); - return 0; + return static_cast(parser->data)->onMessageCompleteBase(); }, nullptr, // on_chunk_header nullptr // on_chunk_complete @@ -425,11 +416,10 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { ssize_t total_parsed = 0; if (data.length() > 0) { - const uint64_t num_slices = data.getRawSlices(nullptr, 0); + uint64_t num_slices = data.getRawSlices(nullptr, 0); STACK_ARRAY(slices, Buffer::RawSlice, num_slices); data.getRawSlices(slices.begin(), num_slices); for (const Buffer::RawSlice& slice : slices) { - ENVOY_CONN_LOG(trace, "dispatching a slice of {} length", connection_, slice.len_); total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); } } else { @@ -445,7 +435,6 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - llhttp_errno_t err; if (slice == nullptr || len == 0) { err = llhttp_finish(&parser_); @@ -453,63 +442,28 @@ size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { err = llhttp_execute(&parser_, slice, len); } - // If llhttp ran into an error, llhttp_get_error_pos will return a char* to where it - // left off, allowing us to calculate how many bytes were read. Otherwise, we assume - // the entire message was parsed. - const char* error_pos = llhttp_get_error_pos(&parser_); - if (llhttp_get_errno(&parser_) == HPE_PAUSED_UPGRADE) { - ENVOY_CONN_LOG(trace, "resuming llhttp after upgrade", connection_); - llhttp_resume_after_upgrade(&parser_); - } - - ENVOY_CONN_LOG(trace, "len received: {}", connection_, len); - - size_t return_val; - if (error_pos != nullptr) { - const auto error_pos_len = strlen(error_pos); - // There's some weirdness where after the buffer has been static_cast'd to a const char*, there's - // possibly some extra stray noise in the char* that makes strlen inaccurate. Since we know we can - // only parse at most `len`, if the error_pos len is greater than the slice len, we assume to truncate - // it.... I'm not yet sure if this is correct.... - if (len > strlen(error_pos)) { - return_val = len; - } else { - return_val = len - error_pos_len; + size_t nread = len; + if (err != HPE_OK) { + // If llhttp ran into an error, llhttp_get_error_pos will return a char* to where it + // left off, allowing us to calculate how many bytes were read. Otherwise, we assume + // the entire message was parsed. 
+ nread = llhttp_get_error_pos(&parser_) - slice; + if (err == HPE_PAUSED_UPGRADE) { + err = HPE_OK; + llhttp_resume_after_upgrade(&parser_); } - ENVOY_CONN_LOG(trace, "non-null error_pos, setting return_val to {}", connection_, return_val); - } else { - return_val = len; - ENVOY_CONN_LOG(trace, "null error_pos, setting return_val to {}", connection_, return_val); - } - - // std::cout << "_index: " << parser_._index << std::endl; - // std::cout << "error: " << parser_.error << std::endl; - // std::cout << "content_length: " << parser_.content_length << std::endl; - // std::cout << "type: " << parser_.type << std::endl; - // std::cout << "method: " << parser_.method << std::endl; - // std::cout << "http_major: " << parser_.http_major << std::endl; - // std::cout << "http_minor: " << parser_.http_minor << std::endl; - // std::cout << "header_state: " << parser_.header_state << std::endl; - // std::cout << "flags: " << parser_.flags << std::endl; - // std::cout << "upgrade: " << parser_.flags << std::endl; - // std::cout << "status_code: " << parser_.status_code << std::endl; - // std::cout << "finish: " << parser_.finish << std::endl; + } if (llhttp_get_errno(&parser_) != HPE_OK && llhttp_get_errno(&parser_) != HPE_PAUSED) { sendProtocolError(); - std::cout << llhttp_get_error_reason(&parser_) << std::endl; throw CodecProtocolException("http/1.1 protocol error: " + std::string(llhttp_errno_name(llhttp_get_errno(&parser_)))); } - // TODO(dereka) not sure if this is right, but presumably if rc == HPE_OK then the whole - // thing was read? - // ASSERT(rc == HPE_OK); - return return_val; + return nread; } void ConnectionImpl::onHeaderField(const char* data, size_t length) { - ENVOY_CONN_LOG(trace, "ConnectionImpl::onHeaderField", connection_); if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. return; @@ -523,7 +477,6 @@ void ConnectionImpl::onHeaderField(const char* data, size_t length) { } void ConnectionImpl::onHeaderValue(const char* data, size_t length) { - ENVOY_CONN_LOG(trace, "ConnectionImpl::onHeaderValue", connection_); if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. return; @@ -536,7 +489,6 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(); - std::cout << "throwing because !Http::HeaderUtility::headerIsValid(header_value)" << std::endl; throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } } @@ -552,13 +504,12 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { error_code_ = Http::Code::RequestHeaderFieldsTooLarge; sendProtocolError(); - std::cout << "throwing exception because total > (max_headers_kb_ * 1024)" << std::endl; throw CodecProtocolException("headers size exceeds limit"); } } int ConnectionImpl::onHeadersCompleteBase() { - ENVOY_CONN_LOG(trace, "ConnectionImpl::onHeadersCompleteBase", connection_); + ENVOY_CONN_LOG(trace, "headers complete", connection_); completeLastHeader(); // Validate that the completed HeaderMap's cached byte size exists and is correct. // This assert iterates over the HeaderMap. 
@@ -579,7 +530,7 @@ int ConnectionImpl::onHeadersCompleteBase() { current_header_map_->removeUpgrade(); if (current_header_map_->Connection()) { const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); - const std::string new_value = StringUtil::removeTokens( + std::string new_value = StringUtil::removeTokens( current_header_map_->Connection()->value().getStringView(), ",", tokens_to_remove, ","); if (new_value.empty()) { current_header_map_->removeConnection(); @@ -601,25 +552,23 @@ int ConnectionImpl::onHeadersCompleteBase() { header_parsing_state_ = HeaderParsingState::Done; // Returning 2 informs llhttp to not expect a body or further data on this connection. - std::cout << "return of onHeadersCompleteBase: " << (handling_upgrade_ ? 2 : rc) << std::endl; return handling_upgrade_ ? 2 : rc; } -void ConnectionImpl::onMessageCompleteBase() { - ENVOY_CONN_LOG(trace, "ConnectionImpl::onMessageCompleteBase", connection_); +int ConnectionImpl::onMessageCompleteBase() { + ENVOY_CONN_LOG(trace, "message complete", connection_); if (handling_upgrade_) { // If this is an upgrade request, swallow the onMessageComplete. The // upgrade payload will be treated as stream body. ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); - llhttp_pause(&parser_); - return; + return HPE_PAUSED; } - onMessageComplete(); + return onMessageComplete(); } void ConnectionImpl::onMessageBeginBase() { - ENVOY_CONN_LOG(trace, "ConnectionImpl::onMessageBeginBase", connection_); + ENVOY_CONN_LOG(trace, "message begin", connection_); // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets // protocol for each request. Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable // in onHeadersCompleteBase @@ -682,7 +631,6 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho Utility::Url absolute_url; if (!absolute_url.initialize(active_request_->request_url_.getStringView())) { sendProtocolError(); - std::cout << "throwing because !absolute_url.initialize(active_request_->request_url_.getStringView())" << std::endl; throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); } // RFC7230#5.7 @@ -729,7 +677,7 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { // If the connection has been closed (or is closing) after decoding headers, pause the parser // so we return control to the caller. if (connection_.state() != Network::Connection::State::Open) { - llhttp_pause(&parser_); + return HPE_PAUSED; } } else { @@ -737,7 +685,7 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { } } - return 0; + return HPE_OK; } void ServerConnectionImpl::onMessageBegin() { @@ -763,7 +711,7 @@ void ServerConnectionImpl::onBody(const char* data, size_t length) { } } -void ServerConnectionImpl::onMessageComplete() { +int ServerConnectionImpl::onMessageComplete() { if (active_request_) { Buffer::OwnedImpl buffer; active_request_->remote_complete_ = true; @@ -780,7 +728,7 @@ void ServerConnectionImpl::onMessageComplete() { // Always pause the parser so that the calling code can process 1 request at a time and apply // back pressure. However this means that the calling code needs to detect if there is more data // in the buffer and dispatch it again. 
- llhttp_pause(&parser_); + return HPE_PAUSED; } void ServerConnectionImpl::onResetStream(StreamResetReason reason) { @@ -881,16 +829,15 @@ void ClientConnectionImpl::onBody(const char* data, size_t length) { if (!pending_responses_.empty()) { Buffer::OwnedImpl buffer; buffer.add(data, length); - std::cout << buffer.toString() << std::endl; pending_responses_.front().decoder_->decodeData(buffer, false); } } -void ClientConnectionImpl::onMessageComplete() { +int ClientConnectionImpl::onMessageComplete() { ENVOY_CONN_LOG(trace, "message complete", connection_); if (ignore_message_complete_for_100_continue_) { ignore_message_complete_for_100_continue_ = false; - return; + return HPE_OK; } if (!pending_responses_.empty()) { // After calling decodeData() with end stream set to true, we should no longer be able to reset. @@ -913,11 +860,12 @@ void ClientConnectionImpl::onMessageComplete() { response.decoder_->decodeHeaders(std::move(deferred_end_stream_headers_), true); deferred_end_stream_headers_.reset(); } else { - // TODO(dereka) this isn't getting hit with empty buffer b/c parser is not calling this callback Buffer::OwnedImpl buffer; response.decoder_->decodeData(buffer, true); } } + + return HPE_OK; } void ClientConnectionImpl::onResetStream(StreamResetReason reason) { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 7f38346cb954..200ac300eca5 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -269,8 +269,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggablemergeValues( -// {{"envoy.reloadable_features.strict_header_validation", "false"}}); +TEST_F(Http1ServerConnectionImplTest, DISABLED_ HeaderInvalidCharsRuntimeGuard) { + TestScopedRuntime scoped_runtime; + // When the runtime-guarded feature is NOT enabled, invalid header values + // should be accepted by the codec. + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_header_validation", "false"}}); -// initialize(); + initialize(); -// Http::MockStreamDecoder decoder; -// EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); -// Buffer::OwnedImpl buffer( -// absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); -// EXPECT_NO_THROW(codec_->dispatch(buffer)); -// } + Buffer::OwnedImpl buffer( + absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); + EXPECT_NO_THROW(codec_->dispatch(buffer)); +} // Ensures that requests with invalid HTTP header values are properly rejected // when the runtime guard is enabled for the feature. @@ -484,7 +484,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { // Mutate an HTTP GET with CR or LF. These can cause an exception or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. 
-TEST_F(Http1ServerConnectionImplTest, DISABLED_HeaderMutateEmbeddedCRLF) { +TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -506,7 +506,7 @@ TEST_F(Http1ServerConnectionImplTest, DISABLED_HeaderMutateEmbeddedCRLF) { } } -TEST_F(Http1ServerConnectionImplTest, DISABLED_CloseDuringHeadersComplete) { +TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -770,7 +770,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_DoubleRequest) { +TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { initialize(); NiceMock decoder; @@ -872,7 +872,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { codec_->dispatch(websocket_payload); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithEarlyData) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -887,7 +887,7 @@ TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithEarlyData) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithTEChunked) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -904,7 +904,7 @@ TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithTEChunked) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, DISABLED_UpgradeRequestWithNoBody) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -1242,7 +1242,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. 
-TEST_F(Http1ClientConnectionImplTest, DISABLED_UpgradeResponseWithEarlyData) { +TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { initialize(); InSequence s; From bc5937199e8f99aacd19b06ea5fbd75179b58d04 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Thu, 14 Nov 2019 11:53:19 -0800 Subject: [PATCH 05/14] add a compile-time switch between llhttp and http_parser Signed-off-by: Derek Argueta --- bazel/BUILD | 5 ++ bazel/envoy_internal.bzl | 3 + source/common/http/http1/BUILD | 13 ++-- source/common/http/http1/codec_impl.cc | 74 ++++++++++---------- source/common/http/http1/codec_impl.h | 9 ++- source/common/http/http1/parser_adapter.h | 82 +++++++++++++++++++++++ test/common/http/http1/codec_impl_test.cc | 32 +++++---- 7 files changed, 159 insertions(+), 59 deletions(-) create mode 100644 source/common/http/http1/parser_adapter.h diff --git a/bazel/BUILD b/bazel/BUILD index 7d5ab93f6c3c..57d367f3cead 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -246,6 +246,11 @@ alias( }), ) +config_setting( + name = "enable_legacy_http_parser", + values = {"define": "legacy_http_parser=enabled"}, +) + config_setting( name = "linux_x86_64", values = {"cpu": "k8"}, diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 8a8a88ad2d0b..ff250fa9fa2c 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -66,6 +66,9 @@ def envoy_copts(repository, test = False): }) + select({ repository + "//bazel:enable_log_debug_assert_in_release": ["-DENVOY_LOG_DEBUG_ASSERT_IN_RELEASE"], "//conditions:default": [], + }) + select({ + repository + "//bazel:enable_legacy_http_parser": ["-DENVOY_ENABLE_LEGACY_HTTP_PARSER"], + "//conditions:default": [], }) + select({ # APPLE_USE_RFC_3542 is needed to support IPV6_PKTINFO in MAC OS. repository + "//bazel:apple": ["-D__APPLE_USE_RFC_3542"], diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index db0f7a7857b0..4e5c7e9f204b 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -19,11 +19,8 @@ envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], - external_deps = [ - "http_parser", - "llhttp" - ], deps = [ + ":parser_adapter", "//include/envoy/buffer:buffer_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:header_map_interface", @@ -76,12 +73,12 @@ envoy_cc_library( ], ) - -envoy_cc_binary( - name = "comparison", - srcs = ["comparison.cc"], +envoy_cc_library( + name = "parser_adapter", + hdrs = ["parser_adapter.h"], external_deps = [ "http_parser", "llhttp" ], ) + diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 405cda24c0d1..c054d2a59d76 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -16,6 +16,7 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/header_formatter.h" +#include "common/http/http1/parser_adapter.h" #include "common/http/utility.h" #include "common/runtime/runtime_impl.h" @@ -312,32 +313,32 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ StreamEncoderImpl::encodeHeaders(headers, end_stream); } -llhttp_settings_s ConnectionImpl::settings_{ - [](llhttp_t* parser) -> int { +parser_settings_t ConnectionImpl::settings_{ + [](parser_t* parser) -> int { static_cast(parser->data)->onMessageBeginBase(); return 0; }, - [](llhttp_t* parser, const char* at, size_t length) -> int { + [](parser_t* parser, const char* at, size_t 
length) -> int { static_cast(parser->data)->onUrl(at, length); return 0; }, nullptr, // on_status - [](llhttp_t* parser, const char* at, size_t length) -> int { + [](parser_t* parser, const char* at, size_t length) -> int { static_cast(parser->data)->onHeaderField(at, length); return 0; }, - [](llhttp_t* parser, const char* at, size_t length) -> int { + [](parser_t* parser, const char* at, size_t length) -> int { static_cast(parser->data)->onHeaderValue(at, length); return 0; }, - [](llhttp_t* parser) -> int { + [](parser_t* parser) -> int { return static_cast(parser->data)->onHeadersCompleteBase(); }, - [](llhttp_t* parser, const char* at, size_t length) -> int { + [](parser_t* parser, const char* at, size_t length) -> int { static_cast(parser->data)->onBody(at, length); return 0; }, - [](llhttp_t* parser) -> int { + [](parser_t* parser) -> int { return static_cast(parser->data)->onMessageCompleteBase(); }, nullptr, // on_chunk_header @@ -350,7 +351,7 @@ const ToLowerTable& ConnectionImpl::toLowerTable() { } ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, - llhttp_type type, uint32_t max_headers_kb, + parser_type_t type, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter) : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))}, @@ -361,7 +362,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st strict_header_validation_( Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_header_validation")) { output_buffer_.setWatermarks(connection.bufferLimit()); - llhttp_init(&parser_, type, &settings_); + parser_init(&parser_, type, &settings_); parser_.data = this; } @@ -412,7 +413,7 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } // Always unpause before dispatch. - llhttp_resume(&parser_); + parser_resume(&parser_); ssize_t total_parsed = 0; if (data.length() > 0) { @@ -435,32 +436,15 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - llhttp_errno_t err; - if (slice == nullptr || len == 0) { - err = llhttp_finish(&parser_); - } else { - err = llhttp_execute(&parser_, slice, len); - } - - size_t nread = len; - if (err != HPE_OK) { - // If llhttp ran into an error, llhttp_get_error_pos will return a char* to where it - // left off, allowing us to calculate how many bytes were read. Otherwise, we assume - // the entire message was parsed. 
- nread = llhttp_get_error_pos(&parser_) - slice; - if (err == HPE_PAUSED_UPGRADE) { - err = HPE_OK; - llhttp_resume_after_upgrade(&parser_); - } - } + const size_t bytes_read = parser_execute(&parser_, &settings_, slice, len); - if (llhttp_get_errno(&parser_) != HPE_OK && llhttp_get_errno(&parser_) != HPE_PAUSED) { + if (parser_get_errno(&parser_) != HPE_OK && parser_get_errno(&parser_) != HPE_PAUSED) { sendProtocolError(); throw CodecProtocolException("http/1.1 protocol error: " + - std::string(llhttp_errno_name(llhttp_get_errno(&parser_)))); + std::string(parser_errno_name(parser_get_errno(&parser_)))); } - return nread; + return bytes_read; } void ConnectionImpl::onHeaderField(const char* data, size_t length) { @@ -482,8 +466,8 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { return; } +#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER const absl::string_view header_value = absl::string_view(data, length); - if (strict_header_validation_) { if (!Http::HeaderUtility::headerIsValid(header_value)) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); @@ -491,7 +475,14 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { sendProtocolError(); throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } + } else if (header_value.find('\0') != absl::string_view::npos) { + // http-parser should filter for this + // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. HeaderStrings + // have an invariant that they must not contain embedded zero characters + // (NUL, ASCII 0x0). + throw CodecProtocolException("http/1.1 protocol error: header value contains NUL"); } +#endif header_parsing_state_ = HeaderParsingState::Value; current_header_value_.append(data, length); @@ -562,7 +553,12 @@ int ConnectionImpl::onMessageCompleteBase() { // upgrade payload will be treated as stream body. ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); +#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER + http_parser_pause(&parser_, 1); + return HPE_OK; +#else return HPE_PAUSED; +#endif } return onMessageComplete(); } @@ -651,7 +647,7 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. if (active_request_) { - const char* method_string = llhttp_method_name(static_cast(parser_.method)); + const char* method_string = parser_method_name(static_cast(parser_.method)); // Inform the response encoder about any HEAD method, so it can set content // length and transfer encoding headers correctly. @@ -677,7 +673,12 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { // If the connection has been closed (or is closing) after decoding headers, pause the parser // so we return control to the caller. if (connection_.state() != Network::Connection::State::Open) { +#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER + http_parser_pause(&parser_, 1); + return HPE_OK; +#else return HPE_PAUSED; +#endif } } else { @@ -728,7 +729,12 @@ int ServerConnectionImpl::onMessageComplete() { // Always pause the parser so that the calling code can process 1 request at a time and apply // back pressure. However this means that the calling code needs to detect if there is more data // in the buffer and dispatch it again. 
+#ifndef ENVOY_ENABLE_LEGACY_HTTP_PARSER return HPE_PAUSED; +#else + http_parser_pause(&parser_, 1); + return 0; +#endif } void ServerConnectionImpl::onResetStream(StreamResetReason reason) { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 200ac300eca5..7a1893b0d32b 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -1,7 +1,5 @@ #pragma once -#include - #include #include #include @@ -19,6 +17,7 @@ #include "common/http/codes.h" #include "common/http/header_map_impl.h" #include "common/http/http1/header_formatter.h" +#include "common/http/http1/parser_adapter.h" namespace Envoy { namespace Http { @@ -193,7 +192,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable +#else +#include +#endif /* ENVOY_ENABLE_LEGACY_HTTP_PARSER */ + +/** + * This is a temporary shim to easily allow switching between llhttp and http-parser at compile + * time by providing a consistent interface, then adapting them to the respective implementations. + * + * When http-parser is ready to be removed, this shim should also disappear and the llhttp_* methods + * moved into the codec implementation. + */ + +namespace Envoy { +namespace Http { +namespace Http1 { + +#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER + +using parser_type_t = enum http_parser_type; // NOLINT(readability-identifier-naming) +using parser_errno_t = enum http_errno; // NOLINT(readability-identifier-naming) +using parser_settings_t = http_parser_settings; // NOLINT(readability-identifier-naming) +using parser_t = http_parser; // NOLINT(readability-identifier-naming) +using parser_method = http_method; // NOLINT(readability-identifier-naming) + +inline void parser_init(parser_t* parser, parser_type_t parser_type, parser_settings_t*) { + http_parser_init(parser, parser_type); +} +const auto parser_execute = http_parser_execute; + +inline void parser_resume(parser_t* parser) { + http_parser_pause(parser, 0); +} + +inline parser_errno_t parser_get_errno(parser_t* parser) { + return HTTP_PARSER_ERRNO(parser); +} + +const auto parser_errno_name = http_errno_name; +const auto parser_method_name = http_method_str; + +#else + +using parser_type_t = llhttp_type_t; // NOLINT(readability-identifier-naming) +using parser_errno_t = llhttp_errno_t; // NOLINT(readability-identifier-naming) +using parser_settings_t = llhttp_settings_s; // NOLINT(readability-identifier-naming) +using parser_t = llhttp_t; // NOLINT(readability-identifier-naming) +using parser_method = llhttp_method; // NOLINT(readability-identifier-naming) + +const auto parser_init = llhttp_init; +inline size_t parser_execute(parser_t* parser, parser_settings_t*, const char* slice, int len) { + parser_errno_t err; + if (slice == nullptr || len == 0) { + err = llhttp_finish(parser); + } else { + err = llhttp_execute(parser, slice, len); + } + + size_t nread = len; + if (err != HPE_OK) { + nread = llhttp_get_error_pos(parser) - slice; + if (err == HPE_PAUSED_UPGRADE) { + err = HPE_OK; + llhttp_resume_after_upgrade(parser); + } + } + + return nread; +} +const auto parser_resume = llhttp_resume; +const auto parser_get_errno = llhttp_get_errno; +const auto parser_errno_name = llhttp_errno_name; +const auto parser_method_name = llhttp_method_name; + +#endif /* ENVOY_ENABLE_LEGACY_HTTP_PARSER */ + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 56c222144d05..8b70e7fa3063 
100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -353,10 +353,13 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + Buffer::OwnedImpl buffer("bad"); +#ifndef ENVOY_ENABLE_LEGACY_HTTP_PARSER + // TODO(dereka) fixme Http::MockStreamDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); +#endif - Buffer::OwnedImpl buffer("bad"); EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, "http/1.1 protocol error: HPE_INVALID_METHOD"); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); @@ -395,19 +398,12 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { EXPECT_EQ(0U, buffer.length()); } - -// We can no longer enable this feature because llhttp rejects this, http-parser didn't. -// llhttp returns INVALID_HEADER_TOKEN from llhttp_get_errno(&parser_) in dispatchSlice -// -// hmm, http-parser has INVALID_HEADER_TOKEN, why didn't that work? -// -// If we want to keep this feature, we'd need to manually parse to the next valid token. -// +#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER // Ensures that requests with invalid HTTP header values are not rejected // when the runtime guard is not enabled for the feature. -TEST_F(Http1ServerConnectionImplTest, DISABLED_ HeaderInvalidCharsRuntimeGuard) { +TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { TestScopedRuntime scoped_runtime; - // When the runtime-guarded feature is NOT enabled, invalid header values + // When the runtime-guarded feature is NOT enabled, invalid header values // should be accepted by the codec. Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.strict_header_validation", "false"}}); @@ -419,8 +415,9 @@ TEST_F(Http1ServerConnectionImplTest, DISABLED_ HeaderInvalidCharsRuntimeGuard) Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - EXPECT_NO_THROW(codec_->dispatch(buffer)); + codec_->dispatch(buffer); } +#endif // Ensures that requests with invalid HTTP header values are properly rejected // when the runtime guard is enabled for the feature. 
@@ -438,8 +435,14 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); + +#ifndef ENVOY_ENABLE_LEGACY_HTTP_PARSER EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); +#else + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: header value contains invalid chars"); +#endif } // Regression test for http-parser allowing embedded NULs in header values, @@ -457,8 +460,13 @@ TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); +#ifndef ENVOY_ENABLE_LEGACY_HTTP_PARSER EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); +#else + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: header value contains NUL"); +#endif } // Mutate an HTTP GET with embedded NULs, this should always be rejected in some From 3b5425314dc488818004ad9ecde31140d8bf41e6 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Thu, 14 Nov 2019 12:20:55 -0800 Subject: [PATCH 06/14] add release note Signed-off-by: Derek Argueta --- docs/root/intro/version_history.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index ac6cd79c7d67..a375301609dd 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -10,6 +10,7 @@ Version history * ext_authz: added :ref:`configurable ability` to send the :ref:`certificate` to the `ext_authz` service. * health check: gRPC health checker sets the gRPC deadline to the configured timeout duration. * http: support :ref:`auto_host_rewrite_header` in the dynamic forward proxy. +* http: use llhttp as default http parser * lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET` * logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. * redis: performance improvement for larger split commands by avoiding string copies. 
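
To make the compile-time switch introduced above concrete: the shim in parser_adapter.h lets codec code drive either parser through one set of free functions, selected by the ENVOY_ENABLE_LEGACY_HTTP_PARSER define (enabled via the bazel setting `--define legacy_http_parser=enabled` shown in bazel/BUILD; the default build takes the llhttp path). The sketch below is illustrative only and not part of the patch series: the function name parseOneRequest is invented for the example, the ConnectionImpl callback wiring is elided (the settings struct is left empty), and it assumes the shim header is on the include path.

#include <stdexcept>
#include <string>

#include "common/http/http1/parser_adapter.h"

namespace Envoy {
namespace Http {
namespace Http1 {

// Minimal sketch: run one buffer through the shim. Real codec code installs the
// ConnectionImpl callbacks in `settings` and keeps the parser across dispatches.
size_t parseOneRequest(const char* data, int len) {
  parser_settings_t settings{}; // all callbacks left null for this sketch
  parser_t parser;
  parser_init(&parser, HTTP_REQUEST, &settings);

  // Identical call whether llhttp or http-parser was compiled in.
  const size_t nread = parser_execute(&parser, &settings, data, len);

  if (parser_get_errno(&parser) != HPE_OK && parser_get_errno(&parser) != HPE_PAUSED) {
    // Mirrors ConnectionImpl::dispatchSlice(): surface the parser error by name.
    throw std::runtime_error(std::string("http/1.1 protocol error: ") +
                             parser_errno_name(parser_get_errno(&parser)));
  }
  return nread;
}

} // namespace Http1
} // namespace Http
} // namespace Envoy

The point of the shim is that this caller does not change between the two builds; only the typedefs and the small inline adapters behind parser_init/parser_execute differ. PATCH 07 below replaces this compile-time approach with a runtime-selected Parser interface.
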
From 082f0f7eba7b2fa11e07842da5bdc47c796d5154 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Sat, 16 Nov 2019 02:05:52 -0800 Subject: [PATCH 07/14] start switching to runtime flag Signed-off-by: Derek Argueta --- bazel/BUILD | 5 - bazel/envoy_internal.bzl | 3 - bazel/repository_locations.bzl | 6 +- include/envoy/server/options.h | 5 + source/common/http/http1/BUILD | 44 +++- source/common/http/http1/codec_impl.cc | 227 +++++++++--------- source/common/http/http1/codec_impl.h | 132 +++++----- .../common/http/http1/legacy_http_parser.cc | 176 ++++++++++++++ source/common/http/http1/legacy_http_parser.h | 36 +++ source/common/http/http1/llhttp_parser.cc | 150 ++++++++++++ source/common/http/http1/llhttp_parser.h | 37 +++ source/common/http/http1/parser.h | 124 ++++++++++ source/common/http/http1/parser_adapter.h | 82 ------- source/common/http/http1/parser_factory.cc | 32 +++ source/common/http/http1/parser_factory.h | 25 ++ source/server/BUILD | 1 + source/server/options_impl.cc | 4 +- source/server/options_impl.h | 2 + source/server/server.cc | 4 + test/common/http/http1/BUILD | 2 + test/common/http/http1/codec_fuzz_test.cc | 0 test/common/http/http1/codec_impl_test.cc | 187 +++++++++------ test/common/http/http1/codec_speed_test.cc | 58 +++++ 23 files changed, 989 insertions(+), 353 deletions(-) create mode 100644 source/common/http/http1/legacy_http_parser.cc create mode 100644 source/common/http/http1/legacy_http_parser.h create mode 100644 source/common/http/http1/llhttp_parser.cc create mode 100644 source/common/http/http1/llhttp_parser.h create mode 100644 source/common/http/http1/parser.h delete mode 100644 source/common/http/http1/parser_adapter.h create mode 100644 source/common/http/http1/parser_factory.cc create mode 100644 source/common/http/http1/parser_factory.h create mode 100644 test/common/http/http1/codec_fuzz_test.cc create mode 100644 test/common/http/http1/codec_speed_test.cc diff --git a/bazel/BUILD b/bazel/BUILD index 57d367f3cead..7d5ab93f6c3c 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -246,11 +246,6 @@ alias( }), ) -config_setting( - name = "enable_legacy_http_parser", - values = {"define": "legacy_http_parser=enabled"}, -) - config_setting( name = "linux_x86_64", values = {"cpu": "k8"}, diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index ff250fa9fa2c..8a8a88ad2d0b 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -66,9 +66,6 @@ def envoy_copts(repository, test = False): }) + select({ repository + "//bazel:enable_log_debug_assert_in_release": ["-DENVOY_LOG_DEBUG_ASSERT_IN_RELEASE"], "//conditions:default": [], - }) + select({ - repository + "//bazel:enable_legacy_http_parser": ["-DENVOY_ENABLE_LEGACY_HTTP_PARSER"], - "//conditions:default": [], }) + select({ # APPLE_USE_RFC_3542 is needed to support IPV6_PKTINFO in MAC OS. 
repository + "//bazel:apple": ["-D__APPLE_USE_RFC_3542"], diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 1e1815598700..6e820815339f 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -185,9 +185,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/nodejs/http-parser/archive/v2.9.0.tar.gz"], ), com_github_nodejs_llhttp = dict( - sha256 = "579a81e1e56088470a121e308127f205e22b8988b944b5f36b2a11c1724590d4", - strip_prefix = "llhttp-release-v1.1.1", - urls = ["https://github.com/nodejs/llhttp/archive/release/v1.1.1.tar.gz"], + sha256 = "76100c5b7948c8e49e8afa8c658e2a477bcb831e18854cf134aa3bfd44a5c669", + strip_prefix = "llhttp-release-v2.0.1", + urls = ["https://github.com/nodejs/llhttp/archive/release/v2.0.1.tar.gz"], ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index c19285f14d17..1bd369b88276 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -189,6 +189,11 @@ class Options { */ virtual bool fakeSymbolTableEnabled() const PURE; + /** + * @return whether to use the fake symbol table implementation. + */ + virtual bool legacyHttpParserEnabled() const PURE; + /** * @return bool indicating whether cpuset size should determine the number of worker threads. */ diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 4e5c7e9f204b..a4ff68c4581a 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -2,7 +2,6 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_binary", "envoy_cc_library", "envoy_package", ) @@ -20,7 +19,8 @@ envoy_cc_library( srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], deps = [ - ":parser_adapter", + ":parser_interface", + ":parser_factory_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:header_map_interface", @@ -74,11 +74,43 @@ envoy_cc_library( ) envoy_cc_library( - name = "parser_adapter", - hdrs = ["parser_adapter.h"], + name = "parser_interface", + hdrs = ["parser.h"], +) + +envoy_cc_library( + name = "llhttp_lib", + srcs = ["llhttp_parser.cc"], + hdrs = ["llhttp_parser.h"], + deps = [ + ":parser_interface", + "//source/common/common:assert_lib", + ], external_deps = [ - "http_parser", - "llhttp" + "llhttp", + ], +) + +envoy_cc_library( + name = "legacy_http_parser_lib", + srcs = ["legacy_http_parser.cc"], + hdrs = ["legacy_http_parser.h"], + deps = [ + ":parser_interface", + "//source/common/common:assert_lib", ], + external_deps = [ + "http_parser" + ] ) +envoy_cc_library( + name = "parser_factory_lib", + srcs = ["parser_factory.cc"], + hdrs = ["parser_factory.h"], + deps = [ + ":parser_interface", + ":legacy_http_parser_lib", + ":llhttp_lib", + ] +) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index c054d2a59d76..1796221a54ed 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -16,7 +16,6 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/header_formatter.h" -#include "common/http/http1/parser_adapter.h" #include "common/http/utility.h" #include "common/runtime/runtime_impl.h" @@ -313,45 +312,13 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ StreamEncoderImpl::encodeHeaders(headers, 
end_stream); } -parser_settings_t ConnectionImpl::settings_{ - [](parser_t* parser) -> int { - static_cast(parser->data)->onMessageBeginBase(); - return 0; - }, - [](parser_t* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onUrl(at, length); - return 0; - }, - nullptr, // on_status - [](parser_t* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderField(at, length); - return 0; - }, - [](parser_t* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderValue(at, length); - return 0; - }, - [](parser_t* parser) -> int { - return static_cast(parser->data)->onHeadersCompleteBase(); - }, - [](parser_t* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onBody(at, length); - return 0; - }, - [](parser_t* parser) -> int { - return static_cast(parser->data)->onMessageCompleteBase(); - }, - nullptr, // on_chunk_header - nullptr // on_chunk_complete -}; - const ToLowerTable& ConnectionImpl::toLowerTable() { static auto* table = new ToLowerTable(); return *table; } ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, - parser_type_t type, uint32_t max_headers_kb, + MessageType, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter) : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))}, @@ -361,9 +328,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count), strict_header_validation_( Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_header_validation")) { + // parser_ = ParserFactory::create(type, this); output_buffer_.setWatermarks(connection.bufferLimit()); - parser_init(&parser_, type, &settings_); - parser_.data = this; } void ConnectionImpl::completeLastHeader() { @@ -398,7 +364,7 @@ bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { data.getRawSlices(slices.begin(), num_slices); for (const Buffer::RawSlice& slice : slices) { total_parsed += slice.len_; - onBody(static_cast(slice.mem_), slice.len_); + processBody(slice); } ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, total_parsed); data.drain(total_parsed); @@ -413,7 +379,7 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } // Always unpause before dispatch. - parser_resume(&parser_); + parser_->resume(); ssize_t total_parsed = 0; if (data.length() > 0) { @@ -436,21 +402,22 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { } size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - const size_t bytes_read = parser_execute(&parser_, &settings_, slice, len); + ASSERT(parser_ != nullptr); + const size_t bytes_read = parser_->execute(slice, len); - if (parser_get_errno(&parser_) != HPE_OK && parser_get_errno(&parser_) != HPE_PAUSED) { + if (parser_->getErrno() != static_cast(ParserStatus::Ok) && parser_->getErrno() != static_cast(ParserStatus::Paused)) { sendProtocolError(); throw CodecProtocolException("http/1.1 protocol error: " + - std::string(parser_errno_name(parser_get_errno(&parser_)))); + std::string(parser_->errnoName())); } return bytes_read; } -void ConnectionImpl::onHeaderField(const char* data, size_t length) { +int ConnectionImpl::onHeaderFieldBase(const char* data, size_t length) { if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. 
- return; + return 0; } if (header_parsing_state_ == HeaderParsingState::Value) { @@ -458,15 +425,15 @@ void ConnectionImpl::onHeaderField(const char* data, size_t length) { } current_header_field_.append(data, length); + return 0; } -void ConnectionImpl::onHeaderValue(const char* data, size_t length) { +int ConnectionImpl::onHeaderValueBase(const char* data, size_t length) { if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. - return; + return 0; } -#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER const absl::string_view header_value = absl::string_view(data, length); if (strict_header_validation_) { if (!Http::HeaderUtility::headerIsValid(header_value)) { @@ -475,14 +442,13 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { sendProtocolError(); throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } - } else if (header_value.find('\0') != absl::string_view::npos) { + } else if (ParserFactory::usesLegacyParser() && header_value.find('\0') != absl::string_view::npos) { // http-parser should filter for this // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. HeaderStrings // have an invariant that they must not contain embedded zero characters // (NUL, ASCII 0x0). throw CodecProtocolException("http/1.1 protocol error: header value contains NUL"); } -#endif header_parsing_state_ = HeaderParsingState::Value; current_header_value_.append(data, length); @@ -497,16 +463,18 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { sendProtocolError(); throw CodecProtocolException("headers size exceeds limit"); } + + return 0; } -int ConnectionImpl::onHeadersCompleteBase() { +void ConnectionImpl::onHeadersCompleteBase() { ENVOY_CONN_LOG(trace, "headers complete", connection_); completeLastHeader(); // Validate that the completed HeaderMap's cached byte size exists and is correct. // This assert iterates over the HeaderMap. ASSERT(current_header_map_->byteSize().has_value() && current_header_map_->byteSize() == current_header_map_->byteSizeInternal()); - if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { + if (!(parser_->httpMajor() == 1 && parser_->httpMinor() == 1)) { // This is not necessarily true, but it's good enough since higher layers only care if this is // HTTP/1.1 or not. protocol_ = Protocol::Http10; @@ -537,13 +505,6 @@ int ConnectionImpl::onHeadersCompleteBase() { } seen_content_length_ = current_header_map_->ContentLength() != nullptr; - - const int rc = onHeadersComplete(std::move(current_header_map_)); - current_header_map_.reset(); - header_parsing_state_ = HeaderParsingState::Done; - - // Returning 2 informs llhttp to not expect a body or further data on this connection. - return handling_upgrade_ ? 2 : rc; } int ConnectionImpl::onMessageCompleteBase() { @@ -553,14 +514,10 @@ int ConnectionImpl::onMessageCompleteBase() { // upgrade payload will be treated as stream body. 
ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); -#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER - http_parser_pause(&parser_, 1); - return HPE_OK; -#else - return HPE_PAUSED; -#endif + return parser_->pause(); } - return onMessageComplete(); + + return 0; } void ConnectionImpl::onMessageBeginBase() { @@ -572,7 +529,6 @@ void ConnectionImpl::onMessageBeginBase() { ASSERT(!current_header_map_); current_header_map_ = std::make_unique(); header_parsing_state_ = HeaderParsingState::Field; - onMessageBegin(); } void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { @@ -585,9 +541,11 @@ ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, Stat ServerConnectionCallbacks& callbacks, Http1Settings settings, uint32_t max_request_headers_kb, const uint32_t max_request_headers_count) - : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, + : ConnectionImpl(connection, stats, MessageType::Request, max_request_headers_kb, max_request_headers_count, formatter(settings)), - callbacks_(callbacks), codec_settings_(settings) {} + callbacks_(callbacks), codec_settings_(settings) { + parser_ = ParserFactory::create(MessageType::Request, this); +} void ServerConnectionImpl::onEncodeComplete() { ASSERT(active_request_); @@ -602,12 +560,12 @@ void ServerConnectionImpl::onEncodeComplete() { void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int method) { HeaderString path(Headers::get().Path); - bool is_connect = (method == HTTP_CONNECT); + bool is_connect = (method == static_cast(Method::Connect)); // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. if (!active_request_->request_url_.getStringView().empty() && (active_request_->request_url_.getStringView()[0] == '/' || - ((method == HTTP_OPTIONS) && active_request_->request_url_.getStringView()[0] == '*'))) { + ((method == static_cast(Method::Options)) && active_request_->request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request_->request_url_)); return; } @@ -642,23 +600,26 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho active_request_->request_url_.clear(); } -int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { +int ServerConnectionImpl::onHeadersComplete() { + onHeadersCompleteBase(); + + int rc = 0; // Handle the case where response happens prior to request complete. It's up to upper layer code // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. if (active_request_) { - const char* method_string = parser_method_name(static_cast(parser_.method)); + const char* method_string = parser_->methodName(); // Inform the response encoder about any HEAD method, so it can set content // length and transfer encoding headers correctly. - active_request_->response_encoder_.isResponseToHeadRequest(parser_.method == HTTP_HEAD); + active_request_->response_encoder_.isResponseToHeadRequest(parser_->method() == static_cast(Method::Head)); // Currently, CONNECT is not supported, however; llhttp_parse_url needs to know about // CONNECT - handlePath(*headers, parser_.method); + handlePath(*current_header_map_, parser_->method()); ASSERT(active_request_->request_url_.empty()); - headers->setMethod(method_string); + current_header_map_->setMethod(method_string); // Determine here whether we have a body or not. 
This uses the new RFC semantics where the // presence of content-length or chunked transfer-encoding indicates a body vs. a particular @@ -666,53 +627,71 @@ int ServerConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy // scenario where the higher layers stream through and implicitly switch to chunked transfer // encoding because end stream with zero body length has not yet been indicated. - if (parser_.flags & F_CHUNKED || - (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) { - active_request_->request_decoder_->decodeHeaders(std::move(headers), false); + if (parser_->flags() & static_cast(Flags::Chunked) || + (parser_->contentLength() > 0 && parser_->contentLength() != ULLONG_MAX) || handling_upgrade_) { + active_request_->request_decoder_->decodeHeaders(std::move(current_header_map_), false); // If the connection has been closed (or is closing) after decoding headers, pause the parser // so we return control to the caller. if (connection_.state() != Network::Connection::State::Open) { -#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER - http_parser_pause(&parser_, 1); - return HPE_OK; -#else - return HPE_PAUSED; -#endif + rc = parser_->pause(); } } else { - deferred_end_stream_headers_ = std::move(headers); + deferred_end_stream_headers_ = std::move(current_header_map_); } } - return HPE_OK; + // TODO(dereka) share below with ClientConnectionImpl + current_header_map_.reset(); + header_parsing_state_ = HeaderParsingState::Done; + + // Returning 2 informs llhttp to not expect a body or further data on this connection. + return handling_upgrade_ ? 2 : rc; } -void ServerConnectionImpl::onMessageBegin() { +int ServerConnectionImpl::onMessageBegin() { + onMessageBeginBase(); + if (!resetStreamCalled()) { ASSERT(!active_request_); active_request_ = std::make_unique(*this, header_key_formatter_.get()); active_request_->request_decoder_ = &callbacks_.newStream(active_request_->response_encoder_); } + + return 0; } -void ServerConnectionImpl::onUrl(const char* data, size_t length) { +int ServerConnectionImpl::onUrl(const char* data, size_t length) { if (active_request_) { active_request_->request_url_.append(data, length); } + + return 0; +} + +int ServerConnectionImpl::onHeaderField(const char* data, size_t length) { + return onHeaderFieldBase(data, length); } -void ServerConnectionImpl::onBody(const char* data, size_t length) { +int ServerConnectionImpl::onHeaderValue(const char* data, size_t length) { + return onHeaderValueBase(data, length); +} + +int ServerConnectionImpl::onBody(const char* data, size_t length) { ASSERT(!deferred_end_stream_headers_); if (active_request_) { ENVOY_CONN_LOG(trace, "body size={}", connection_, length); Buffer::OwnedImpl buffer(data, length); active_request_->request_decoder_->decodeData(buffer, false); } + + return 0; } int ServerConnectionImpl::onMessageComplete() { + onMessageCompleteBase(); + if (active_request_) { Buffer::OwnedImpl buffer; active_request_->remote_complete_ = true; @@ -729,12 +708,7 @@ int ServerConnectionImpl::onMessageComplete() { // Always pause the parser so that the calling code can process 1 request at a time and apply // back pressure. However this means that the calling code needs to detect if there is more data // in the buffer and dispatch it again. 
-#ifndef ENVOY_ENABLE_LEGACY_HTTP_PARSER - return HPE_PAUSED; -#else - http_parser_pause(&parser_, 1); - return 0; -#endif + return parser_->pause(); } void ServerConnectionImpl::onResetStream(StreamResetReason reason) { @@ -768,16 +742,22 @@ void ServerConnectionImpl::onBelowLowWatermark() { } } +void ServerConnectionImpl::processBody(const Buffer::RawSlice& slice) { + onBody(static_cast(slice.mem_), slice.len_); +} + ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, ConnectionCallbacks&, const Http1Settings& settings, const uint32_t max_response_headers_count) - : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB, - max_response_headers_count, formatter(settings)) {} + : ConnectionImpl(connection, stats, MessageType::Response, MAX_RESPONSE_HEADERS_KB, + max_response_headers_count, formatter(settings)) { + parser_ = ParserFactory::create(MessageType::Response, this); +} bool ClientConnectionImpl::cannotHaveBody() { if ((!pending_responses_.empty() && pending_responses_.front().head_request_) || - parser_.status_code == 204 || parser_.status_code == 304 || - (parser_.status_code >= 200 && (seen_content_length_ && parser_.content_length == 0))) { + parser_->statusCode() == 204 || parser_->statusCode() == 304 || + (parser_->statusCode() >= 200 && (seen_content_length_ && parser_->contentLength() == 0))) { return true; } else { return false; @@ -804,46 +784,71 @@ void ClientConnectionImpl::onEncodeHeaders(const HeaderMap& headers) { } } -int ClientConnectionImpl::onHeadersComplete(HeaderMapImplPtr&& headers) { - headers->setStatus(parser_.status_code); +int ClientConnectionImpl::onMessageBegin() { + onMessageBeginBase(); + return 0; +} + +int ClientConnectionImpl::onHeaderField(const char* data, size_t length) { + return onHeaderFieldBase(data, length); +} + +int ClientConnectionImpl::onHeaderValue(const char* data, size_t length) { + return onHeaderValueBase(data, length); +} + +int ClientConnectionImpl::onHeadersComplete() { + onHeadersCompleteBase(); + current_header_map_->setStatus(parser_->statusCode()); // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. if (pending_responses_.empty() && !resetStreamCalled()) { - throw PrematureResponseException(std::move(headers)); + throw PrematureResponseException(std::move(current_header_map_)); } else if (!pending_responses_.empty()) { - if (parser_.status_code == 100) { + if (parser_->statusCode() == 100) { // http-parser treats 100 continue headers as their own complete response. // Swallow the spurious onMessageComplete and continue processing. ignore_message_complete_for_100_continue_ = true; - pending_responses_.front().decoder_->decode100ContinueHeaders(std::move(headers)); + pending_responses_.front().decoder_->decode100ContinueHeaders(std::move(current_header_map_)); } else if (cannotHaveBody()) { - deferred_end_stream_headers_ = std::move(headers); + deferred_end_stream_headers_ = std::move(current_header_map_); } else { - pending_responses_.front().decoder_->decodeHeaders(std::move(headers), false); + pending_responses_.front().decoder_->decodeHeaders(std::move(current_header_map_), false); } } // Here we deal with cases where the response cannot have a body, but llhttp does not deal // with it for us. - return cannotHaveBody() ? 1 : 0; + const int rc = cannotHaveBody() ? 
1 : 0; + + // TODO(dereka) share below with ServerConnectionImpl + current_header_map_.reset(); + header_parsing_state_ = HeaderParsingState::Done; + + // Returning 2 informs llhttp to not expect a body or further data on this connection. + return handling_upgrade_ ? 2 : rc; } -void ClientConnectionImpl::onBody(const char* data, size_t length) { +int ClientConnectionImpl::onBody(const char* data, size_t length) { ASSERT(!deferred_end_stream_headers_); if (!pending_responses_.empty()) { Buffer::OwnedImpl buffer; buffer.add(data, length); pending_responses_.front().decoder_->decodeData(buffer, false); } + + return 0; } int ClientConnectionImpl::onMessageComplete() { + onMessageCompleteBase(); + ENVOY_CONN_LOG(trace, "message complete", connection_); if (ignore_message_complete_for_100_continue_) { ignore_message_complete_for_100_continue_ = false; - return HPE_OK; + return 0; } if (!pending_responses_.empty()) { // After calling decodeData() with end stream set to true, we should no longer be able to reset. @@ -871,7 +876,7 @@ int ClientConnectionImpl::onMessageComplete() { } } - return HPE_OK; + return 0; } void ClientConnectionImpl::onResetStream(StreamResetReason reason) { @@ -897,6 +902,10 @@ void ClientConnectionImpl::onBelowLowWatermark() { } } +void ClientConnectionImpl::processBody(const Buffer::RawSlice& slice) { + onBody(static_cast(slice.mem_), slice.len_); +} + } // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 7a1893b0d32b..13904c0bc478 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -17,7 +17,8 @@ #include "common/http/codes.h" #include "common/http/header_map_impl.h" #include "common/http/http1/header_formatter.h" -#include "common/http/http1/parser_adapter.h" +#include "common/http/http1/parser.h" +#include "common/http/http1/parser_factory.h" namespace Envoy { namespace Http { @@ -191,8 +192,32 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable parser_; HeaderMapPtr deferred_end_stream_headers_; Http::Code error_code_{Http::Code::BadRequest}; bool handling_upgrade_{}; bool seen_content_length_{false}; const HeaderKeyFormatterPtr header_key_formatter_; + HeaderMapImplPtr current_header_map_; + HeaderParsingState header_parsing_state_{HeaderParsingState::Field}; + private: - enum class HeaderParsingState { Field, Value, Done }; /** * Called in order to complete an in progress header decode. @@ -222,54 +249,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_request_; @@ -367,7 +357,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { /** * Implementation of Http::ClientConnection for HTTP/1.1. 
*/ -class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl, public ParserCallbacks { public: ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, ConnectionCallbacks& callbacks, const Http1Settings& settings, @@ -376,6 +366,18 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // Http::ClientConnection StreamEncoder& newStream(StreamDecoder& response_decoder) override; + // ParserCallbacks + int onMessageBegin() override; + int onUrl(const char*, size_t) override { return 0; } + int onStatus() override { return 0; } + int onHeaderField(const char* data, size_t length) override; + int onHeaderValue(const char* data, size_t length) override; + int onHeadersComplete() override; + int onBody(const char* data, size_t length) override; + int onMessageComplete() override; + int onChunkHeader() override { return 0; } + int onChunkComplete() override { return 0; } + private: struct PendingResponse { PendingResponse(StreamDecoder* decoder) : decoder_(decoder) {} @@ -389,15 +391,11 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // ConnectionImpl void onEncodeComplete() override {} void onEncodeHeaders(const HeaderMap& headers) override; - void onMessageBegin() override {} - void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - int onHeadersComplete(HeaderMapImplPtr&& headers) override; - void onBody(const char* data, size_t length) override; - int onMessageComplete() override; void onResetStream(StreamResetReason reason) override; void sendProtocolError() override {} void onAboveHighWatermark() override; void onBelowLowWatermark() override; + void processBody(const Buffer::RawSlice& slice) override; std::unique_ptr request_encoder_; std::list pending_responses_; diff --git a/source/common/http/http1/legacy_http_parser.cc b/source/common/http/http1/legacy_http_parser.cc new file mode 100644 index 000000000000..61fd9dfa792a --- /dev/null +++ b/source/common/http/http1/legacy_http_parser.cc @@ -0,0 +1,176 @@ +#include "common/http/http1/legacy_http_parser.h" + +#include + +#include + +#include "common/common/assert.h" +#include "common/http/http1/parser.h" + +namespace Envoy { +namespace Http { +namespace Http1 { + +class LegacyHttpParserImpl::Impl { +public: + // Possible idea: have an overload that doesn't accept `data` which appears + // to just be used for callbacks? If no data, then leave settings as nullptrs? 
+ // https://github.com/nodejs/http-parser/blob/master/http_parser.h#L320 + + // so far unused + Impl(http_parser_type type) { + http_parser_init(&parser_, type); + } + + Impl(http_parser_type type, void* data) : Impl(type) { + parser_.data = data; + settings_ = { + [](http_parser* parser) -> int { + std::cout << "message begin callback" << std::endl; + return static_cast(parser->data)->onMessageBegin(); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onUrl(at, length); + }, + // TODO(dereka) onStatus + nullptr, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onHeaderField(at, length); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onHeaderValue(at, length); + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onHeadersComplete(); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onBody(at, length); + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onMessageComplete(); + }, + nullptr, // TODO(dereka) onChunkHeader + nullptr // TODO(dereka) onChunkComplete + }; + } + + size_t execute(const char* slice, int len) { + return http_parser_execute(&parser_, &settings_, slice, len); + } + + void resume() { + http_parser_pause(&parser_, 0); + } + + int pause() { + http_parser_pause(&parser_, 1); + return HPE_PAUSED; + } + + int getErrno() { + return HTTP_PARSER_ERRNO(&parser_); + } + + int statusCode() const { + return parser_.status_code; + } + + int httpMajor() const { + return parser_.http_major; + } + + int httpMinor() const { + return parser_.http_minor; + } + + uint64_t contentLength() const { + return parser_.content_length; + } + + int flags() const { + return parser_.flags; + } + + uint16_t method() const { + return parser_.method; + } + + const char* methodName() const { + return http_method_str(static_cast(parser_.method)); + } + +private: + http_parser parser_; + http_parser_settings settings_; +}; + +LegacyHttpParserImpl::LegacyHttpParserImpl(MessageType type, void* data) { + http_parser_type parser_type; + switch (type) { + case MessageType::Request: + parser_type = HTTP_REQUEST; + break; + case MessageType::Response: + parser_type = HTTP_RESPONSE; + default: + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + + impl_ = std::make_unique(parser_type, data); +} + +// Because we have a pointer-to-impl using std::unique_ptr, we must place the destructor in the +// same compilation unit so that the destructor has a complete definition of Impl. 
+LegacyHttpParserImpl::~LegacyHttpParserImpl() = default; + +int LegacyHttpParserImpl::execute(const char* slice, int len) { + return impl_->execute(slice, len); +} + +void LegacyHttpParserImpl::resume() { + impl_->resume(); +} + +int LegacyHttpParserImpl::pause() { + return impl_->pause(); +} + +int LegacyHttpParserImpl::getErrno() { + return impl_->getErrno(); +} + +int LegacyHttpParserImpl::statusCode() const { + return impl_->statusCode(); +} + +int LegacyHttpParserImpl::httpMajor() const { + return impl_->httpMajor(); +} + +int LegacyHttpParserImpl::httpMinor() const { + return impl_->httpMinor(); +} + +uint64_t LegacyHttpParserImpl::contentLength() const { + return impl_->contentLength(); +} + +int LegacyHttpParserImpl::flags() const { + return impl_->flags(); +} + +uint16_t LegacyHttpParserImpl::method() const { + return impl_->method(); +} + +const char* LegacyHttpParserImpl::methodName() const { + return impl_->methodName(); +} + +const char* LegacyHttpParserImpl::errnoName() { + return http_errno_name(static_cast(impl_->getErrno())); +} + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/legacy_http_parser.h b/source/common/http/http1/legacy_http_parser.h new file mode 100644 index 000000000000..a39ad2276dd1 --- /dev/null +++ b/source/common/http/http1/legacy_http_parser.h @@ -0,0 +1,36 @@ +#pragma once + +#include "common/http/http1/parser.h" + +#include + +namespace Envoy { +namespace Http { +namespace Http1 { + +class LegacyHttpParserImpl : public Parser { +public: + LegacyHttpParserImpl(MessageType type, void* data); + ~LegacyHttpParserImpl(); + int execute(const char* data, int len) override; + void resume() override; + int pause() override; + int getErrno() override; + int statusCode() const override; + int httpMajor() const override; + int httpMinor() const override; + uint64_t contentLength() const override; + int flags() const override; + uint16_t method() const override; + const char* methodName() const override; + const char* errnoName() override; + bool usesOldImpl() const override { return true; } + +private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/llhttp_parser.cc b/source/common/http/http1/llhttp_parser.cc new file mode 100644 index 000000000000..c83753fc7df7 --- /dev/null +++ b/source/common/http/http1/llhttp_parser.cc @@ -0,0 +1,150 @@ +#include "common/http/http1/llhttp_parser.h" + +#include "common/common/assert.h" +#include "common/http/http1/parser.h" + +#include + +namespace Envoy { +namespace Http { +namespace Http1 { + +class LlHttpParserImpl::Impl { +public: + Impl(llhttp_type_t type, void* data) { + llhttp_init(&parser_, type, &settings_); + parser_.data = data; + } + + size_t execute(const char* slice, int len) { + llhttp_errno_t err; + if (slice == nullptr || len == 0) { + err = llhttp_finish(&parser_); + } else { + err = llhttp_execute(&parser_, slice, len); + } + + size_t nread = len; + if (err != HPE_OK) { + nread = llhttp_get_error_pos(&parser_) - slice; + if (err == HPE_PAUSED_UPGRADE) { + err = HPE_OK; + llhttp_resume_after_upgrade(&parser_); + } + } + + return nread; + } + + void resume() { + llhttp_resume(&parser_); + } + + int getErrno() { + return llhttp_get_errno(&parser_); + } + + int statusCode() const { + return parser_.status_code; + } + + int httpMajor() const { + return parser_.http_major; + } + + int httpMinor() const { + return parser_.http_minor; + } + + uint64_t 
contentLength() const { + return parser_.content_length; + } + + int flags() const { + return parser_.flags; + } + + uint16_t method() const { + return parser_.method; + } + + const char* methodName() const { + return llhttp_method_name(static_cast(parser_.method)); + } + +private: + llhttp_t parser_; + llhttp_settings_s settings_; +}; + +LlHttpParserImpl::LlHttpParserImpl(MessageType type, void* data) { + llhttp_type_t llhttp_type; + switch (type) { + case MessageType::Request: + llhttp_type = HTTP_REQUEST; + break; + case MessageType::Response: + llhttp_type = HTTP_RESPONSE; + break; + default: + // We strictly use the parser for either request or response, not both. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + + impl_ = std::make_unique(llhttp_type, data); +} + +LlHttpParserImpl::~LlHttpParserImpl() = default; + +int LlHttpParserImpl::execute(const char* slice, int len) { + return impl_->execute(slice, len); +} + +void LlHttpParserImpl::resume() { + impl_->resume(); +} + +int LlHttpParserImpl::pause() { + // TODO(dereka) do we actually need to call llhttp_pause(&parser_); ? + return HPE_PAUSED; +} + +int LlHttpParserImpl::getErrno() { + return impl_->getErrno(); +} + +int LlHttpParserImpl::statusCode() const { + return impl_->statusCode(); +} + +int LlHttpParserImpl::httpMajor() const { + return impl_->httpMajor(); +} + +int LlHttpParserImpl::httpMinor() const { + return impl_->httpMinor(); +} + +uint64_t LlHttpParserImpl::contentLength() const { + return impl_->contentLength(); +} + +int LlHttpParserImpl::flags() const { + return impl_->flags(); +} + +uint16_t LlHttpParserImpl::method() const { + return impl_->method(); +} + +const char* LlHttpParserImpl::methodName() const { + return impl_->methodName(); +} + +const char* LlHttpParserImpl::errnoName() { + return llhttp_errno_name(static_cast(impl_->getErrno())); +} + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/llhttp_parser.h b/source/common/http/http1/llhttp_parser.h new file mode 100644 index 000000000000..a15626222cd6 --- /dev/null +++ b/source/common/http/http1/llhttp_parser.h @@ -0,0 +1,37 @@ +#pragma once + +#include "common/http/http1/parser.h" + +#include + +namespace Envoy { +namespace Http { +namespace Http1 { + +class LlHttpParserImpl : public Parser { +public: + LlHttpParserImpl(MessageType type, void* data); + ~LlHttpParserImpl(); + int execute(const char* data, int len) override; + void resume() override; + int pause() override; + int getErrno() override; + int statusCode() const override; + int httpMajor() const override; + int httpMinor() const override; + uint64_t contentLength() const override; + int flags() const override; + uint16_t method() const override; + const char* methodName() const override; + const char* errnoName() override; + bool usesOldImpl() const override { return false; } + +private: + // PImpl pattern is used to contain the llhttp library to avoid global namespace collisions. 
+ class Impl; + std::unique_ptr impl_; +}; + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/parser.h b/source/common/http/http1/parser.h new file mode 100644 index 000000000000..04ba0edbbd7f --- /dev/null +++ b/source/common/http/http1/parser.h @@ -0,0 +1,124 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" + +/** + * This is a temporary shim to easily allow switching between llhttp and http-parser at compile + * time by providing a consistent interface, then adapting them to the respective implementations. + * + * When http-parser is ready to be removed, this shim should also disappear and the llhttp_* methods + * moved into the codec implementation. + */ + +namespace Envoy { +namespace Http { +namespace Http1 { + +enum class ParserType { + Legacy, + LlHttp +}; + +enum class MessageType { + Request, + Response +}; + +/** + * Callbacks base class. Any users of the HTTP1 parser who want to use the parsed data needs to implement + * this interface and pass the object to the `data` parameter of ParserFactory::create. + */ +class ParserCallbacks { +public: + virtual ~ParserCallbacks() = default; + /** + * Called when a request/response is beginning. + */ + virtual int onMessageBegin() PURE; + + /** + * Called when URL data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual int onUrl(const char* data, size_t length) PURE; + virtual int onStatus() PURE; + + /** + * Called when header field data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual int onHeaderField(const char* data, size_t length) PURE; + + /** + * Called when header value data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual int onHeaderValue(const char* data, size_t length) PURE; + + /** + * Called when headers are complete. + * @return 0 if no error, 1 if there should be no body. + */ + virtual int onHeadersComplete() PURE; + + /** + * Called when body data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual int onBody(const char* data, size_t length) PURE; + + /** + * Called when the request/response is complete. + */ + virtual int onMessageComplete() PURE; + virtual int onChunkHeader() PURE; // shrug + virtual int onChunkComplete() PURE; // shrug +}; + +/** + * Parser interface. 
+ */ +class Parser { +public: + virtual ~Parser() = default; + virtual int execute(const char* slice, int len) PURE; + virtual void resume() PURE; + virtual int pause() PURE; + virtual int getErrno() PURE; + virtual int statusCode() const PURE; + virtual int httpMajor() const PURE; + virtual int httpMinor() const PURE; + virtual uint64_t contentLength() const PURE; + virtual int flags() const PURE; + virtual uint16_t method() const PURE; + virtual const char* methodName() const PURE; + virtual const char* errnoName() PURE; + virtual bool usesOldImpl() const PURE; +}; + +enum class Flags { + Chunked = 1, +}; + +enum class ParserStatus { + Ok = 0, + Paused = 31, +}; + +enum class Method { + Head = 2, + Connect = 5, + Options = 6, +}; + +using ParserPtr = std::unique_ptr; + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/parser_adapter.h b/source/common/http/http1/parser_adapter.h deleted file mode 100644 index 8b617157804e..000000000000 --- a/source/common/http/http1/parser_adapter.h +++ /dev/null @@ -1,82 +0,0 @@ -#pragma once - -#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER -#include -#else -#include -#endif /* ENVOY_ENABLE_LEGACY_HTTP_PARSER */ - -/** - * This is a temporary shim to easily allow switching between llhttp and http-parser at compile - * time by providing a consistent interface, then adapting them to the respective implementations. - * - * When http-parser is ready to be removed, this shim should also disappear and the llhttp_* methods - * moved into the codec implementation. - */ - -namespace Envoy { -namespace Http { -namespace Http1 { - -#ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER - -using parser_type_t = enum http_parser_type; // NOLINT(readability-identifier-naming) -using parser_errno_t = enum http_errno; // NOLINT(readability-identifier-naming) -using parser_settings_t = http_parser_settings; // NOLINT(readability-identifier-naming) -using parser_t = http_parser; // NOLINT(readability-identifier-naming) -using parser_method = http_method; // NOLINT(readability-identifier-naming) - -inline void parser_init(parser_t* parser, parser_type_t parser_type, parser_settings_t*) { - http_parser_init(parser, parser_type); -} -const auto parser_execute = http_parser_execute; - -inline void parser_resume(parser_t* parser) { - http_parser_pause(parser, 0); -} - -inline parser_errno_t parser_get_errno(parser_t* parser) { - return HTTP_PARSER_ERRNO(parser); -} - -const auto parser_errno_name = http_errno_name; -const auto parser_method_name = http_method_str; - -#else - -using parser_type_t = llhttp_type_t; // NOLINT(readability-identifier-naming) -using parser_errno_t = llhttp_errno_t; // NOLINT(readability-identifier-naming) -using parser_settings_t = llhttp_settings_s; // NOLINT(readability-identifier-naming) -using parser_t = llhttp_t; // NOLINT(readability-identifier-naming) -using parser_method = llhttp_method; // NOLINT(readability-identifier-naming) - -const auto parser_init = llhttp_init; -inline size_t parser_execute(parser_t* parser, parser_settings_t*, const char* slice, int len) { - parser_errno_t err; - if (slice == nullptr || len == 0) { - err = llhttp_finish(parser); - } else { - err = llhttp_execute(parser, slice, len); - } - - size_t nread = len; - if (err != HPE_OK) { - nread = llhttp_get_error_pos(parser) - slice; - if (err == HPE_PAUSED_UPGRADE) { - err = HPE_OK; - llhttp_resume_after_upgrade(parser); - } - } - - return nread; -} -const auto parser_resume = llhttp_resume; -const auto parser_get_errno = llhttp_get_errno; 
-const auto parser_errno_name = llhttp_errno_name; -const auto parser_method_name = llhttp_method_name; - -#endif /* ENVOY_ENABLE_LEGACY_HTTP_PARSER */ - -} // namespace Http1 -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http1/parser_factory.cc b/source/common/http/http1/parser_factory.cc new file mode 100644 index 000000000000..d2be218239ce --- /dev/null +++ b/source/common/http/http1/parser_factory.cc @@ -0,0 +1,32 @@ +#include "common/http/http1/parser_factory.h" + +#include + +#include "common/http/http1/llhttp_parser.h" +#include "common/http/http1/legacy_http_parser.h" + +namespace Envoy { +namespace Http { +namespace Http1 { + +bool ParserFactory::use_legacy_parser_ = false; + +ParserPtr ParserFactory::create(MessageType type, void* data) { + if (usesLegacyParser()) { + return std::make_unique(type, data); + } + + return std::make_unique(type, data); +} + +bool ParserFactory::usesLegacyParser() { + return use_legacy_parser_; +} + +void ParserFactory::useLegacy(bool use_legacy_parser) { + use_legacy_parser_ = use_legacy_parser; +} + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/parser_factory.h b/source/common/http/http1/parser_factory.h new file mode 100644 index 000000000000..3a116368910c --- /dev/null +++ b/source/common/http/http1/parser_factory.h @@ -0,0 +1,25 @@ +#pragma once + +#include "common/http/http1/parser.h" + +namespace Envoy { +namespace Http { +namespace Http1 { + +class ParserFactory { +public: + /** + * Creates a new parser implementation. + */ + static ParserPtr create(MessageType type, void* data); + + static bool usesLegacyParser(); + + static void useLegacy(bool use_legacy_parser); +private: + static bool use_legacy_parser_; +}; + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index eb35de629aa3..f97a93739645 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -410,6 +410,7 @@ envoy_cc_library( "//source/common/grpc:context_lib", "//source/common/http:codes_lib", "//source/common/http:context_lib", + "//source/common/http/http1:parser_factory_lib", "//source/common/init:manager_lib", "//source/common/local_info:local_info_lib", "//source/common/memory:heap_shrinker_lib", diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 59aceafddd61..cd5fd9bb0d3e 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -116,6 +116,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, TCLAP::ValueArg use_fake_symbol_table("", "use-fake-symbol-table", "Use fake symbol table implementation", false, true, "bool", cmd); + TCLAP::ValueArg use_legacy_http_parser("", "use-legacy-http-parser", "Use the legacy Node HTTP parser implementation", false, true, "bool", cmd); cmd.setExceptionHandling(false); try { cmd.parse(argc, argv); @@ -139,6 +140,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, mutex_tracing_enabled_ = enable_mutex_tracing.getValue(); fake_symbol_table_enabled_ = use_fake_symbol_table.getValue(); + legacy_http_parser_enabled_ = use_legacy_http_parser.getValue(); cpuset_threads_ = cpuset_threads.getValue(); log_level_ = default_log_level; @@ -306,6 +308,6 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), parent_shutdown_time_(900), mode_(Server::Mode::Serve), hot_restart_disabled_(false), 
signal_handling_enabled_(true), mutex_tracing_enabled_(false), cpuset_threads_(false), - fake_symbol_table_enabled_(false) {} + fake_symbol_table_enabled_(false), legacy_http_parser_enabled_(false) {} } // namespace Envoy diff --git a/source/server/options_impl.h b/source/server/options_impl.h index 2d635fd91b9c..4d34142ad0fd 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -121,6 +121,7 @@ class OptionsImpl : public Server::Options, protected Logger::LoggableregisteredNames(), ", ")); } + Http::Http1::ParserFactory::useLegacy(options.legacyHttpParserEnabled()); + ENVOY_LOG(info, "http implementation: {}", Http::Http1::ParserFactory::usesLegacyParser() ? "old (http-parser" : "new (llhttp)"); + // Handle configuration that needs to take place prior to the main configuration load. InstanceUtil::loadBootstrapConfig(bootstrap_, options, messageValidationContext().staticValidationVisitor(), *api_); diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index e117fa725623..2b5e86bacecf 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -40,6 +40,8 @@ envoy_cc_test( ], ) +envoy_cc_test + envoy_cc_test( name = "conn_pool_test", srcs = ["conn_pool_test.cc"], diff --git a/test/common/http/http1/codec_fuzz_test.cc b/test/common/http/http1/codec_fuzz_test.cc new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 8b70e7fa3063..94fe83acd16d 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -42,8 +42,34 @@ std::string createHeaderFragment(int num_headers) { } } // namespace -class Http1ServerConnectionImplTest : public testing::Test { +enum class CodecImplementation { + Old, // original node http-parser + New // llhttp +}; + +class Http1CodecParamTest : public testing::TestWithParam { +protected: + Http1CodecParamTest() { + ParserFactory::useLegacy(GetParam() == CodecImplementation::Old); + } + ~Http1CodecParamTest() override = default; + + /** Verify that a parser has been constructed using the expected implementation. 
*/ + void verifyImplementation(const Parser& parser) { + switch (GetParam()) { + case CodecImplementation::Old: + ASSERT_TRUE(parser.usesOldImpl()); + break; + case CodecImplementation::New: + ASSERT_FALSE(parser.usesOldImpl()); + break; + } + } +}; + +class Http1ServerConnectionImplTest : public Http1CodecParamTest { public: + ~Http1ServerConnectionImplTest() override = default; void initialize() { codec_ = std::make_unique(connection_, store_, callbacks_, codec_settings_, @@ -146,7 +172,10 @@ void Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string heade codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { +INSTANTIATE_TEST_SUITE_P(Http1ServerConnectionImplTest, Http1ServerConnectionImplTest, + testing::ValuesIn({CodecImplementation::Old, CodecImplementation::New})); + +TEST_P(Http1ServerConnectionImplTest, EmptyHeader) { initialize(); InSequence sequence; @@ -167,7 +196,7 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, Http10) { +TEST_P(Http1ServerConnectionImplTest, Http10) { initialize(); InSequence sequence; @@ -184,7 +213,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { EXPECT_EQ(Protocol::Http10, codec_->protocol()); } -TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { initialize(); TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; @@ -192,7 +221,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { +TEST_P(Http1ServerConnectionImplTest, Http10Absolute) { initialize(); TestHeaderMapImpl expected_headers{ @@ -201,7 +230,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { +TEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) { initialize(); Http::MockStreamDecoder decoder; @@ -245,7 +274,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { } } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) { initialize(); TestHeaderMapImpl expected_headers{ @@ -254,7 +283,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) { initialize(); TestHeaderMapImpl expected_headers{ @@ -263,7 +292,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { initialize(); TestHeaderMapImpl expected_headers{ @@ -273,7 +302,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { initialize(); TestHeaderMapImpl expected_headers{ @@ -282,7 +311,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { expectHeadersTest(Protocol::Http11, true, buffer, 
expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -290,7 +319,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { expect400(Protocol::Http11, true, buffer); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { initialize(); TestHeaderMapImpl expected_headers{ @@ -299,21 +328,21 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { initialize(); Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { initialize(); Buffer::OwnedImpl buffer("GET http://foobar.com:1000000 HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer); } -TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { +TEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) { initialize(); TestHeaderMapImpl expected_headers{ @@ -322,7 +351,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { expectHeadersTest(Protocol::Http11, false, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11Options) { +TEST_P(Http1ServerConnectionImplTest, Http11Options) { initialize(); TestHeaderMapImpl expected_headers{ @@ -331,7 +360,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11Options) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, SimpleGet) { +TEST_P(Http1ServerConnectionImplTest, SimpleGet) { initialize(); InSequence sequence; @@ -347,7 +376,7 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { initialize(); std::string output; @@ -365,7 +394,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); std::string output; @@ -382,7 +411,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { +TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { initialize(); InSequence sequence; @@ -401,7 +430,7 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { #ifdef ENVOY_ENABLE_LEGACY_HTTP_PARSER // Ensures that requests with invalid HTTP header values are not rejected // when the runtime guard is not enabled for the feature. -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is NOT enabled, invalid header values // should be accepted by the codec. 
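The TEST_F to TEST_P conversion in this file follows the standard gtest value-parameterized pattern, so each codec test body runs once per parser implementation. A stripped-down, self-contained version of that pattern (ToggleableThing and the other names here are hypothetical, not Envoy types; a gtest main such as gtest_main is assumed):

#include "gtest/gtest.h"

enum class Impl { Old, New };

// Hypothetical stand-in for a process-wide implementation switch.
class ToggleableThing {
public:
  static void useOld(bool v) { use_old_ = v; }
  static bool usesOld() { return use_old_; }

private:
  static bool use_old_;
};
bool ToggleableThing::use_old_ = false;

class ImplParamTest : public testing::TestWithParam<Impl> {
protected:
  // The fixture constructor latches the switch before each test body runs.
  ImplParamTest() { ToggleableThing::useOld(GetParam() == Impl::Old); }
};

INSTANTIATE_TEST_SUITE_P(BothImpls, ImplParamTest, testing::Values(Impl::Old, Impl::New));

// Every TEST_P body is instantiated once per value above, so existing expectations are
// exercised against both implementations without duplicating the tests.
TEST_P(ImplParamTest, FlagMatchesParam) {
  EXPECT_EQ(GetParam() == Impl::Old, ToggleableThing::usesOld());
}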
@@ -421,7 +450,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { // Ensures that requests with invalid HTTP header values are properly rejected // when the runtime guard is enabled for the feature. -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. @@ -447,7 +476,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { // Regression test for http-parser allowing embedded NULs in header values, // verify we reject them. -TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { +TEST_P(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.strict_header_validation", "false"}}); @@ -471,7 +500,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { // Mutate an HTTP GET with embedded NULs, this should always be rejected in some // way (not necessarily with "head value contains NUL" though). -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (size_t n = 1; n < example_input.size(); ++n) { @@ -492,7 +521,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { // Mutate an HTTP GET with CR or LF. These can cause an exception or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. 
-TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -514,7 +543,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { } } -TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { +TEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -534,7 +563,7 @@ TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { EXPECT_NE(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLength) { initialize(); InSequence sequence; @@ -556,7 +585,7 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) { initialize(); NiceMock decoder; @@ -579,7 +608,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -604,7 +633,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { initialize(); NiceMock decoder; @@ -627,7 +656,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { EXPECT_EQ("HTTP/1.1 204 No Content\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { initialize(); NiceMock decoder; @@ -657,7 +686,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, MetadataTest) { +TEST_P(Http1ServerConnectionImplTest, MetadataTest) { initialize(); NiceMock decoder; @@ -679,7 +708,7 @@ TEST_F(Http1ServerConnectionImplTest, MetadataTest) { EXPECT_EQ(1, store_.counter("http1.metadata_not_supported_error").value()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponse) { initialize(); NiceMock decoder; @@ -706,7 +735,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { output); } -TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { +TEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) { initialize(); NiceMock decoder; @@ -732,7 +761,7 @@ TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 11\r\n\r\nHello World", output); } -TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) { initialize(); NiceMock decoder; @@ -755,7 +784,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 5\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { initialize(); 
NiceMock decoder; @@ -778,7 +807,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { +TEST_P(Http1ServerConnectionImplTest, DoubleRequest) { initialize(); NiceMock decoder; @@ -803,7 +832,7 @@ TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailers) { +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailers) { initialize(); NiceMock decoder; @@ -820,7 +849,7 @@ TEST_F(Http1ServerConnectionImplTest, RequestWithTrailers) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); TestHeaderMapImpl expected_headers{ @@ -831,7 +860,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -844,7 +873,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -857,7 +886,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequest) { initialize(); InSequence sequence; @@ -880,7 +909,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { codec_->dispatch(websocket_payload); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -895,7 +924,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -912,7 +941,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -929,7 +958,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { codec_->dispatch(buffer); } -TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { +TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).Times(1).WillOnce(Return(10)); initialize(); @@ -963,8 +992,9 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -class Http1ClientConnectionImplTest : public testing::Test { +class Http1ClientConnectionImplTest : public Http1CodecParamTest { public: + ~Http1ClientConnectionImplTest() override = default; void initialize() { codec_ = std::make_unique(connection_, store_, callbacks_, codec_settings_, max_response_headers_count_); @@ 
-980,7 +1010,10 @@ class Http1ClientConnectionImplTest : public testing::Test { uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; }; -TEST_F(Http1ClientConnectionImplTest, SimpleGet) { +INSTANTIATE_TEST_SUITE_P(Http1ClientConnectionImplTest, Http1ClientConnectionImplTest, + testing::ValuesIn({CodecImplementation::Old, CodecImplementation::New})); + +TEST_P(Http1ClientConnectionImplTest, SimpleGet) { initialize(); Http::MockStreamDecoder response_decoder; @@ -994,7 +1027,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGet) { EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { +TEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1010,7 +1043,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\nContent-Length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { +TEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) { initialize(); Http::MockStreamDecoder response_decoder; @@ -1024,7 +1057,7 @@ TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, Reset) { +TEST_P(Http1ClientConnectionImplTest, Reset) { initialize(); Http::MockStreamDecoder response_decoder; @@ -1038,7 +1071,7 @@ TEST_F(Http1ClientConnectionImplTest, Reset) { // Verify that we correctly enable reads on the connection when the final pipeline response is // received. -TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { +TEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { initialize(); Http::MockStreamDecoder response_decoder; @@ -1079,14 +1112,14 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { codec_->dispatch(response2); } -TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureResponse) { initialize(); Buffer::OwnedImpl response("HTTP/1.1 408 Request Timeout\r\nConnection: Close\r\n\r\n"); EXPECT_THROW(codec_->dispatch(response), PrematureResponseException); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) { initialize(); NiceMock response_decoder; @@ -1099,7 +1132,7 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) { initialize(); NiceMock response_decoder; @@ -1112,7 +1145,7 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, HeadRequest) { +TEST_P(Http1ClientConnectionImplTest, HeadRequest) { initialize(); NiceMock response_decoder; @@ -1125,7 +1158,7 @@ TEST_F(Http1ClientConnectionImplTest, HeadRequest) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, 204Response) { +TEST_P(Http1ClientConnectionImplTest, 204Response) { initialize(); NiceMock response_decoder; @@ -1138,7 +1171,7 @@ TEST_F(Http1ClientConnectionImplTest, 204Response) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, 100Response) { +TEST_P(Http1ClientConnectionImplTest, 100Response) { initialize(); NiceMock response_decoder; 
@@ -1157,7 +1190,7 @@ TEST_F(Http1ClientConnectionImplTest, 100Response) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { +TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { initialize(); NiceMock response_decoder; @@ -1170,7 +1203,7 @@ TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { CodecClientException); } -TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { +TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { initialize(); NiceMock response_decoder; @@ -1191,7 +1224,7 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { codec_->dispatch(empty); } -TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { +TEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -1205,7 +1238,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { EXPECT_EQ(0UL, response.length()); } -TEST_F(Http1ClientConnectionImplTest, GiantPath) { +TEST_P(Http1ClientConnectionImplTest, GiantPath) { initialize(); NiceMock response_decoder; @@ -1219,7 +1252,7 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponse) { initialize(); InSequence s; @@ -1250,7 +1283,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { initialize(); InSequence s; @@ -1269,7 +1302,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { codec_->dispatch(response); } -TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { +TEST_P(Http1ClientConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).Times(1).WillOnce(Return(10)); initialize(); @@ -1304,7 +1337,7 @@ TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { // caller attempts to close the connection. This causes the network connection to attempt to write // pending data, even in the no flush scenario, which can cause us to go below low watermark // which then raises callbacks for a stream that no longer exists. -TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { +TEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { initialize(); InSequence s; @@ -1334,19 +1367,19 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { static_cast(codec_.get()) ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; testRequestHeadersExceedLimit(long_string); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { // Send a request with 101 headers. 
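createHeaderFragment's body is not shown in these hunks; the assumed shape, used by the ManyRequestHeaders tests around this point, is simply N distinct small headers so that 101 of them crosses the default count limit of 100. A hypothetical equivalent:

#include <string>

// makeHeaderFragment is a hypothetical stand-in for the helper used in this test file:
// it produces "header_0: value\r\nheader_1: value\r\n..." with `num_headers` entries.
std::string makeHeaderFragment(int num_headers) {
  std::string headers;
  for (int i = 0; i < num_headers; ++i) {
    headers += "header_" + std::to_string(i) + ": value\r\n";
  }
  return headers;
}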
testRequestHeadersExceedLimit(createHeaderFragment(101)); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Default limit of 60 KiB initialize(); @@ -1373,7 +1406,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Tests that the 101th request header causes overflow with the default max number of request // headers. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // Default limit of 100. initialize(); @@ -1397,27 +1430,27 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { max_request_headers_kb_ = 65; std::string long_string = "big: " + std::string(64 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { max_request_headers_kb_ = 96; std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } // Tests that the number of request headers is configurable. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { max_request_headers_count_ = 150; // Create a request with 150 headers. testRequestHeadersAccepted(createHeaderFragment(150)); } // Tests that response headers of 80 kB fails. -TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { +TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { initialize(); NiceMock response_decoder; @@ -1433,7 +1466,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { } // Tests that the size of response headers for HTTP/1 must be under 80 kB. -TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { initialize(); NiceMock response_decoder; @@ -1449,7 +1482,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { } // Exception called when the number of response headers exceeds the default value of 100. -TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { initialize(); NiceMock response_decoder; @@ -1464,7 +1497,7 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { } // Tests that the number of response headers is configurable. 
-TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { max_response_headers_count_ = 152; initialize(); diff --git a/test/common/http/http1/codec_speed_test.cc b/test/common/http/http1/codec_speed_test.cc new file mode 100644 index 000000000000..aa1a2221042f --- /dev/null +++ b/test/common/http/http1/codec_speed_test.cc @@ -0,0 +1,58 @@ + +#include + +#include "test/mocks/network/mocks.h" + +#include "benchmark/benchmark.h" + +namespace Envoy { +namespace Http { + +class CodecUtilitySpeedTest { +public: + CodecUtilitySpeedTest() { + codec_ = std::make_unique(connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_); + } + + void sendMessage(const std::string& message) { + + } + + NiceMock connection_; + NiceMock callbacks_; + NiceMock codec_settings_; + Http::ServerConnectionPtr codec_; +protected: + uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; + uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; + Stats::IsolatedStoreImpl store_; +private: + Parser parser_; + codec codec_; +}; + +static void BM_SimpleHttpRequest(benchmark::State& state) { + CodecUtilitySpeedTest context; + const std::string request = R"(HTTP/1.1 GET / +Foo: baz +content-length: 0 + +)"; + for (auto _ : state) { + context.sendMessage(request); + } +} +BENCHMARK(BM_SimpleHttpRequest); + +} // namespace Http +} // namespace Envoy + +// Boilerplate main(), which discovers benchmarks in the same file and runs them. +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + + if (benchmark::ReportUnrecognizedArguments(argc, argv)) { + return 1; + } + benchmark::RunSpecifiedBenchmarks(); +} From bb7071e7bb355eee025f4b23ede3f559b645fdc1 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Tue, 26 Nov 2019 23:59:57 -0600 Subject: [PATCH 08/14] remove speed test for now Signed-off-by: Derek Argueta --- test/common/http/http1/codec_speed_test.cc | 58 ---------------------- 1 file changed, 58 deletions(-) delete mode 100644 test/common/http/http1/codec_speed_test.cc diff --git a/test/common/http/http1/codec_speed_test.cc b/test/common/http/http1/codec_speed_test.cc deleted file mode 100644 index aa1a2221042f..000000000000 --- a/test/common/http/http1/codec_speed_test.cc +++ /dev/null @@ -1,58 +0,0 @@ - -#include - -#include "test/mocks/network/mocks.h" - -#include "benchmark/benchmark.h" - -namespace Envoy { -namespace Http { - -class CodecUtilitySpeedTest { -public: - CodecUtilitySpeedTest() { - codec_ = std::make_unique(connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_); - } - - void sendMessage(const std::string& message) { - - } - - NiceMock connection_; - NiceMock callbacks_; - NiceMock codec_settings_; - Http::ServerConnectionPtr codec_; -protected: - uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; - uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; - Stats::IsolatedStoreImpl store_; -private: - Parser parser_; - codec codec_; -}; - -static void BM_SimpleHttpRequest(benchmark::State& state) { - CodecUtilitySpeedTest context; - const std::string request = R"(HTTP/1.1 GET / -Foo: baz -content-length: 0 - -)"; - for (auto _ : state) { - context.sendMessage(request); - } -} -BENCHMARK(BM_SimpleHttpRequest); - -} // namespace Http -} // namespace Envoy - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. 
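The codec_speed_test.cc added above (and deleted again in the following patch) never fed its message through the codec, and its request line put the version before the method. For reference, a minimal self-contained google-benchmark loop of the intended shape, with a hypothetical pseudoDispatch standing in for codec_->dispatch():

#include <string>

#include "benchmark/benchmark.h"

// Hypothetical stand-in for pushing a request through an HTTP/1 codec.
static size_t pseudoDispatch(const std::string& message) { return message.size(); }

static void BM_SimpleHttpRequest(benchmark::State& state) {
  const std::string request = "GET / HTTP/1.1\r\nFoo: baz\r\ncontent-length: 0\r\n\r\n";
  for (auto _ : state) {
    // DoNotOptimize keeps the compiler from eliding the call under measurement.
    benchmark::DoNotOptimize(pseudoDispatch(request));
  }
}
BENCHMARK(BM_SimpleHttpRequest);

BENCHMARK_MAIN();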
-int main(int argc, char** argv) { - benchmark::Initialize(&argc, argv); - - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} From de604e3a7a48a2498d3310ed6ee2e5502aed6ed8 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 27 Nov 2019 00:12:17 -0600 Subject: [PATCH 09/14] format Signed-off-by: Derek Argueta --- source/common/http/http1/codec_impl.cc | 23 +-- source/common/http/http1/codec_impl.h | 11 +- .../common/http/http1/legacy_http_parser.cc | 136 +++++++----------- source/common/http/http1/legacy_http_parser.h | 4 +- source/common/http/http1/llhttp_parser.cc | 76 +++------- source/common/http/http1/llhttp_parser.h | 6 +- source/common/http/http1/parser.h | 18 +-- source/common/http/http1/parser_factory.cc | 10 +- source/common/http/http1/parser_factory.h | 11 ++ source/server/options_impl.cc | 4 +- source/server/server.cc | 3 +- test/common/http/http1/BUILD | 2 - test/common/http/http1/codec_impl_test.cc | 11 +- 13 files changed, 123 insertions(+), 192 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 51f796dad6fc..4100ef80cb23 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -319,9 +319,8 @@ const ToLowerTable& ConnectionImpl::toLowerTable() { return *table; } -ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, - MessageType, uint32_t max_headers_kb, - const uint32_t max_headers_count, +ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, MessageType, + uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter) : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))}, header_key_formatter_(std::move(header_key_formatter)), handling_upgrade_(false), @@ -408,10 +407,10 @@ size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { ASSERT(parser_ != nullptr); const size_t bytes_read = parser_->execute(slice, len); - if (parser_->getErrno() != static_cast(ParserStatus::Ok) && parser_->getErrno() != static_cast(ParserStatus::Paused)) { + if (parser_->getErrno() != static_cast(ParserStatus::Ok) && + parser_->getErrno() != static_cast(ParserStatus::Paused)) { sendProtocolError(); - throw CodecProtocolException("http/1.1 protocol error: " + - std::string(parser_->errnoName())); + throw CodecProtocolException("http/1.1 protocol error: " + std::string(parser_->errnoName())); } return bytes_read; @@ -445,7 +444,8 @@ int ConnectionImpl::onHeaderValueBase(const char* data, size_t length) { sendProtocolError(); throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } - } else if (ParserFactory::usesLegacyParser() && header_value.find('\0') != absl::string_view::npos) { + } else if (ParserFactory::usesLegacyParser() && + header_value.find('\0') != absl::string_view::npos) { // http-parser should filter for this // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. HeaderStrings // have an invariant that they must not contain embedded zero characters @@ -577,7 +577,8 @@ void ServerConnectionImpl::handlePath(HeaderMapImpl& headers, unsigned int metho // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. 
if (!active_request_->request_url_.getStringView().empty() && (active_request_->request_url_.getStringView()[0] == '/' || - ((method == static_cast(Method::Options)) && active_request_->request_url_.getStringView()[0] == '*'))) { + ((method == static_cast(Method::Options)) && + active_request_->request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request_->request_url_)); return; } @@ -624,7 +625,8 @@ int ServerConnectionImpl::onHeadersComplete() { // Inform the response encoder about any HEAD method, so it can set content // length and transfer encoding headers correctly. - active_request_->response_encoder_.isResponseToHeadRequest(parser_->method() == static_cast(Method::Head)); + active_request_->response_encoder_.isResponseToHeadRequest(parser_->method() == + static_cast(Method::Head)); // Currently, CONNECT is not supported, however; llhttp_parse_url needs to know about // CONNECT @@ -640,7 +642,8 @@ int ServerConnectionImpl::onHeadersComplete() { // scenario where the higher layers stream through and implicitly switch to chunked transfer // encoding because end stream with zero body length has not yet been indicated. if (parser_->flags() & static_cast(Flags::Chunked) || - (parser_->contentLength() > 0 && parser_->contentLength() != ULLONG_MAX) || handling_upgrade_) { + (parser_->contentLength() > 0 && parser_->contentLength() != ULLONG_MAX) || + handling_upgrade_) { active_request_->request_decoder_->decodeHeaders(std::move(current_header_map_), false); // If the connection has been closed (or is closing) after decoding headers, pause the parser diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 21488dc0b3ec..7189abde9e0c 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -196,7 +196,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable - #include +#include + #include "common/common/assert.h" #include "common/http/http1/parser.h" @@ -18,39 +18,37 @@ class LegacyHttpParserImpl::Impl { // https://github.com/nodejs/http-parser/blob/master/http_parser.h#L320 // so far unused - Impl(http_parser_type type) { - http_parser_init(&parser_, type); - } + Impl(http_parser_type type) { http_parser_init(&parser_, type); } Impl(http_parser_type type, void* data) : Impl(type) { parser_.data = data; settings_ = { - [](http_parser* parser) -> int { - std::cout << "message begin callback" << std::endl; - return static_cast(parser->data)->onMessageBegin(); - }, - [](http_parser* parser, const char* at, size_t length) -> int { - return static_cast(parser->data)->onUrl(at, length); - }, - // TODO(dereka) onStatus - nullptr, - [](http_parser* parser, const char* at, size_t length) -> int { - return static_cast(parser->data)->onHeaderField(at, length); - }, - [](http_parser* parser, const char* at, size_t length) -> int { - return static_cast(parser->data)->onHeaderValue(at, length); - }, - [](http_parser* parser) -> int { - return static_cast(parser->data)->onHeadersComplete(); - }, - [](http_parser* parser, const char* at, size_t length) -> int { - return static_cast(parser->data)->onBody(at, length); - }, - [](http_parser* parser) -> int { - return static_cast(parser->data)->onMessageComplete(); - }, - nullptr, // TODO(dereka) onChunkHeader - nullptr // TODO(dereka) onChunkComplete + [](http_parser* parser) -> int { + std::cout << "message begin callback" << std::endl; + return static_cast(parser->data)->onMessageBegin(); + }, + 
[](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onUrl(at, length); + }, + // TODO(dereka) onStatus + nullptr, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onHeaderField(at, length); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onHeaderValue(at, length); + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onHeadersComplete(); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + return static_cast(parser->data)->onBody(at, length); + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onMessageComplete(); + }, + nullptr, // TODO(dereka) onChunkHeader + nullptr // TODO(dereka) onChunkComplete }; } @@ -58,42 +56,26 @@ class LegacyHttpParserImpl::Impl { return http_parser_execute(&parser_, &settings_, slice, len); } - void resume() { - http_parser_pause(&parser_, 0); - } + void resume() { http_parser_pause(&parser_, 0); } int pause() { http_parser_pause(&parser_, 1); return HPE_PAUSED; } - int getErrno() { - return HTTP_PARSER_ERRNO(&parser_); - } + int getErrno() { return HTTP_PARSER_ERRNO(&parser_); } - int statusCode() const { - return parser_.status_code; - } + int statusCode() const { return parser_.status_code; } - int httpMajor() const { - return parser_.http_major; - } + int httpMajor() const { return parser_.http_major; } - int httpMinor() const { - return parser_.http_minor; - } + int httpMinor() const { return parser_.http_minor; } - uint64_t contentLength() const { - return parser_.content_length; - } + uint64_t contentLength() const { return parser_.content_length; } - int flags() const { - return parser_.flags; - } + int flags() const { return parser_.flags; } - uint16_t method() const { - return parser_.method; - } + uint16_t method() const { return parser_.method; } const char* methodName() const { return http_method_str(static_cast(parser_.method)); @@ -123,49 +105,27 @@ LegacyHttpParserImpl::LegacyHttpParserImpl(MessageType type, void* data) { // same compilation unit so that the destructor has a complete definition of Impl. 
LegacyHttpParserImpl::~LegacyHttpParserImpl() = default; -int LegacyHttpParserImpl::execute(const char* slice, int len) { - return impl_->execute(slice, len); -} +int LegacyHttpParserImpl::execute(const char* slice, int len) { return impl_->execute(slice, len); } -void LegacyHttpParserImpl::resume() { - impl_->resume(); -} +void LegacyHttpParserImpl::resume() { impl_->resume(); } -int LegacyHttpParserImpl::pause() { - return impl_->pause(); -} +int LegacyHttpParserImpl::pause() { return impl_->pause(); } -int LegacyHttpParserImpl::getErrno() { - return impl_->getErrno(); -} +int LegacyHttpParserImpl::getErrno() { return impl_->getErrno(); } -int LegacyHttpParserImpl::statusCode() const { - return impl_->statusCode(); -} +int LegacyHttpParserImpl::statusCode() const { return impl_->statusCode(); } -int LegacyHttpParserImpl::httpMajor() const { - return impl_->httpMajor(); -} +int LegacyHttpParserImpl::httpMajor() const { return impl_->httpMajor(); } -int LegacyHttpParserImpl::httpMinor() const { - return impl_->httpMinor(); -} +int LegacyHttpParserImpl::httpMinor() const { return impl_->httpMinor(); } -uint64_t LegacyHttpParserImpl::contentLength() const { - return impl_->contentLength(); -} +uint64_t LegacyHttpParserImpl::contentLength() const { return impl_->contentLength(); } -int LegacyHttpParserImpl::flags() const { - return impl_->flags(); -} +int LegacyHttpParserImpl::flags() const { return impl_->flags(); } -uint16_t LegacyHttpParserImpl::method() const { - return impl_->method(); -} +uint16_t LegacyHttpParserImpl::method() const { return impl_->method(); } -const char* LegacyHttpParserImpl::methodName() const { - return impl_->methodName(); -} +const char* LegacyHttpParserImpl::methodName() const { return impl_->methodName(); } const char* LegacyHttpParserImpl::errnoName() { return http_errno_name(static_cast(impl_->getErrno())); diff --git a/source/common/http/http1/legacy_http_parser.h b/source/common/http/http1/legacy_http_parser.h index a39ad2276dd1..4742c507bba6 100644 --- a/source/common/http/http1/legacy_http_parser.h +++ b/source/common/http/http1/legacy_http_parser.h @@ -1,9 +1,9 @@ #pragma once -#include "common/http/http1/parser.h" - #include +#include "common/http/http1/parser.h" + namespace Envoy { namespace Http { namespace Http1 { diff --git a/source/common/http/http1/llhttp_parser.cc b/source/common/http/http1/llhttp_parser.cc index c83753fc7df7..1df6ca673366 100644 --- a/source/common/http/http1/llhttp_parser.cc +++ b/source/common/http/http1/llhttp_parser.cc @@ -1,10 +1,10 @@ #include "common/http/http1/llhttp_parser.h" +#include + #include "common/common/assert.h" #include "common/http/http1/parser.h" -#include - namespace Envoy { namespace Http { namespace Http1 { @@ -36,37 +36,21 @@ class LlHttpParserImpl::Impl { return nread; } - void resume() { - llhttp_resume(&parser_); - } + void resume() { llhttp_resume(&parser_); } - int getErrno() { - return llhttp_get_errno(&parser_); - } + int getErrno() { return llhttp_get_errno(&parser_); } - int statusCode() const { - return parser_.status_code; - } + int statusCode() const { return parser_.status_code; } - int httpMajor() const { - return parser_.http_major; - } + int httpMajor() const { return parser_.http_major; } - int httpMinor() const { - return parser_.http_minor; - } + int httpMinor() const { return parser_.http_minor; } - uint64_t contentLength() const { - return parser_.content_length; - } + uint64_t contentLength() const { return parser_.content_length; } - int flags() const { - return parser_.flags; - } 
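For context on the execute() wrapper above: llhttp_execute() returns an error code rather than a byte count, so when the parser stops early (pause, upgrade, or a real error) the consumed length has to be recovered from llhttp_get_error_pos(), and an upgrade pause is cleared with llhttp_resume_after_upgrade(). A hedged standalone sketch of that contract, assuming only llhttp.h:

#include <cstddef>

#include <llhttp.h>

// Returns how many bytes llhttp consumed, mirroring http_parser_execute()'s contract.
static size_t executeAndCount(llhttp_t* parser, const char* slice, size_t len) {
  const llhttp_errno_t err = llhttp_execute(parser, slice, len);
  if (err == HPE_OK) {
    return len; // the whole slice was consumed
  }
  // On any early stop llhttp records the position it reached.
  size_t nread = llhttp_get_error_pos(parser) - slice;
  if (err == HPE_PAUSED_UPGRADE) {
    // Upgrade pauses are benign; clear the state so the next call can continue.
    llhttp_resume_after_upgrade(parser);
  }
  // HPE_PAUSED and genuine errors are left for the caller to inspect via llhttp_get_errno().
  return nread;
}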
+ int flags() const { return parser_.flags; } - uint16_t method() const { - return parser_.method; - } + uint16_t method() const { return parser_.method; } const char* methodName() const { return llhttp_method_name(static_cast(parser_.method)); @@ -96,50 +80,30 @@ LlHttpParserImpl::LlHttpParserImpl(MessageType type, void* data) { LlHttpParserImpl::~LlHttpParserImpl() = default; -int LlHttpParserImpl::execute(const char* slice, int len) { - return impl_->execute(slice, len); -} +int LlHttpParserImpl::execute(const char* slice, int len) { return impl_->execute(slice, len); } -void LlHttpParserImpl::resume() { - impl_->resume(); -} +void LlHttpParserImpl::resume() { impl_->resume(); } int LlHttpParserImpl::pause() { // TODO(dereka) do we actually need to call llhttp_pause(&parser_); ? return HPE_PAUSED; } -int LlHttpParserImpl::getErrno() { - return impl_->getErrno(); -} +int LlHttpParserImpl::getErrno() { return impl_->getErrno(); } -int LlHttpParserImpl::statusCode() const { - return impl_->statusCode(); -} +int LlHttpParserImpl::statusCode() const { return impl_->statusCode(); } -int LlHttpParserImpl::httpMajor() const { - return impl_->httpMajor(); -} +int LlHttpParserImpl::httpMajor() const { return impl_->httpMajor(); } -int LlHttpParserImpl::httpMinor() const { - return impl_->httpMinor(); -} +int LlHttpParserImpl::httpMinor() const { return impl_->httpMinor(); } -uint64_t LlHttpParserImpl::contentLength() const { - return impl_->contentLength(); -} +uint64_t LlHttpParserImpl::contentLength() const { return impl_->contentLength(); } -int LlHttpParserImpl::flags() const { - return impl_->flags(); -} +int LlHttpParserImpl::flags() const { return impl_->flags(); } -uint16_t LlHttpParserImpl::method() const { - return impl_->method(); -} +uint16_t LlHttpParserImpl::method() const { return impl_->method(); } -const char* LlHttpParserImpl::methodName() const { - return impl_->methodName(); -} +const char* LlHttpParserImpl::methodName() const { return impl_->methodName(); } const char* LlHttpParserImpl::errnoName() { return llhttp_errno_name(static_cast(impl_->getErrno())); diff --git a/source/common/http/http1/llhttp_parser.h b/source/common/http/http1/llhttp_parser.h index a15626222cd6..e49a0b3e92e5 100644 --- a/source/common/http/http1/llhttp_parser.h +++ b/source/common/http/http1/llhttp_parser.h @@ -1,9 +1,9 @@ #pragma once -#include "common/http/http1/parser.h" - #include +#include "common/http/http1/parser.h" + namespace Envoy { namespace Http { namespace Http1 { @@ -17,7 +17,7 @@ class LlHttpParserImpl : public Parser { int pause() override; int getErrno() override; int statusCode() const override; - int httpMajor() const override; + int httpMajor() const override; int httpMinor() const override; uint64_t contentLength() const override; int flags() const override; diff --git a/source/common/http/http1/parser.h b/source/common/http/http1/parser.h index 04ba0edbbd7f..b3b7bf12047f 100644 --- a/source/common/http/http1/parser.h +++ b/source/common/http/http1/parser.h @@ -16,19 +16,13 @@ namespace Envoy { namespace Http { namespace Http1 { -enum class ParserType { - Legacy, - LlHttp -}; +enum class ParserType { Legacy, LlHttp }; -enum class MessageType { - Request, - Response -}; +enum class MessageType { Request, Response }; /** - * Callbacks base class. Any users of the HTTP1 parser who want to use the parsed data needs to implement - * this interface and pass the object to the `data` parameter of ParserFactory::create. + * Callbacks base class. 
Any users of the HTTP1 parser who want to use the parsed data needs to + * implement this interface and pass the object to the `data` parameter of ParserFactory::create. */ class ParserCallbacks { public: @@ -77,8 +71,8 @@ class ParserCallbacks { * Called when the request/response is complete. */ virtual int onMessageComplete() PURE; - virtual int onChunkHeader() PURE; // shrug - virtual int onChunkComplete() PURE; // shrug + virtual int onChunkHeader() PURE; // shrug + virtual int onChunkComplete() PURE; // shrug }; /** diff --git a/source/common/http/http1/parser_factory.cc b/source/common/http/http1/parser_factory.cc index d2be218239ce..4a5c95af8a5f 100644 --- a/source/common/http/http1/parser_factory.cc +++ b/source/common/http/http1/parser_factory.cc @@ -2,8 +2,8 @@ #include -#include "common/http/http1/llhttp_parser.h" #include "common/http/http1/legacy_http_parser.h" +#include "common/http/http1/llhttp_parser.h" namespace Envoy { namespace Http { @@ -19,13 +19,9 @@ ParserPtr ParserFactory::create(MessageType type, void* data) { return std::make_unique(type, data); } -bool ParserFactory::usesLegacyParser() { - return use_legacy_parser_; -} +bool ParserFactory::usesLegacyParser() { return use_legacy_parser_; } -void ParserFactory::useLegacy(bool use_legacy_parser) { - use_legacy_parser_ = use_legacy_parser; -} +void ParserFactory::useLegacy(bool use_legacy_parser) { use_legacy_parser_ = use_legacy_parser; } } // namespace Http1 } // namespace Http diff --git a/source/common/http/http1/parser_factory.h b/source/common/http/http1/parser_factory.h index 3a116368910c..d8a6e1e189e0 100644 --- a/source/common/http/http1/parser_factory.h +++ b/source/common/http/http1/parser_factory.h @@ -6,6 +6,10 @@ namespace Envoy { namespace Http { namespace Http1 { +/** + * A temporary factory class to allow switching between constructing a parser using the legacy + * http-parser library or llhttp. + */ class ParserFactory { public: /** @@ -13,9 +17,16 @@ class ParserFactory { */ static ParserPtr create(MessageType type, void* data); + /** + * @return whether the factory is configured to return the legacy HTTP parser. + */ static bool usesLegacyParser(); + /** + * Sets whether to construct the legacy HTTP parser or newer llhttp parser. + */ static void useLegacy(bool use_legacy_parser); + private: static bool use_legacy_parser_; }; diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index cd5fd9bb0d3e..afcbec77683c 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -116,7 +116,9 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, TCLAP::ValueArg use_fake_symbol_table("", "use-fake-symbol-table", "Use fake symbol table implementation", false, true, "bool", cmd); - TCLAP::ValueArg use_legacy_http_parser("", "use-legacy-http-parser", "Use the legacy Node HTTP parser implementation", false, true, "bool", cmd); + TCLAP::ValueArg use_legacy_http_parser("", "use-legacy-http-parser", + "Use the legacy Node HTTP parser implementation", + false, true, "bool", cmd); cmd.setExceptionHandling(false); try { cmd.parse(argc, argv); diff --git a/source/server/server.cc b/source/server/server.cc index 0a23c20ab8cf..13e80bac5364 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -258,7 +258,8 @@ void InstanceImpl::initialize(const Options& options, } Http::Http1::ParserFactory::useLegacy(options.legacyHttpParserEnabled()); - ENVOY_LOG(info, "http implementation: {}", Http::Http1::ParserFactory::usesLegacyParser() ? 
"old (http-parser" : "new (llhttp)"); + ENVOY_LOG(info, "http implementation: {}", + Http::Http1::ParserFactory::usesLegacyParser() ? "old (http-parser" : "new (llhttp)"); // Handle configuration that needs to take place prior to the main configuration load. InstanceUtil::loadBootstrapConfig(bootstrap_, options, diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 2b5e86bacecf..e117fa725623 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -40,8 +40,6 @@ envoy_cc_test( ], ) -envoy_cc_test - envoy_cc_test( name = "conn_pool_test", srcs = ["conn_pool_test.cc"], diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 94fe83acd16d..12b4b3cf3602 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -43,15 +43,13 @@ std::string createHeaderFragment(int num_headers) { } // namespace enum class CodecImplementation { - Old, // original node http-parser - New // llhttp + Old, // original node http-parser + New // llhttp }; class Http1CodecParamTest : public testing::TestWithParam { protected: - Http1CodecParamTest() { - ParserFactory::useLegacy(GetParam() == CodecImplementation::Old); - } + Http1CodecParamTest() { ParserFactory::useLegacy(GetParam() == CodecImplementation::Old); } ~Http1CodecParamTest() override = default; /** Verify that a parser has been constructed using the expected implementation. */ @@ -389,7 +387,8 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); #endif - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, "http/1.1 protocol error: HPE_INVALID_METHOD"); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: HPE_INVALID_METHOD"); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } From 1ed643bdc38f8f19aed384442421ce93c8b8f3e2 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 27 Nov 2019 00:28:05 -0600 Subject: [PATCH 10/14] fix BUILD formatting Signed-off-by: Derek Argueta --- source/common/http/http1/BUILD | 18 +++++++++--------- source/common/http/http1/legacy_http_parser.cc | 5 ----- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index a4ff68c4581a..ac873eecc7d3 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -19,8 +19,8 @@ envoy_cc_library( srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], deps = [ - ":parser_interface", ":parser_factory_lib", + ":parser_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:header_map_interface", @@ -82,26 +82,26 @@ envoy_cc_library( name = "llhttp_lib", srcs = ["llhttp_parser.cc"], hdrs = ["llhttp_parser.h"], + external_deps = [ + "llhttp", + ], deps = [ ":parser_interface", "//source/common/common:assert_lib", ], - external_deps = [ - "llhttp", - ], ) envoy_cc_library( name = "legacy_http_parser_lib", srcs = ["legacy_http_parser.cc"], hdrs = ["legacy_http_parser.h"], + external_deps = [ + "http_parser" + ], deps = [ ":parser_interface", "//source/common/common:assert_lib", ], - external_deps = [ - "http_parser" - ] ) envoy_cc_library( @@ -109,8 +109,8 @@ envoy_cc_library( srcs = ["parser_factory.cc"], hdrs = ["parser_factory.h"], deps = [ - ":parser_interface", ":legacy_http_parser_lib", 
":llhttp_lib", - ] + ":parser_interface", + ], ) diff --git a/source/common/http/http1/legacy_http_parser.cc b/source/common/http/http1/legacy_http_parser.cc index b3cdd86b65bb..7c87b4e4bb21 100644 --- a/source/common/http/http1/legacy_http_parser.cc +++ b/source/common/http/http1/legacy_http_parser.cc @@ -13,11 +13,6 @@ namespace Http1 { class LegacyHttpParserImpl::Impl { public: - // Possible idea: have an overload that doesn't accept `data` which appears - // to just be used for callbacks? If no data, then leave settings as nullptrs? - // https://github.com/nodejs/http-parser/blob/master/http_parser.h#L320 - - // so far unused Impl(http_parser_type type) { http_parser_init(&parser_, type); } Impl(http_parser_type type, void* data) : Impl(type) { From c4981fc121c9dee5c89666fe5cde0468bc1405f2 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 27 Nov 2019 00:39:05 -0600 Subject: [PATCH 11/14] cleanup Signed-off-by: Derek Argueta --- include/envoy/server/options.h | 2 +- source/common/http/http1/codec_impl.cc | 23 ++++++++++++----------- source/common/http/http1/codec_impl.h | 17 +++++++++++------ 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 1bd369b88276..a2f5bb5c0735 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -190,7 +190,7 @@ class Options { virtual bool fakeSymbolTableEnabled() const PURE; /** - * @return whether to use the fake symbol table implementation. + * @return whether to use the legacy HTTP/1.x parser. */ virtual bool legacyHttpParserEnabled() const PURE; diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 4100ef80cb23..ed2998811212 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -416,10 +416,10 @@ size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { return bytes_read; } -int ConnectionImpl::onHeaderFieldBase(const char* data, size_t length) { +void ConnectionImpl::onHeaderFieldBase(const char* data, size_t length) { if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. - return 0; + return; } if (header_parsing_state_ == HeaderParsingState::Value) { @@ -427,13 +427,12 @@ int ConnectionImpl::onHeaderFieldBase(const char* data, size_t length) { } current_header_field_.append(data, length); - return 0; } -int ConnectionImpl::onHeaderValueBase(const char* data, size_t length) { +void ConnectionImpl::onHeaderValueBase(const char* data, size_t length) { if (header_parsing_state_ == HeaderParsingState::Done) { // Ignore trailers. 
- return 0; + return; } const absl::string_view header_value = absl::string_view(data, length); @@ -466,8 +465,6 @@ int ConnectionImpl::onHeaderValueBase(const char* data, size_t length) { sendProtocolError(); throw CodecProtocolException("headers size exceeds limit"); } - - return 0; } void ConnectionImpl::onHeadersCompleteBase() { @@ -686,11 +683,13 @@ int ServerConnectionImpl::onUrl(const char* data, size_t length) { } int ServerConnectionImpl::onHeaderField(const char* data, size_t length) { - return onHeaderFieldBase(data, length); + onHeaderFieldBase(data, length); + return 0; } int ServerConnectionImpl::onHeaderValue(const char* data, size_t length) { - return onHeaderValueBase(data, length); + onHeaderValueBase(data, length); + return 0; } int ServerConnectionImpl::onBody(const char* data, size_t length) { @@ -805,11 +804,13 @@ int ClientConnectionImpl::onMessageBegin() { } int ClientConnectionImpl::onHeaderField(const char* data, size_t length) { - return onHeaderFieldBase(data, length); + onHeaderFieldBase(data, length); + return 0; } int ClientConnectionImpl::onHeaderValue(const char* data, size_t length) { - return onHeaderValueBase(data, length); + onHeaderValueBase(data, length); + return 0; } int ClientConnectionImpl::onHeadersComplete() { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 7189abde9e0c..1d236aba13ea 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -192,9 +192,15 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable Date: Wed, 27 Nov 2019 00:44:31 -0600 Subject: [PATCH 12/14] remove outdated comment Signed-off-by: Derek Argueta --- source/common/http/http1/parser.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/source/common/http/http1/parser.h b/source/common/http/http1/parser.h index b3b7bf12047f..d3c1a6c4fcc7 100644 --- a/source/common/http/http1/parser.h +++ b/source/common/http/http1/parser.h @@ -4,14 +4,6 @@ #include "envoy/common/pure.h" -/** - * This is a temporary shim to easily allow switching between llhttp and http-parser at compile - * time by providing a consistent interface, then adapting them to the respective implementations. - * - * When http-parser is ready to be removed, this shim should also disappear and the llhttp_* methods - * moved into the codec implementation. 
- */ - namespace Envoy { namespace Http { namespace Http1 { From cdee9acb5463d769c513bec7809188ad1a4a9143 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 27 Nov 2019 01:06:57 -0600 Subject: [PATCH 13/14] fmt Signed-off-by: Derek Argueta --- source/common/http/http1/BUILD | 2 +- tools/spelling_dictionary.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index ac873eecc7d3..f8713018ff67 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -96,7 +96,7 @@ envoy_cc_library( srcs = ["legacy_http_parser.cc"], hdrs = ["legacy_http_parser.h"], external_deps = [ - "http_parser" + "http_parser", ], deps = [ ":parser_interface", diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 76a9f05aadc1..3c98f0eb85b2 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -146,6 +146,7 @@ LDS LEV LF LHS +LLHTTP LRS MB MD From 09551b5ec0985398d4e216a3a93aa85ea9cd75a4 Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Wed, 6 May 2020 00:45:48 -0700 Subject: [PATCH 14/14] fix version_history, repository_locations, bump llhttp to 2.0.5 Signed-off-by: Derek Argueta --- bazel/repository_locations.bzl | 8 +- docs/root/intro/version_history.rst | 1171 ------------------------- docs/root/version_history/current.rst | 1 + 3 files changed, 6 insertions(+), 1174 deletions(-) delete mode 100644 docs/root/intro/version_history.rst diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 73a55246c599..52fc2a96d8d4 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -291,9 +291,11 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_nodejs_llhttp = dict( - sha256 = "76100c5b7948c8e49e8afa8c658e2a477bcb831e18854cf134aa3bfd44a5c669", - strip_prefix = "llhttp-release-v2.0.1", - urls = ["https://github.com/nodejs/llhttp/archive/release/v2.0.1.tar.gz"], + sha256 = "48f882f0b6cecc48aec8f81072ee4d80fe9a4b5e1bce009e3cf8aecbe5892c1a", + strip_prefix = "llhttp-release-v2.0.5", + urls = ["https://github.com/nodejs/llhttp/archive/release/v2.0.5.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst deleted file mode 100644 index ae90d860890d..000000000000 --- a/docs/root/intro/version_history.rst +++ /dev/null @@ -1,1171 +0,0 @@ -Version history ---------------- - -1.14.0 (Pending) -================ -* access loggers: access logger extensions use the "envoy.access_loggers" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* adaptive concurrency: fixed bug that allowed concurrency limits to drop below the configured - minimum. -* admin: added support for displaying ip address subject alternate names in :ref:`certs` end point. -* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. -* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. -* dns: the STRICT_DNS cluster now only resolves to 0 hosts if DNS resolution successfully returns 0 hosts. -* dns: added support for :ref:`dns_failure_refresh_rate ` for the :ref:`dns cache ` to set the DNS refresh rate during failures. 
-* http filters: http filter extensions use the "envoy.filters.http" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* ext_authz: disabled the use of lowercase string matcher for headers matching in HTTP-based `ext_authz`. - Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher` to false. -* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection` -* http: fixing a bug in HTTP/1.0 responses where Connection: keep-alive was not appended for connections which were kept alive. -* http: fixed a bug that could send extra METADATA frames and underflow memory when encoding METADATA frames on a connection that was dispatching data. -* http: connection header sanitizing has been modified to always sanitize if there is no upgrade, including when an h2c upgrade attempt has been removed. -* http: add llhttp as an alternative http parser -* listener filters: listener filter extensions use the "envoy.filters.listener" name space. A - mapping of extension names is available in the :ref:`deprecated ` documentation. -* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. -* lua: added a parameter to `httpCall` that makes it possible to have the call be asynchronous. -* mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter` was modified to emit correctly for Mongo v3.2+. -* network filters: network filter extensions use the "envoy.filters.network" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. -* retry: added a retry predicate that :ref:`rejects hosts based on metadata. ` -* router: added :ref:`auto_san_validation ` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header. -* router: added the ability to match a route based on whether a downstream TLS connection certificate has been - :ref:`validated `. -* router: added support for :ref:`regex_rewrite - ` for path rewriting using regular expressions and capture groups. -* router: don't ignore :ref:`per_try_timeout ` when the :ref:`global route timeout ` is disabled. -* runtime: enabling the runtime feature "envoy.deprecated_features.allow_deprecated_extension_names" - disables the use of deprecated extension names. -* runtime: integer values may now be parsed as booleans. -* sds: added :ref:`GenericSecret ` to support secret of generic type. -* sds: fix the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. -* stat sinks: stat sink extensions use the "envoy.stat_sinks" name space. A mapping of extension - names is available in the :ref:`deprecated ` documentation. -* thrift_proxy: add router filter stats to docs. -* tracers: tracer extensions use the "envoy.tracers" name space. A mapping of extension names is - available in the :ref:`deprecated ` documentation. -* tracing: added gRPC service configuration to the OpenCensus Stackdriver and OpenCensus Agent tracers. -* upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker - limits for both requests and connections apply to both pool types. 
Also, HTTP/2 now has - the option to limit concurrent requests on a connection, and allow multiple draining - connections. The old behavior is deprecated, but can be used during the deprecation - period by disabling runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or - "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or - restart Envoy. The behavior will not switch until the connection pools are recreated. The new - circuit breaker behavior is described :ref:`here `. -* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode`. - -1.13.0 (January 20, 2020) -========================= -* access log: added FILTER_STATE :ref:`access log formatters ` and gRPC access logger. -* admin: added the ability to filter :ref:`/config_dump `. -* access log: added a :ref:`typed JSON logging mode ` to output access logs in JSON format with non-string values -* access log: fixed UPSTREAM_LOCAL_ADDRESS :ref:`access log formatters ` to work for http requests -* access log: added HOSTNAME. -* api: remove all support for v1 -* api: added ability to specify `mode` for :ref:`Pipe `. -* api: support for the v3 xDS API added. See :ref:`api_supported_versions`. -* aws_request_signing: added new alpha :ref:`HTTP AWS request signing filter `. -* buffer: remove old implementation -* build: official released binary is now built against libc++. -* cluster: added :ref:`aggregate cluster ` that allows load balancing between clusters. -* config: all category names of internal envoy extensions are prefixed with the 'envoy.' prefix to follow the reverse DNS naming notation. -* decompressor: remove decompressor hard assert failure and replace with an error flag. -* ext_authz: added :ref:`configurable ability` to send the :ref:`certificate` to the `ext_authz` service. -* fault: fixed an issue where the http fault filter would repeatedly check the percentage of abort/delay when the `x-envoy-downstream-service-cluster` header was included in the request to ensure that the actual percentage of abort/delay matches the configuration of the filter. -* health check: gRPC health checker sets the gRPC deadline to the configured timeout duration. -* health check: added :ref:`TlsOptions ` to allow TLS configuration overrides. -* health check: added :ref:`service_name_matcher ` to better compare the service name patterns for health check identity. -* http: added strict validation that CONNECT is refused as it is not yet implemented. This can be reversed temporarily by setting the runtime feature `envoy.reloadable_features.strict_method_validation` to false. -* http: added support for http1 trailers. To enable use :ref:`enable_trailers `. -* http: added the ability to sanitize headers nominated by the Connection header. This new behavior is guarded by envoy.reloadable_features.connection_header_sanitization which defaults to true. -* http: blocks unsupported transfer-encodings. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.reject_unsupported_transfer_encodings` to false. -* http: support :ref:`auto_host_rewrite_header` in the dynamic forward proxy. -* jwt_authn: added :ref:`allow_missing` option that accepts request without token but rejects bad request with bad tokens. -* jwt_authn: added :ref:`bypass_cors_preflight` to allow bypassing the CORS preflight request. -* lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET` -* listeners: added :ref:`reuse_port` option. 
-* logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. -* ratelimit: added :ref:`local rate limit ` network filter. -* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name `. -* redis: performance improvement for larger split commands by avoiding string copies. -* redis: correctly follow MOVE/ASK redirection for mirrored clusters. -* redis: add :ref:`host_degraded_refresh_threshold ` and :ref:`failure_refresh_threshold ` to refresh topology when nodes are degraded or when requests fails. -* router: added histograms to show timeout budget usage to the :ref:`cluster stats `. -* router check tool: added support for testing and marking coverage for routes of runtime fraction 0. -* router: added :ref:`request_mirror_policies` to support sending multiple mirrored requests in one route. -* router: added support for REQ(header-name) :ref:`header formatter `. -* router: added support for percentage-based :ref:`retry budgets ` -* router: allow using a :ref:`query parameter ` for HTTP consistent hashing. -* router: exposed DOWNSTREAM_REMOTE_ADDRESS as custom HTTP request/response headers. -* router: added support for :ref:`max_internal_redirects ` for configurable maximum internal redirect hops. -* router: skip the Location header when the response code is not a 201 or a 3xx. -* router: added :ref:`auto_sni ` to support setting SNI to transport socket for new upstream connections based on the downstream HTTP host/authority header. -* router: added support for HOSTNAME :ref:`header formatter - `. -* server: added the :option:`--disable-extensions` CLI option, to disable extensions at startup. -* server: fixed a bug in config validation for configs with runtime layers. -* server: added :ref:`workers_started ` that indicates whether listeners have been fully initialized on workers. -* tcp_proxy: added :ref:`ClusterWeight.metadata_match`. -* tcp_proxy: added :ref:`hash_policy`. -* thrift_proxy: added support for cluster header based routing. -* thrift_proxy: added stats to the router filter. -* tls: remove TLS 1.0 and 1.1 from client defaults -* tls: added support for :ref:`generic string matcher ` for subject alternative names. -* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager` and the :ref:`HTTP route `. -* tracing: added upstream_address tag. -* tracing: added initial support for AWS X-Ray (local sampling rules only) :ref:`X-Ray Tracing `. -* tracing: added tags for gRPC request path, authority, content-type and timeout. -* udp: added initial support for :ref:`UDP proxy ` - -1.12.2 (December 10, 2019) -========================== -* http: fixed CVE-2019-18801 by allocating sufficient memory for request headers. -* http: fixed CVE-2019-18802 by implementing stricter validation of HTTP/1 headers. -* http: trim LWS at the end of header keys, for correct HTTP/1.1 header parsing. -* http: added strict authority checking. This can be reversed temporarily by setting the runtime feature `envoy.reloadable_features.strict_authority_validation` to false. -* route config: fixed CVE-2019-18838 by checking for presence of host/path headers. - -1.12.1 (November 8, 2019) -========================= -* listener: fixed CVE-2019-18836 by clearing accept filters before connection creation. - -1.12.0 (October 31, 2019) -========================= -* access log: added a new flag for :ref:`downstream protocol error `. 
-* access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second. -* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. -* access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. -* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. -* admin: added ability to configure listener :ref:`socket options `. -* admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. -* admin: added support for :ref:`draining ` listeners via admin interface. -* admin: added :http:get:`/stats/recentlookups`, :http:post:`/stats/recentlookups/clear`, :http:post:`/stats/recentlookups/disable`, and :http:post:`/stats/recentlookups/enable` endpoints. -* api: added :ref:`set_node_on_first_message_only ` option to omit the node identifier from the subsequent discovery requests on the same stream. -* buffer filter: now populates content-length header if not present. This behavior can be temporarily disabled using the runtime feature `envoy.reloadable_features.buffer_filter_populate_content_length`. -* build: official released binary is now PIE so it can be run with ASLR. -* config: added support for :ref:`delta xDS ` (including ADS) delivery. -* config: enforcing that terminal filters (e.g. HttpConnectionManager for L4, router for L7) be the last in their respective filter chains. -* config: added access log :ref:`extension filter`. -* config: added support for :option:`--reject-unknown-dynamic-fields`, providing independent control - over whether unknown fields are rejected in static and dynamic configuration. By default, unknown - fields in static configuration are rejected and are allowed in dynamic configuration. Warnings - are logged for the first use of any unknown field and these occurrences are counted in the - :ref:`server.static_unknown_fields ` and :ref:`server.dynamic_unknown_fields - ` statistics. -* config: added async data access for local and remote data sources. -* config: changed the default value of :ref:`initial_fetch_timeout ` from 0s to 15s. This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. Refer to :ref:`initialization process ` for more details. -* config: added stat :ref:`init_fetch_timeout `. -* config: tls_context in Cluster and FilterChain are deprecated in favor of transport socket. See :ref:`deprecated documentation` for more information. -* csrf: added PATCH to supported methods. -* dns: added support for configuring :ref:`dns_failure_refresh_rate ` to set the DNS refresh rate during failures. -* ext_authz: added :ref:`configurable ability ` to send dynamic metadata to the `ext_authz` service. -* ext_authz: added :ref:`filter_enabled RuntimeFractionalPercent flag ` to filter. -* ext_authz: added tracing to the HTTP client. -* ext_authz: deprecated :ref:`cluster scope stats ` in favour of filter scope stats. -* fault: added overrides for default runtime keys in :ref:`HTTPFault ` filter. -* grpc: added :ref:`AWS IAM grpc credentials extension ` for AWS-managed xDS. -* grpc: added :ref:`gRPC stats filter ` for collecting stats about gRPC calls and streaming message counts. -* grpc-json: added support for :ref:`ignoring unknown query parameters`. 
-* grpc-json: added support for :ref:`the grpc-status-details-bin header`. -* header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. -* http: added a default one hour idle timeout to upstream and downstream connections. HTTP connections with no streams and no activity will be closed after one hour unless the default idle_timeout is overridden. To disable upstream idle timeouts, set the :ref:`idle_timeout ` to zero in Cluster :ref:`http_protocol_options`. To disable downstream idle timeouts, either set :ref:`idle_timeout ` to zero in the HttpConnectionManager :ref:`common_http_protocol_options ` or set the deprecated :ref:`connection manager ` field to zero. -* http: added the ability to format HTTP/1.1 header keys using :ref:`header_key_format `. -* http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature `envoy.reloadable_features.strict_header_validation`. -* http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true. -* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation` field. -* http: added the ability to :ref:`merge adjacent slashes` in the path. -* http: :ref:`AUTO ` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client. -* http: remove h2c upgrade headers for HTTP/1 as h2c upgrades are currently not supported. -* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url ` to false. -* http: support :ref:`host rewrite ` in the dynamic forward proxy. -* http: support :ref:`disabling the filter per route ` in the grpc http1 reverse bridge filter. -* http: added the ability to :ref:`configure max connection duration ` for downstream connections. -* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. -* listeners: added :ref:`HTTP inspector listener filter `. -* listeners: added :ref:`connection balancer ` - configuration for TCP listeners. -* listeners: listeners now close the listening socket as part of the draining stage as soon as workers stop accepting their connections. -* lua: extended `httpCall()` and `respond()` APIs to accept headers with entry values that can be a string or table of strings. -* lua: extended `dynamicMetadata:set()` to allow setting complex values. -* metrics_service: added support for flushing histogram buckets. -* outlier_detector: added :ref:`support for the grpc-status response header ` by mapping it to HTTP status. Guarded by envoy.reloadable_features.outlier_detection_support_for_grpc_status which defaults to true. -* performance: new buffer implementation enabled by default (to disable add "--use-libevent-buffers 1" to the command-line arguments when starting Envoy). -* performance: stats symbol table implementation (disabled by default; to test it, add "--use-fake-symbol-table 0" to the command-line arguments when starting Envoy). -* rbac: added support for DNS SAN as :ref:`principal_name `. -* redis: added :ref:`enable_command_stats ` to enable :ref:`per command statistics ` for upstream clusters. -* redis: added :ref:`read_policy ` to allow reading from redis replicas for Redis Cluster deployments. 
-* redis: fixed a bug where the redis health checker ignored the upstream auth password. -* redis: enable_hashtaging is always enabled when the upstream uses open source Redis cluster protocol. -* regex: introduced new :ref:`RegexMatcher ` type that - provides a safe regex implementation for untrusted user input. This type is now used in all - configuration that processes user provided input. See :ref:`deprecated configuration details - ` for more information. -* rbac: added conditions to the policy, see :ref:`condition `. -* router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. -* router: :ref:`scoped routing ` is supported. -* router: added new :ref:`retriable-headers ` retry policy. Retries can now be configured to trigger by arbitrary response header matching. -* router: added ability for most specific header mutations to take precedence, see :ref:`route configuration's most specific - header mutations wins flag `. -* router: added :ref:`respect_expected_rq_timeout ` that instructs ingress Envoy to respect :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when deriving timeout for upstream cluster. -* router: added new :ref:`retriable request headers ` to route configuration, to allow limiting buffering for retries and shadowing. -* router: added new :ref:`retriable request headers ` to retry policies. Retries can now be configured to only trigger on request header match. -* router: added the ability to match a route based on whether a TLS certificate has been - :ref:`presented ` by the - downstream connection. -* router check tool: added coverage reporting & enforcement. -* router check tool: added comprehensive coverage reporting. -* router check tool: added deprecated field check. -* router check tool: added flag for only printing results of failed tests. -* router check tool: added support for outputting missing tests in the detailed coverage report. -* router check tool: added coverage reporting for direct response routes. -* runtime: allows for the ability to parse boolean values. -* runtime: allows for the ability to parse integers as double values and vice-versa. -* sds: added :ref:`session_ticket_keys_sds_secret_config ` for loading TLS Session Ticket Encryption Keys using SDS API. -* server: added a post initialization lifecycle event, in addition to the existing startup and shutdown events. -* server: added :ref:`per-handler listener stats ` and - :ref:`per-worker watchdog stats ` to help diagnosing event - loop imbalance and general performance issues. -* stats: added unit support to histogram. -* tcp_proxy: the default :ref:`idle_timeout - ` is now 1 hour. -* thrift_proxy: fixed crashing bug on invalid transport/protocol framing. -* thrift_proxy: added support for stripping service name from method when using the multiplexed protocol. -* tls: added verification of IP address SAN fields in certificates against configured SANs in the certificate validation context. -* tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. - certificate validation context. -* tracing: added tags for gRPC response status and message. -* tracing: added :ref:`max_path_tag_length ` to support customizing the length of the request path included in the extracted `http.url `_ tag. -* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. 
-* upstream: added :ref:`transport_socket_matches `, support using different transport socket config when connecting to different upstream endpoints within a cluster. -* upstream: added network filter chains to upstream connections, see :ref:`filters`. -* upstream: added new :ref:`failure-percentage based outlier detection` mode. -* upstream: uses p2c to select hosts for least-requests load balancers if all host weights are the same, even in cases where weights are not equal to 1. -* upstream: added :ref:`fail_traffic_on_panic ` to allow failing all requests to a cluster during panic state. -* zookeeper: parses responses and emits latency stats. - -1.11.2 (October 8, 2019) -======================== -* http: fixed CVE-2019-15226 by adding a cached byte size in HeaderMap. -* http: added :ref:`max headers count ` for http connections. The default limit is 100. -* upstream: runtime feature `envoy.reloadable_features.max_response_headers_count` overrides the default limit for upstream :ref:`max headers count ` -* http: added :ref:`common_http_protocol_options ` - Runtime feature `envoy.reloadable_features.max_request_headers_count` overrides the default limit for downstream :ref:`max headers count ` -* regex: backported safe regex matcher fix for CVE-2019-15225. - -1.11.1 (August 13, 2019) -======================== -* http: added mitigation of client initiated attacks that result in flooding of the downstream HTTP/2 connections. Those attacks can be logged at the "warning" level when the runtime feature `http.connection_manager.log_flood_exception` is enabled. The runtime setting defaults to disabled to avoid log spam when under attack. -* http: added :ref:`inbound_empty_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. - Runtime feature `envoy.reloadable_features.http2_protocol_options.max_consecutive_inbound_frames_with_empty_payload` overrides :ref:`max_consecutive_inbound_frames_with_empty_payload setting `. Large override value (i.e. 2147483647) effectively disables mitigation of inbound frames with empty payload. -* http: added :ref:`inbound_priority_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound PRIORITY frames. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. - Runtime feature `envoy.reloadable_features.http2_protocol_options.max_inbound_priority_frames_per_stream` overrides :ref:`max_inbound_priority_frames_per_stream setting `. Large override value effectively disables flood mitigation of inbound PRIORITY frames. -* http: added :ref:`inbound_window_update_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound WINDOW_UPDATE frames. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting `. - Runtime feature `envoy.reloadable_features.http2_protocol_options.max_inbound_window_update_frames_per_data_frame_sent` overrides :ref:`max_inbound_window_update_frames_per_data_frame_sent setting `. Large override value effectively disables flood mitigation of inbound WINDOW_UPDATE frames. 
-* http: added :ref:`outbound_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit. The limit is configured by setting the :ref:`max_outbound_frames config setting ` - Runtime feature `envoy.reloadable_features.http2_protocol_options.max_outbound_frames` overrides :ref:`max_outbound_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of all types. -* http: added :ref:`outbound_control_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit for PING, SETTINGS and RST_STREAM frames. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `. - Runtime feature `envoy.reloadable_features.http2_protocol_options.max_outbound_control_frames` overrides :ref:`max_outbound_control_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of types PING, SETTINGS and RST_STREAM. -* http: enabled strict validation of HTTP/2 messaging. Previous behavior can be restored using :ref:`stream_error_on_invalid_http_messaging config setting `. - Runtime feature `envoy.reloadable_features.http2_protocol_options.stream_error_on_invalid_http_messaging` overrides :ref:`stream_error_on_invalid_http_messaging config setting `. - -1.11.0 (July 11, 2019) -====================== -* access log: added a new field for downstream TLS session ID to file and gRPC access logger. -* access log: added a new field for route name to file and gRPC access logger. -* access log: added a new field for response code details in :ref:`file access logger` and :ref:`gRPC access logger`. -* access log: added several new variables for exposing information about the downstream TLS connection to :ref:`file access logger` and :ref:`gRPC access logger`. -* access log: added a new flag for request rejected due to failed strict header check. -* admin: the administration interface now includes a :ref:`/ready endpoint ` for easier readiness checks. -* admin: extend :ref:`/runtime_modify endpoint ` to support parameters within the request body. -* admin: the :ref:`/listener endpoint ` now returns :ref:`listeners.proto` which includes listener names and ports. -* admin: added host priority to :http:get:`/clusters` and :http:get:`/clusters?format=json` endpoint response -* admin: the :ref:`/clusters endpoint ` now shows hostname - for each host, useful for DNS based clusters. -* api: track and report requests issued since last load report. -* build: releases are built with Clang and linked with LLD. -* config: added :ref:stats_server_version_override` ` in bootstrap, that can be used to override :ref:`server.version statistic `. -* control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` -* csrf: added support for whitelisting additional source origins. -* dns: added support for getting DNS record TTL which is used by STRICT_DNS/LOGICAL_DNS cluster as DNS refresh rate. -* dubbo_proxy: support the :ref:`dubbo proxy filter `. -* dynamo_request_parser: adding support for transactions. Adds check for new types of dynamodb operations (TransactWriteItems, TransactGetItems) and awareness for new types of dynamodb errors (IdempotentParameterMismatchException, TransactionCanceledException, TransactionInProgressException). 
-* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. -* eds: removed max limit for `load_balancing_weight`. -* event: added :ref:`loop duration and poll delay statistics `. -* ext_authz: added a `x-envoy-auth-partial-body` metadata header set to `false|true` indicating if there is a partial body sent in the authorization request message. -* ext_authz: added configurable status code that allows customizing HTTP responses on filter check status errors. -* ext_authz: added option to `ext_authz` that allows the filter clearing route cache. -* grpc-json: added support for :ref:`auto mapping - `. -* health check: added :ref:`initial jitter ` to add jitter to the first health check in order to prevent thundering herd on Envoy startup. -* hot restart: stats are no longer shared between hot restart parent/child via shared memory, but rather by RPC. Hot restart version incremented to 11. -* http: added the ability to pass a URL encoded PEM encoded peer certificate chain in the - :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. -* http: fixed a bug where large unbufferable responses were not tracked in stats and logs correctly. -* http: fixed a crashing bug where gRPC local replies would cause segfaults when upstream access logging was on. -* http: mitigated a race condition with the :ref:`delayed_close_timeout` where it could trigger while actively flushing a pending write buffer for a downstream connection. -* http: added support for :ref:`preserve_external_request_id` that represents whether the x-request-id should not be reset on edge entry inside mesh -* http: changed `sendLocalReply` to send percent-encoded `GrpcMessage`. -* http: added a :ref:header_prefix` ` configuration option to allow Envoy to send and process x-custom- prefixed headers rather than x-envoy. -* http: added :ref:`dynamic forward proxy ` support. -* http: tracking the active stream and dumping state in Envoy crash handlers. This can be disabled by building with `--define disable_object_dump_on_signal_trace=disabled` -* jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123`` -* listener: added :ref:`source IP ` - and :ref:`source port ` filter - chain matching. -* lua: exposed functions to Lua to verify digital signature. -* original_src filter: added the :ref:`filter`. -* outlier_detector: added configuration :ref:`outlier_detection.split_external_local_origin_errors` to distinguish locally and externally generated errors. See :ref:`arch_overview_outlier_detection` for full details. -* rbac: migrated from v2alpha to v2. -* redis: add support for Redis cluster custom cluster type. -* redis: automatically route commands using cluster slots for Redis cluster. -* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. -* redis: added :ref:`request mirror policy ` to enable shadow traffic and/or dual writes. -* redis: add support for zpopmax and zpopmin commands. -* redis: added - :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and - :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. -* redis: added auth support :ref:`downstream_auth_password ` for downstream client authentication, and :ref:`auth_password ` to configure authentication passwords for upstream server clusters. -* retry: added a retry predicate that :ref:`rejects canary hosts. 
` -* router: add support for configuring a :ref:`gRPC timeout offset ` on incoming requests. -* router: added ability to control retry back-off intervals via :ref:`retry policy `. -* router: added ability to issue a hedged retry in response to a per try timeout via a :ref:`hedge policy `. -* router: added a route name field to each http route in route.Route list -* router: added several new variables for exposing information about the downstream TLS connection via :ref:`header - formatters `. -* router: per try timeouts will no longer start before the downstream request has been received in full by the router.This ensures that the per try timeout does not account for slow downstreams and that will not start before the global timeout. -* router: added :ref:`RouteAction's auto_host_rewrite_header ` to allow upstream host header substitution with some other header's value -* router: added support for UPSTREAM_REMOTE_ADDRESS :ref:`header formatter - `. -* router: add ability to reject a request that includes invalid values for - headers configured in :ref:`strict_check_headers ` -* runtime: added support for :ref:`flexible layering configuration - `. -* runtime: added support for statically :ref:`specifying the runtime in the bootstrap configuration - `. -* runtime: :ref:`Runtime Discovery Service (RTDS) ` support added to layered runtime configuration. -* sandbox: added :ref:`CSRF sandbox `. -* server: ``--define manual_stamp=manual_stamp`` was added to allow server stamping outside of binary rules. - more info in the `bazel docs `_. -* server: added :ref:`server state ` statistic. -* server: added :ref:`initialization_time_ms` statistic. -* subset: added :ref:`list_as_any` option to - the subset lb which allows matching metadata against any of the values in a list value - on the endpoints. -* tools: added :repo:`proto ` support for :ref:`router check tool ` tests. -* tracing: add trace sampling configuration to the route, to override the route level. -* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. -* upstream: an EDS management server can now force removal of a host that is still passing active - health checking by first marking the host as failed via EDS health check and subsequently removing - it in a future update. This is a mechanism to work around a race condition in which an EDS - implementation may remove a host before it has stopped passing active HC, thus causing the host - to become stranded until a future update. -* upstream: added :ref:`an option ` - that allows ignoring new hosts for the purpose of load balancing calculations until they have - been health checked for the first time. -* upstream: added runtime error checking to prevent setting dns type to STRICT_DNS or LOGICAL_DNS when custom resolver name is specified. -* upstream: added possibility to override fallback_policy per specific selector in :ref:`subset load balancer `. -* upstream: the :ref:`logical DNS cluster ` now - displays the current resolved IP address in admin output instead of 0.0.0.0. - -1.10.0 (Apr 5, 2019) -==================== -* access log: added a new flag for upstream retry count exceeded. -* access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. -* access log: added a new flag for stream idle timeout. -* access log: added a new field for upstream transport failure reason in :ref:`file access logger` and - :ref:`gRPC access logger` for HTTP access logs. 
-* access log: added new fields for downstream x509 information (URI sans and subject) to file and gRPC access logger. -* admin: the admin server can now be accessed via HTTP/2 (prior knowledge). -* admin: changed HTTP response status code from 400 to 405 when attempting to GET a POST-only route (such as /quitquitquit). -* buffer: fix vulnerabilities when allocation fails. -* build: releases are built with GCC-7 and linked with LLD. -* build: dev docker images :ref:`have been split ` from tagged images for easier - discoverability in Docker Hub. Additionally, we now build images for point releases. -* config: added support of using google.protobuf.Any in opaque configs for extensions. -* config: logging warnings when deprecated fields are in use. -* config: removed deprecated --v2-config-only from command line config. -* config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. -* config: removed the deprecated_v1 config option from :ref:`ring hash `. -* config: removed REST_LEGACY as a valid :ref:`ApiType `. -* config: finish cluster warming only when a named response i.e. ClusterLoadAssignment associated to the cluster being warmed comes in the EDS response. This is a behavioural change from the current implementation where warming of cluster completes on missing load assignments also. -* config: use Envoy cpuset size to set the default number or worker threads if :option:`--cpuset-threads` is enabled. -* config: added support for :ref:`initial_fetch_timeout `. The timeout is disabled by default. -* cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. -* csrf: added :ref:`CSRF filter `. -* ext_authz: added support for buffering request body. -* ext_authz: migrated from v2alpha to v2 and improved docs. -* ext_authz: added a configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. -* ext_authz: migrated from v2alpha to v2 and improved the documentation. -* ext_authz: authorization request and response configuration has been separated into two distinct objects: :ref:`authorization request - ` and :ref:`authorization response - `. In addition, :ref:`client headers - ` and :ref:`upstream headers - ` replaces the previous *allowed_authorization_headers* object. - All the control header lists now support :ref:`string matcher ` instead of standard string. -* fault: added the :ref:`max_active_faults - ` setting, as well as - :ref:`statistics ` for the number of active faults - and the number of faults the overflowed. -* fault: added :ref:`response rate limit - ` fault injection. -* fault: added :ref:`HTTP header fault configuration - ` to the HTTP fault filter. -* governance: extending Envoy deprecation policy from 1 release (0-3 months) to 2 releases (3-6 months). -* health check: expected response codes in http health checks are now :ref:`configurable `. -* http: added new grpc_http1_reverse_bridge filter for converting gRPC requests into HTTP/1.1 requests. -* http: fixed a bug where Content-Length:0 was added to HTTP/1 204 responses. -* http: added :ref:`max request headers size `. The default behaviour is unchanged. -* http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data. -* http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. 
Default implementation is a no-op. -* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. -* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy` for more details. -* performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). -* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. -* ratelimit: removed deprecated rate limit configuration from bootstrap. -* redis: added :ref:`hashtagging ` to guarantee a given key's upstream. -* redis: added :ref:`latency stats ` for commands. -* redis: added :ref:`success and error stats ` for commands. -* redis: migrate hash function for host selection to `MurmurHash2 `_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. -* redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. -* router: added ability to configure a :ref:`retry policy ` at the - virtual host level. -* router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` -* router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. -* router: added per-route configuration of :ref:`internal redirects `. -* router: removed deprecated route-action level headers_to_add/remove. -* router: made :ref:`max retries header ` take precedence over the number of retries in route and virtual host retry policies. -* router: added support for prefix wildcards in :ref:`virtual host domains` -* stats: added support for histograms in prometheus -* stats: added usedonly flag to prometheus stats to only output metrics which have been - updated at least once. -* stats: added gauges tracking remaining resources before circuit breakers open. -* tap: added new alpha :ref:`HTTP tap filter `. -* tls: enabled TLS 1.3 on the server-side (non-FIPS builds). -* upstream: add hash_function to specify the hash function for :ref:`ring hash` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. -* upstream: added :ref:`degraded health value` which allows - routing to certain hosts only when there are insufficient healthy hosts available. -* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type`. -* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. -* tracing: added :ref:`verbose ` to support logging annotations on spans. -* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. -* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). - Refer to :ref:`ZooKeeper proxy` for more details. -* upstream: added configuration option to select any host when the fallback policy fails. -* upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. 
- -1.9.1 (Apr 2, 2019) -=================== -* http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters. -* http: fixed CVE-2019-9901 by normalizing HTTP paths prior to routing or L7 data plane processing. - This defaults off and is configurable via either HTTP connection manager :ref:`normalize_path - ` - or the :ref:`runtime `. - -1.9.0 (Dec 20, 2018) -==================== -* access log: added a :ref:`JSON logging mode ` to output access logs in JSON format. -* access log: added dynamic metadata to access log messages streamed over gRPC. -* access log: added DOWNSTREAM_CONNECTION_TERMINATION. -* admin: :http:post:`/logging` now responds with 200 while there are no params. -* admin: added support for displaying subject alternate names in :ref:`certs` end point. -* admin: added host weight to the :http:get:`/clusters?format=json` end point response. -* admin: :http:get:`/server_info` now responds with a JSON object instead of a single string. -* admin: :http:get:`/server_info` now exposes what stage of initialization the server is currently in. -* admin: added support for displaying command line options in :http:get:`/server_info` end point. -* circuit-breaker: added cx_open, rq_pending_open, rq_open and rq_retry_open gauges to expose live - state via :ref:`circuit breakers statistics `. -* cluster: set a default of 1s for :ref:`option `. -* config: removed support for the v1 API. -* config: added support for :ref:`rate limiting` discovery request calls. -* cors: added :ref:`invalid/valid stats ` to filter. -* ext-authz: added support for providing per route config - optionally disable the filter and provide context extensions. -* fault: removed integer percentage support. -* grpc-json: added support for :ref:`ignoring query parameters - `. -* health check: added :ref:`logging health check failure events `. -* health check: added ability to set :ref:`authority header value - ` for gRPC health check. -* http: added HTTP/2 WebSocket proxying via :ref:`extended CONNECT `. -* http: added limits to the number and length of header modifications in all fields request_headers_to_add and response_headers_to_add. These limits are very high and should only be used as a last-resort safeguard. -* http: added support for a :ref:`request timeout `. The timeout is disabled by default. -* http: no longer adding whitespace when appending X-Forwarded-For headers. **Warning**: this is not - compatible with 1.7.0 builds prior to `9d3a4eb4ac44be9f0651fcc7f87ad98c538b01ee `_. - See `#3611 `_ for details. -* http: augmented the `sendLocalReply` filter API to accept an optional `GrpcStatus` - value to override the default HTTP to gRPC status mapping. -* http: no longer close the TCP connection when a HTTP/1 request is retried due - to a response with empty body. -* http: added support for more gRPC content-type headers in :ref:`gRPC bridge filter `, like application/grpc+proto. -* listeners: all listener filters are now governed by the :ref:`listener_filters_timeout - ` setting. The hard coded 15s timeout in - the :ref:`TLS inspector listener filter ` is superseded by - this setting. -* listeners: added the ability to match :ref:`FilterChain ` using :ref:`source_type `. -* load balancer: added a `configuration ` option to specify the number of choices made in P2C. -* logging: added missing [ in log prefix. -* mongo_proxy: added :ref:`dynamic metadata `. -* network: removed the reference to `FilterState` in `Connection` in favor of `StreamInfo`. 
-* rate-limit: added :ref:`configuration ` - to specify whether the `GrpcStatus` status returned should be `RESOURCE_EXHAUSTED` or - `UNAVAILABLE` when a gRPC call is rate limited. -* rate-limit: removed support for the legacy ratelimit service and made the data-plane-api - :ref:`rls.proto ` based implementation default. -* rate-limit: removed the deprecated cluster_name attribute in :ref:`rate limit service configuration `. -* rate-limit: added :ref:`rate_limit_service ` configuration to filters. -* rbac: added dynamic metadata to the network level filter. -* rbac: added support for permission matching by :ref:`requested server name `. -* redis: static cluster configuration is no longer required. Redis proxy will work with clusters - delivered via CDS. -* router: added ability to configure arbitrary :ref:`retriable status codes `. -* router: added ability to set attempt count in upstream requests, see :ref:`virtual host's include request - attempt count flag `. -* router: added internal :ref:`grpc-retry-on ` policy. -* router: added :ref:`scheme_redirect ` and - :ref:`port_redirect ` to define the respective - scheme and port rewriting RedirectAction. -* router: when :ref:`max_grpc_timeout ` - is set, Envoy will now add or update the grpc-timeout header to reflect Envoy's expected timeout. -* router: per-try timeouts now start when an upstream stream is ready instead of when the request has - been fully decoded by Envoy. -* router: added support for not retrying :ref:`rate limited requests`. Rate limit filter now sets the :ref:`x-envoy-ratelimited` - header so the rate limited requests that may have been retried earlier will not be retried with this change. -* router: added support for enabling upgrades on a :ref:`per-route ` basis. -* router: support configuring a default fraction of mirror traffic via - :ref:`runtime_fraction `. -* sandbox: added :ref:`cors sandbox `. -* server: added `SIGINT` (Ctrl-C) handler to gracefully shut down Envoy like `SIGTERM`. -* stats: added :ref:`stats_matcher ` to the bootstrap config for granular control of stat instantiation. -* stream: renamed the `RequestInfo` namespace to `StreamInfo` to better match - its behaviour within TCP and HTTP implementations. -* stream: renamed `perRequestState` to `filterState` in `StreamInfo`. -* stream: added `downstreamDirectRemoteAddress` to `StreamInfo`. -* thrift_proxy: introduced thrift rate limiter filter. -* tls: added ssl.curves., ssl.sigalgs. and ssl.versions. to - :ref:`listener metrics ` to track TLS algorithms and versions in use. -* tls: added support for :ref:`client-side session resumption `. -* tls: added support for CRLs in :ref:`trusted_ca `. -* tls: added support for :ref:`multiple server TLS certificates `. -* tls: added support for :ref:`password encrypted private keys `. -* tls: added the ability to build :ref:`BoringSSL FIPS ` using ``--define boringssl=fips`` Bazel option. -* tls: removed support for ECDSA certificates with curves other than P-256. -* tls: removed support for RSA certificates with keys smaller than 2048-bits. -* tracing: added support to the Zipkin tracer for the :ref:`b3 ` single header format. -* tracing: added support for :ref:`Datadog ` tracer. -* upstream: added :ref:`scale_locality_weight` to enable - scaling locality weights by number of hosts removed by subset lb predicates. -* upstream: changed how load calculation for :ref:`priority levels` and :ref:`panic thresholds` interact. As long as normalized total health is 100%, panic thresholds are disregarded.
-* upstream: changed the default hash for :ref:`ring hash ` from std::hash to `xxHash `_. -* upstream: when using active health checking and STRICT_DNS with several addresses that resolve - to the same hosts, Envoy will now health check each host independently. - -1.8.0 (Oct 4, 2018) -=================== -* access log: added :ref:`response flag filter ` - to filter based on the presence of Envoy response flags. -* access log: added RESPONSE_DURATION and RESPONSE_TX_DURATION. -* access log: added REQUESTED_SERVER_NAME for SNI to tcp_proxy and http -* admin: added :http:get:`/hystrix_event_stream` as an endpoint for monitoring envoy's statistics - through `Hystrix dashboard `_. -* cli: added support for :ref:`component log level ` command line option for configuring log levels of individual components. -* cluster: added :ref:`option ` to merge - health check/weight/metadata updates within the given duration. -* config: regex validation added to limit to a maximum of 1024 characters. -* config: v1 disabled by default. v1 support remains available until October via flipping --v2-config-only=false. -* config: v1 disabled by default. v1 support remains available until October via deprecated flag --allow-deprecated-v1-api. -* config: fixed stat inconsistency between xDS and ADS implementation. :ref:`update_failure ` - stat is incremented in case of network failure and :ref:`update_rejected ` stat is incremented - in case of schema/validation error. -* config: added a stat :ref:`connected_state ` that indicates current connected state of Envoy with - management server. -* ext_authz: added support for configuring additional :ref:`authorization headers ` - to be sent from Envoy to the authorization service. -* fault: added support for fractional percentages in :ref:`FaultDelay ` - and in :ref:`FaultAbort `. -* grpc-json: added support for building HTTP response from - `google.api.HttpBody `_. -* health check: added support for :ref:`custom health check `. -* health check: added support for :ref:`specifying jitter as a percentage `. -* health_check: added support for :ref:`health check event logging `. -* health_check: added :ref:`timestamp ` - to the :ref:`health check event ` definition. -* health_check: added support for specifying :ref:`custom request headers ` - to HTTP health checker requests. -* http: added support for a :ref:`per-stream idle timeout - `. This applies at both :ref:`connection manager - ` - and :ref:`per-route granularity `. The timeout - defaults to 5 minutes; if you have other timeouts (e.g. connection idle timeout, upstream - response per-retry) that are longer than this in duration, you may want to consider setting a - non-default per-stream idle timeout. -* http: added upstream_rq_completed counter for :ref:`total requests completed ` to dynamic HTTP counters. -* http: added downstream_rq_completed counter for :ref:`total requests completed `, including on a :ref:`per-listener basis `. -* http: added generic :ref:`Upgrade support - `. -* http: better handling of HEAD requests. Now sending transfer-encoding: chunked rather than content-length: 0. -* http: fixed missing support for appending to predefined inline headers, e.g. - *authorization*, in features that interact with request and response headers, - e.g. :ref:`request_headers_to_add - `. For example, a - request header *authorization: token1* will appear as *authorization: - token1,token2*, after having :ref:`request_headers_to_add - ` with *authorization: - token2* applied. 
-* http: response filters not applied to early error paths such as http_parser generated 400s. -* http: restrictions added to reject *:*-prefixed pseudo-headers in :ref:`custom - request headers `. -* http: :ref:`hpack_table_size ` now controls - dynamic table size of both: encoder and decoder. -* http: added support for removing request headers using :ref:`request_headers_to_remove - `. -* http: added support for a :ref:`delayed close timeout` to mitigate race conditions when closing connections to downstream HTTP clients. The timeout defaults to 1 second. -* jwt-authn filter: add support for per route JWT requirements. -* listeners: added the ability to match :ref:`FilterChain ` using - :ref:`destination_port ` and - :ref:`prefix_ranges `. -* lua: added :ref:`connection() ` wrapper and *ssl()* API. -* lua: added :ref:`streamInfo() ` wrapper and *protocol()* API. -* lua: added :ref:`streamInfo():dynamicMetadata() ` API. -* network: introduced :ref:`sni_cluster ` network filter that forwards connections to the - upstream cluster specified by the SNI value presented by the client during a TLS handshake. -* proxy_protocol: added support for HAProxy Proxy Protocol v2 (AF_INET/AF_INET6 only). -* ratelimit: added support for :repo:`api/envoy/service/ratelimit/v2/rls.proto`. - Lyft's reference implementation of the `ratelimit `_ service also supports the data-plane-api proto as of v1.1.0. - Envoy can use either proto to send client requests to a ratelimit server with the use of the - `use_data_plane_proto` boolean flag in the ratelimit configuration. - Support for the legacy proto `source/common/ratelimit/ratelimit.proto` is deprecated and will be removed at the start of the 1.9.0 release cycle. -* ratelimit: added :ref:`failure_mode_deny ` option to control traffic flow in - case of rate limit service error. -* rbac config: added a :ref:`principal_name ` field and - removed the old `name` field to give more flexibility for matching certificate identity. -* rbac network filter: a :ref:`role-based access control network filter ` has been added. -* rest-api: added ability to set the :ref:`request timeout ` for REST API requests. -* route checker: added v2 config support and removed support for v1 configs. -* router: added ability to set request/response headers at the :ref:`envoy_api_msg_route.Route` level. -* stats: added :ref:`option to configure the DogStatsD metric name prefix` to DogStatsdSink. -* tcp_proxy: added support for :ref:`weighted clusters `. -* thrift_proxy: introduced thrift routing, moved configuration to correct location -* thrift_proxy: introduced thrift configurable decoder filters -* tls: implemented :ref:`Secret Discovery Service `. -* tracing: added support for configuration of :ref:`tracing sampling - `. -* upstream: added configuration option to the subset load balancer to take locality weights into account when - selecting a host from a subset. -* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host ` header - for overriding destination address when using the :ref:`Original Destination ` - load balancing policy. - -1.7.0 (Jun 21, 2018) -==================== -* access log: added ability to log response trailers. -* access log: added ability to format START_TIME. -* access log: added DYNAMIC_METADATA :ref:`access log formatter `. -* access log: added :ref:`HeaderFilter ` - to filter logs based on request headers. -* access log: added `%([1-9])?f` as one of START_TIME specifiers to render subseconds. 
-* access log: gRPC Access Log Service (ALS) support added for :ref:`HTTP access logs - `. -* access log: improved WebSocket logging. -* admin: added :http:get:`/config_dump` for dumping the current configuration and associated xDS - version information (if applicable). -* admin: added :http:get:`/clusters?format=json` for outputing a JSON-serialized proto detailing - the current status of all clusters. -* admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format. -* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values. -* admin: mutations must be sent as POSTs, rather than GETs. Mutations include: - :http:post:`/cpuprofiler`, :http:post:`/healthcheck/fail`, :http:post:`/healthcheck/ok`, - :http:post:`/logging`, :http:post:`/quitquitquit`, :http:post:`/reset_counters`, - :http:post:`/runtime_modify?key1=value1&key2=value2&keyN=valueN`. -* admin: removed `/routes` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. -* buffer filter: the buffer filter can be optionally - :ref:`disabled ` or - :ref:`overridden ` with - route-local configuration. -* cli: added --config-yaml flag to the Envoy binary. When set its value is interpreted as a yaml - representation of the bootstrap config and overrides --config-path. -* cluster: added :ref:`option ` - to close tcp_proxy upstream connections when health checks fail. -* cluster: added :ref:`option ` to drain - connections from hosts after they are removed from service discovery, regardless of health status. -* cluster: fixed bug preventing the deletion of all endpoints in a priority -* debug: added symbolized stack traces (where supported) -* ext-authz filter: added support to raw HTTP authorization. -* ext-authz filter: added support to gRPC responses to carry HTTP attributes. -* grpc: support added for the full set of :ref:`Google gRPC call credentials - `. -* gzip filter: added :ref:`stats ` to the filter. -* gzip filter: sending *accept-encoding* header as *identity* no longer compresses the payload. -* health check: added ability to set :ref:`additional HTTP headers - ` for HTTP health check. -* health check: added support for EDS delivered :ref:`endpoint health status - `. -* health check: added interval overrides for health state transitions from :ref:`healthy to unhealthy - `, :ref:`unhealthy to healthy - ` and for subsequent checks on - :ref:`unhealthy hosts `. -* health check: added support for :ref:`custom health check `. -* health check: health check connections can now be configured to use http/2. -* health check http filter: added - :ref:`generic header matching ` - to trigger health check response. Deprecated the endpoint option. -* http: filters can now optionally support - :ref:`virtual host `, - :ref:`route `, and - :ref:`weighted cluster ` - local configuration. -* http: added the ability to pass DNS type Subject Alternative Names of the client certificate in the - :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. -* http: local responses to gRPC requests are now sent as trailers-only gRPC responses instead of plain HTTP responses. - Notably the HTTP response code is always "200" in this case, and the gRPC error code is carried in "grpc-status" - header, optionally accompanied with a text message in "grpc-message" header. -* http: added support for :ref:`via header - ` - append. -* http: added a :ref:`configuration option - ` - to elide *x-forwarded-for* header modifications. 
-* http: fixed a bug in inline headers where addCopy and addViaMove didn't add header values when - encountering inline headers with multiple instances. -* listeners: added :ref:`tcp_fast_open_queue_length ` option. -* listeners: added the ability to match :ref:`FilterChain ` using - :ref:`application_protocols ` - (e.g. ALPN for TLS protocol). -* listeners: `sni_domains` has been deprecated/renamed to :ref:`server_names `. -* listeners: removed restriction on all filter chains having identical filters. -* load balancer: added :ref:`weighted round robin - ` support. The round robin - scheduler now respects endpoint weights and also has improved fidelity across - picks. -* load balancer: :ref:`locality weighted load balancing - ` is now supported. -* load balancer: ability to configure zone aware load balancer settings :ref:`through the API - `. -* load balancer: the :ref:`weighted least request - ` load balancing algorithm has been improved - to have better balance when operating in weighted mode. -* logger: added the ability to optionally set the log format via the :option:`--log-format` option. -* logger: all :ref:`logging levels ` can be configured - at run-time: trace, debug, info, warning, error, critical. -* rbac http filter: a :ref:`role-based access control http filter ` has been added. -* router: the behavior of per-try timeouts has changed in the case where a portion of the response has - already been proxied downstream when the timeout occurs. Previously, the response would be reset - leading to either an HTTP/2 reset or an HTTP/1 closed connection and a partial response. Now, the - timeout will be ignored and the response will continue to proxy up to the global request timeout. -* router: changed the behavior of :ref:`source IP routing ` - to ignore the source port. -* router: added a :ref:`prefix_match ` match type - to explicitly match based on the prefix of a header value. -* router: added a :ref:`suffix_match ` match type - to explicitly match based on the suffix of a header value. -* router: added a :ref:`present_match ` match type - to explicitly match based on a header's presence. -* router: added an :ref:`invert_match ` config option - which supports inverting all other match types to match based on headers which are not a desired value. -* router: allow :ref:`cookie routing ` to - generate session cookies. -* router: added START_TIME as one of the supported variables in :ref:`header - formatters `. -* router: added a :ref:`max_grpc_timeout ` - config option to specify the maximum allowable value for timeouts decoded from gRPC header field - `grpc-timeout`. -* router: added a :ref:`configuration option - ` to disable *x-envoy-* - header generation. -* router: added 'unavailable' to the retriable gRPC status codes that can be specified - through :ref:`x-envoy-retry-grpc-on `. -* sockets: added :ref:`tap transport socket extension ` to support - recording plain text traffic and PCAP generation. -* sockets: added `IP_FREEBIND` socket option support for :ref:`listeners - ` and upstream connections via - :ref:`cluster manager wide - ` and - :ref:`cluster specific ` options. -* sockets: added `IP_TRANSPARENT` socket option support for :ref:`listeners - `. -* sockets: added `SO_KEEPALIVE` socket option for upstream connections - :ref:`per cluster `. -* stats: added support for histograms. -* stats: added :ref:`option to configure the statsd prefix`. -* stats: updated stats sink interface to flush through a single call. -* tls: added support for - :ref:`verify_certificate_spki `.
-* tls: added support for multiple - :ref:`verify_certificate_hash ` - values. -* tls: added support for using - :ref:`verify_certificate_spki ` - and :ref:`verify_certificate_hash ` - without :ref:`trusted_ca `. -* tls: added support for allowing expired certificates with - :ref:`allow_expired_certificate `. -* tls: added support for :ref:`renegotiation ` - when acting as a client. -* tls: removed support for legacy SHA-2 CBC cipher suites. -* tracing: the sampling decision is now delegated to the tracers, allowing the tracer to decide when and if - to use it. For example, if the :ref:`x-b3-sampled ` header - is supplied with the client request, its value will override any sampling decision made by the Envoy proxy. -* websocket: support configuring idle_timeout and max_connect_attempts. -* upstream: added support for host override for a request in :ref:`Original destination host request header `. -* header to metadata: added :ref:`HTTP Header to Metadata filter`. - -1.6.0 (March 20, 2018) -====================== - -* access log: added DOWNSTREAM_REMOTE_ADDRESS, DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, and - DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. - DOWNSTREAM_ADDRESS access log formatter has been deprecated. -* access log: added less than or equal (LE) :ref:`comparison filter - `. -* access log: added configuration to :ref:`runtime filter - ` to set default sampling rate, divisor, - and whether to use independent randomness or not. -* admin: added :ref:`/runtime ` admin endpoint to read the - current runtime values. -* build: added support for :repo:`building Envoy with exported symbols - `. This change allows scripts loaded with the Lua filter to - load shared object libraries such as those installed via `LuaRocks `_. -* config: added support for sending error details as - `grpc.rpc.Status `_ - in :ref:`DiscoveryRequest `. -* config: added support for :ref:`inline delivery ` of TLS - certificates and private keys. -* config: added restrictions for the backing :ref:`config sources ` - of xDS resources. For filesystem based xDS the file must exist at configuration time. For cluster - based xDS the backing cluster must be statically defined and be of non-EDS type. -* grpc: the Google gRPC C++ library client is now supported as specified in the :ref:`gRPC services - overview ` and :ref:`GrpcService `. -* grpc-json: added support for :ref:`inline descriptors - `. -* health check: added :ref:`gRPC health check ` - based on `grpc.health.v1.Health `_ - service. -* health check: added ability to set :ref:`host header value - ` for http health check. -* health check: extended the health check filter to support computation of the health check response - based on the :ref:`percentage of healthy servers in upstream clusters - `. -* health check: added setting for :ref:`no-traffic - interval`. -* http: added idle timeout for :ref:`upstream http connections - `. -* http: added support for :ref:`proxying 100-Continue responses - `. -* http: added the ability to pass a URL encoded PEM encoded peer certificate in the - :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. -* http: added support for trusting additional hops in the - :ref:`config_http_conn_man_headers_x-forwarded-for` request header. -* http: added support for :ref:`incoming HTTP/1.0 - `. -* hot restart: added SIGTERM propagation to children to :ref:`hot-restarter.py - `, which enables using it as a parent of containers. -* ip tagging: added :ref:`HTTP IP Tagging filter`. 
-* listeners: added support for :ref:`listening for both IPv4 and IPv6 - ` when binding to ::. -* listeners: added support for listening on :ref:`UNIX domain sockets - `. -* listeners: added support for :ref:`abstract unix domain sockets ` on - Linux. The abstract namespace can be used by prepending '@' to a socket path. -* load balancer: added cluster configuration for :ref:`healthy panic threshold - ` percentage. -* load balancer: added :ref:`Maglev ` consistent hash - load balancer. -* load balancer: added support for - :ref:`LocalityLbEndpoints` priorities. -* lua: added headers :ref:`replace() ` API. -* lua: extended to support :ref:`metadata object ` API. -* redis: added local `PING` support to the :ref:`Redis filter `. -* redis: added `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` to the :ref:`Redis command splitter - ` whitelist. -* router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, - DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header - formatters `. The CLIENT_IP header formatter - has been deprecated. -* router: added gateway-error :ref:`retry-on ` policy. -* router: added support for route matching based on :ref:`URL query string parameters - `. -* router: added support for more granular weighted cluster routing by allowing the :ref:`total_weight - ` to be specified in configuration. -* router: added support for :ref:`custom request/response headers - ` with mixed static and dynamic values. -* router: added support for :ref:`direct responses `. - I.e., sending a preconfigured HTTP response without proxying anywhere. -* router: added support for :ref:`HTTPS redirects - ` on specific routes. -* router: added support for :ref:`prefix_rewrite - ` for redirects. -* router: added support for :ref:`stripping the query string - ` for redirects. -* router: added support for downstream request/upstream response - :ref:`header manipulation ` in :ref:`weighted - cluster `. -* router: added support for :ref:`range based header matching - ` for request routing. -* squash: added support for the :ref:`Squash microservices debugger `. - Allows debugging an incoming request to a microservice in the mesh. -* stats: added metrics service API implementation. -* stats: added native :ref:`DogStatsd ` support. -* stats: added support for :ref:`fixed stats tag values - ` which will be added to all metrics. -* tcp proxy: added support for specifying a :ref:`metadata matcher - ` for upstream - clusters in the tcp filter. -* tcp proxy: improved TCP proxy to correctly proxy TCP half-close. -* tcp proxy: added :ref:`idle timeout - `. -* tcp proxy: access logs now bring an IP address without a port when using DOWNSTREAM_ADDRESS. - Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. -* tracing: added support for dynamically loading an :ref:`OpenTracing tracer - `. -* tracing: when using the Zipkin tracer, it is now possible for clients to specify the sampling - decision (using the :ref:`x-b3-sampled ` header) and - have the decision propagated through to subsequently invoked services. -* tracing: when using the Zipkin tracer, it is no longer necessary to propagate the - :ref:`x-ot-span-context ` header. - See more on trace context propagation :ref:`here `. -* transport sockets: added transport socket interface to allow custom implementations of transport - sockets. A transport socket provides read and write logic with buffer encryption and decryption - (if applicable). The existing TLS implementation has been refactored with the interface. 
-* upstream: added support for specifying an :ref:`alternate stats name - ` while emitting stats for clusters. -* Many small bug fixes and performance improvements not listed. - -1.5.0 (December 4, 2017) -======================== - -* access log: added fields for :ref:`UPSTREAM_LOCAL_ADDRESS and DOWNSTREAM_ADDRESS - `. -* admin: added :ref:`JSON output ` for stats admin endpoint. -* admin: added basic :ref:`Prometheus output ` for stats admin - endpoint. Histograms are not currently output. -* admin: added ``version_info`` to the :ref:`/clusters admin endpoint`. -* config: the :ref:`v2 API ` is now considered production ready. -* config: added --v2-config-only CLI flag. -* cors: added :ref:`CORS filter `. -* health check: added :ref:`x-envoy-immediate-health-check-fail - ` header support. -* health check: added :ref:`reuse_connection ` option. -* http: added :ref:`per-listener stats `. -* http: end-to-end HTTP flow control is now complete across both connections, streams, and filters. -* load balancer: added :ref:`subset load balancer `. -* load balancer: added ring size and hash :ref:`configuration options - `. This used to be configurable via runtime. The runtime - configuration was deleted without deprecation as we are fairly certain no one is using it. -* log: added the ability to optionally log to a file instead of stderr via the - :option:`--log-path` option. -* listeners: added :ref:`drain_type ` option. -* lua: added experimental :ref:`Lua filter `. -* mongo filter: added :ref:`fault injection `. -* mongo filter: added :ref:`"drain close" ` support. -* outlier detection: added :ref:`HTTP gateway failure type `. - See :ref:`deprecated log ` - for outlier detection stats deprecations in this release. -* redis: the :ref:`redis proxy filter ` is now considered - production ready. -* redis: added :ref:`"drain close" ` functionality. -* router: added :ref:`x-envoy-overloaded ` support. -* router: added :ref:`regex ` route matching. -* router: added :ref:`custom request headers ` - for upstream requests. -* router: added :ref:`downstream IP hashing - ` for HTTP ketama routing. -* router: added :ref:`cookie hashing `. -* router: added :ref:`start_child_span ` option - to create child span for egress calls. -* router: added optional :ref:`upstream logs `. -* router: added complete :ref:`custom append/override/remove support - ` of request/response headers. -* router: added support to :ref:`specify response code during redirect - `. -* router: added :ref:`configuration ` - to return either a 404 or 503 if the upstream cluster does not exist. -* runtime: added :ref:`comment capability `. -* server: change default log level (:option:`-l`) to `info`. -* stats: maximum stat/name sizes and maximum number of stats are now variable via the - `--max-obj-name-len` and `--max-stats` options. -* tcp proxy: added :ref:`access logging `. -* tcp proxy: added :ref:`configurable connect retries - `. -* tcp proxy: enable use of :ref:`outlier detector `. -* tls: added :ref:`SNI support `. -* tls: added support for specifying :ref:`TLS session ticket keys - `. -* tls: allow configuration of the :ref:`min - ` and :ref:`max - ` TLS protocol versions. -* tracing: added :ref:`custom trace span decorators `. -* Many small bug fixes and performance improvements not listed. - -1.4.0 (August 24, 2017) -======================= - -* macOS is :repo:`now supported `. (A few features - are missing such as hot restart and original destination routing). -* YAML is now directly supported for config files. 
-* Added /routes admin endpoint. -* End-to-end flow control is now supported for TCP proxy, HTTP/1, and HTTP/2. HTTP flow control - that includes filter buffering is incomplete and will be implemented in 1.5.0. -* Log verbosity :repo:`compile time flag ` added. -* Hot restart :repo:`compile time flag ` added. -* Original destination :ref:`cluster ` - and :ref:`load balancer ` added. -* :ref:`WebSocket ` is now supported. -* Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure - no one is using this feature. -* Route `validate_clusters` option added. -* :ref:`x-envoy-downstream-service-node ` - header added. -* :ref:`x-forwarded-client-cert ` header - added. -* Initial HTTP/1 forward proxy support for absolute URLs has been added. -* HTTP/2 codec settings are now configurable. -* gRPC/JSON transcoder :ref:`filter ` added. -* gRPC web :ref:`filter ` added. -* Configurable timeout for the rate limit service call in the :ref:`network - ` and :ref:`HTTP ` rate limit - filters. -* :ref:`x-envoy-retry-grpc-on ` header added. -* :ref:`LDS API ` added. -* TLS :`require_client_certificate` option added. -* :ref:`Configuration check tool ` added. -* :ref:`JSON schema check tool ` added. -* Config validation mode added via the :option:`--mode` option. -* :option:`--local-address-ip-version` option added. -* IPv6 support is now complete. -* UDP `statsd_ip_address` option added. -* Per-cluster DNS resolvers added. -* :ref:`Fault filter ` enhancements and fixes. -* Several features are :ref:`deprecated as of the 1.4.0 release `. They - will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the - `HttpFilterConfigFactory` filter API has been deprecated in favor of - `NamedHttpFilterConfigFactory`. -* Many small bug fixes and performance improvements not listed. - -1.3.0 (May 17, 2017) -==================== - -* As of this release, we now have an official :repo:`breaking change policy - `. Note that there are numerous breaking configuration - changes in this release. They are not listed here. Future releases will adhere to the policy and - have clear documentation on deprecations and changes. -* Bazel is now the canonical build system (replacing CMake). There have been a huge number of - changes to the development/build/test flow. See :repo:`/bazel/README.md` and - :repo:`/ci/README.md` for more information. -* :ref:`Outlier detection ` has been expanded to include success - rate variance, and all parameters are now configurable in both runtime and in the JSON - configuration. -* TCP level listener and cluster connections now have configurable receive buffer - limits at which point connection level back pressure is applied. - Full end to end flow control will be available in a future release. -* :ref:`Redis health checking ` has been added as an active - health check type. Full Redis support will be documented/supported in 1.4.0. -* :ref:`TCP health checking ` now supports a - "connect only" mode that only checks if the remote server can be connected to without - writing/reading any data. -* `BoringSSL `_ is now the only supported TLS provider. - The default cipher suites and ECDH curves have been updated with more modern defaults for both - listener and cluster connections. -* The `header value match` rate limit action has been expanded to include an `expect - match` parameter. -* Route level HTTP rate limit configurations now do not inherit the virtual host level - configurations by default. 
Use `include_vh_rate_limits` to inherit the virtual host - level options if desired. -* HTTP routes can now add request headers on a per route and per virtual host basis via the - :ref:`request_headers_to_add ` option. -* The :ref:`example configurations ` have been refreshed to demonstrate the - latest features. -* `per_try_timeout_ms` can now be configured in - a route's retry policy in addition to via the :ref:`x-envoy-upstream-rq-per-try-timeout-ms - ` HTTP header. -* HTTP virtual host matching now includes support for prefix wildcard domains (e.g., `*.lyft.com`). -* The default for tracing random sampling has been changed to 100% and is still configurable in - :ref:`runtime `. -* HTTP tracing configuration has been extended to allow tags - to be populated from arbitrary HTTP headers. -* The :ref:`HTTP rate limit filter ` can now be applied to internal, - external, or all requests via the `request_type` option. -* :ref:`Listener binding ` now requires specifying an `address` field. This can be - used to bind a listener to both a specific address as well as a port. -* The :ref:`MongoDB filter ` now emits a stat for queries that - do not have `$maxTimeMS` set. -* The :ref:`MongoDB filter ` now emits logs that are fully valid - JSON. -* The CPU profiler output path is now configurable. -* A watchdog system has been added that can kill the server if a deadlock is detected. -* A :ref:`route table checking tool ` has been added that can - be used to test route tables before use. -* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. -* Added additional cluster wide information related to outlier detection to the :ref:`/clusters - admin endpoint `. -* Multiple SANs can now be verified via the `verify_subject_alt_name` setting. - Additionally, URI type SANs can be verified. -* HTTP filters can now be passed opaque configuration specified on a per route basis. -* By default Envoy now has a built in crash handler that will print a back trace. This behavior can - be disabled if desired via the ``--define=signal_trace=disabled`` Bazel option. -* Zipkin has been added as a supported :ref:`tracing provider `. -* Numerous small changes and fixes not listed here. - -1.2.0 (March 7, 2017) -===================== - -* :ref:`Cluster discovery service (CDS) API `. -* :ref:`Outlier detection ` (passive health checking). -* Envoy configuration is now checked against a JSON schema. -* :ref:`Ring hash ` consistent load balancer, as well as HTTP - consistent hash routing based on a policy. -* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP - rate limiting filter. -* HTTP routing to a cluster retrieved from a header. -* Weighted cluster HTTP routing. -* Auto host rewrite during HTTP routing. -* Regex header matching during HTTP routing. -* HTTP access log runtime filter. -* LightStep tracer :ref:`parent/child span association `. -* :ref:`Route discovery service (RDS) API `. -* HTTP router :ref:`x-envoy-upstream-rq-timeout-alt-response header - ` support. -* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for - iptables based transparent proxy support). -* TCP proxy filter :ref:`route table support `. -* Configurable stats flush interval. -* Various :ref:`third party library upgrades `, including using BoringSSL as - the default SSL provider. -* No longer maintain closed HTTP/2 streams for priority calculations. Leads to substantial memory - savings for large meshes. -* Numerous small changes and fixes not listed here. 
- -1.1.0 (November 30, 2016) ========================= - -* Switch from Jansson to RapidJSON for our JSON library (allowing for a configuration schema in - 1.2.0). -* Upgrade :ref:`recommended version ` of various other libraries. -* Configurable DNS refresh rate for DNS service discovery types. -* Upstream circuit breaker configuration can be :ref:`overridden via runtime - `. -* :ref:`Zone aware routing support `. -* Generic header matching routing rule. -* HTTP/2 graceful connection draining (double GOAWAY). -* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS - feature). -* Initial release of the :ref:`fault injection HTTP filter `. -* HTTP :ref:`rate limit filter ` enhancements (note that the - configuration for HTTP rate limiting is going to be overhauled in 1.2.0). -* Added :ref:`refused-stream retry policy `. -* Multiple :ref:`priority queues ` for upstream clusters - (configurable on a per route basis, with separate connection pools, circuit breakers, etc.). -* Added max connection circuit breaking to the :ref:`TCP proxy filter `. -* Added :ref:`CLI ` options for setting the logging file flush interval as well - as the drain/shutdown time during hot restart. -* A very large number of performance enhancements for core HTTP/TCP proxy flows as well as a - few new configuration flags to allow disabling expensive features if they are not needed - (specifically request ID generation and dynamic response code stats). -* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. -* Lots of other small fixes and enhancements not listed. - -1.0.0 (September 12, 2016) ========================== - -Initial open source release. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index a8b6e0f694c3..47a43924a890 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -24,6 +24,7 @@ Changes Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. * http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and `envoy.reloadable_features.new_http2_connection_pool_behavior`. +* http: added llhttp as an alternative HTTP/1 parser. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts.