Skip to content

Commit 68ed58a

Browse files
committed
Review feedback
Signed-off-by: Otto van der Schaaf <[email protected]>
1 parent 00b6861 commit 68ed58a

File tree

12 files changed

+61
-49
lines changed

12 files changed

+61
-49
lines changed

README.md

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -43,9 +43,9 @@ bazel build -c opt //:nighthawk
4343
```
4444
USAGE:
4545
46-
bazel-bin/nighthawk_client [--response-header-with-latency-input
47-
<string>] [--stats-flush-interval
48-
<uint32_t>] [--stats-sinks <string>] ...
46+
bazel-bin/nighthawk_client [--response-latency-header-name <string>]
47+
[--stats-flush-interval <uint32_t>]
48+
[--stats-sinks <string>] ...
4949
[--no-duration] [--simple-warmup]
5050
[--request-source <uri format>] [--label
5151
<string>] ... [--multi-target-use-https]
@@ -81,11 +81,12 @@ format>
8181
8282
Where:
8383
84-
--response-header-with-latency-input <string>
85-
Set an optional response header name, whose values will be tracked in
86-
a latency histogram if set. Can be used in tandem with the test server
87-
"emit_previous_request_delta_in_response_header" option to get a sense
88-
of elapsed time between request arrivals. Default: ""
84+
--response-latency-header-name <string>
85+
Set an optional header name that will be returned in responses, whose
86+
values will be tracked in a latency histogram if set. Can be used in
87+
tandem with the test server's response option
88+
"emit_previous_request_delta_in_response_header" to record elapsed
89+
time between request arrivals. Default: ""
8990
9091
--stats-flush-interval <uint32_t>
9192
Time interval (in seconds) between flushes to configured stats sinks.

api/client/options.proto

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -202,9 +202,9 @@ message CommandLineOptions {
202202
// specified the default is 5 seconds. Time interval must be at least 1s and at most 300s.
203203
google.protobuf.UInt32Value stats_flush_interval = 35
204204
[(validate.rules).uint32 = {gte: 1, lte: 300}];
205-
// Set an optional response header name, whose values will be tracked in a latency histogram if
206-
// set. Can be used in tandem with the test server's
207-
// "emit_previous_request_delta_in_response_header" option to get a sense of elapsed time between
208-
// request arrivals.
209-
google.protobuf.StringValue response_header_with_latency_input = 36;
205+
// Set an optional header name that will be returned in responses, whose values will be tracked in
206+
// a latency histogram if set. Can be used in tandem with the test server's response option
207+
// "emit_previous_request_delta_in_response_header" to record elapsed time between request
208+
// arrivals.
209+
google.protobuf.StringValue response_latency_header_name = 36;
210210
}

source/client/benchmark_client_impl.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -84,14 +84,14 @@ BenchmarkClientHttpImpl::BenchmarkClientHttpImpl(
8484
Envoy::Upstream::ClusterManagerPtr& cluster_manager,
8585
Envoy::Tracing::HttpTracerSharedPtr& http_tracer, absl::string_view cluster_name,
8686
RequestGenerator request_generator, const bool provide_resource_backpressure,
87-
absl::string_view response_header_with_latency_input)
87+
absl::string_view response_latency_header_name)
8888
: api_(api), dispatcher_(dispatcher), scope_(scope.createScope("benchmark.")),
8989
statistic_(std::move(statistic)), use_h2_(use_h2),
9090
benchmark_client_counters_({ALL_BENCHMARK_CLIENT_COUNTERS(POOL_COUNTER(*scope_))}),
9191
cluster_manager_(cluster_manager), http_tracer_(http_tracer),
9292
cluster_name_(std::string(cluster_name)), request_generator_(std::move(request_generator)),
9393
provide_resource_backpressure_(provide_resource_backpressure),
94-
response_header_with_latency_input_(response_header_with_latency_input) {
94+
response_latency_header_name_(response_latency_header_name) {
9595
statistic_.connect_statistic->setId("benchmark_http_client.queue_to_connect");
9696
statistic_.response_statistic->setId("benchmark_http_client.request_to_response");
9797
statistic_.response_header_size_statistic->setId("benchmark_http_client.response_header_size");
@@ -168,7 +168,7 @@ bool BenchmarkClientHttpImpl::tryStartRequest(CompletionCallback caller_completi
168168
*statistic_.connect_statistic, *statistic_.response_statistic,
169169
*statistic_.response_header_size_statistic, *statistic_.response_body_size_statistic,
170170
*statistic_.origin_latency_statistic, request->header(), shouldMeasureLatencies(),
171-
content_length, generator_, http_tracer_, response_header_with_latency_input_);
171+
content_length, generator_, http_tracer_, response_latency_header_name_);
172172
requests_initiated_++;
173173
pool_ptr->newStream(*stream_decoder, *stream_decoder);
174174
return true;

source/client/benchmark_client_impl.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient,
107107
Envoy::Tracing::HttpTracerSharedPtr& http_tracer,
108108
absl::string_view cluster_name, RequestGenerator request_generator,
109109
const bool provide_resource_backpressure,
110-
absl::string_view response_header_with_latency_input);
110+
absl::string_view response_latency_header_name);
111111
void setConnectionLimit(uint32_t connection_limit) { connection_limit_ = connection_limit; }
112112
void setMaxPendingRequests(uint32_t max_pending_requests) {
113113
max_pending_requests_ = max_pending_requests;
@@ -163,7 +163,7 @@ class BenchmarkClientHttpImpl : public BenchmarkClient,
163163
std::string cluster_name_;
164164
const RequestGenerator request_generator_;
165165
const bool provide_resource_backpressure_;
166-
const std::string response_header_with_latency_input_;
166+
const std::string response_latency_header_name_;
167167
};
168168

169169
} // namespace Client

source/client/options_impl.cc

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -294,12 +294,14 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) {
294294
stats_flush_interval_),
295295
false, 5, "uint32_t", cmd);
296296

297-
TCLAP::ValueArg<std::string> response_header_with_latency_input(
298-
"", "response-header-with-latency-input",
299-
"Set an optional response header name, whose values will be tracked in a latency histogram "
300-
"if set. Can be used in tandem with the test server "
301-
"\"emit_previous_request_delta_in_response_header\" option to get a sense of elapsed time "
302-
"between request arrivals. Default: \"\"",
297+
TCLAP::ValueArg<std::string> response_latency_header_name(
298+
"", "response-latency-header-name",
299+
"Set an optional header name that will be returned in responses, whose values will be "
300+
"tracked in a latency histogram if set. "
301+
"Can be used in tandem with the test server's response option "
302+
"\"emit_previous_request_delta_in_response_header\" to record elapsed time between request "
303+
"arrivals. "
304+
"Default: \"\"",
303305
false, "", "string", cmd);
304306

305307
Utility::parseCommand(cmd, argc, argv);
@@ -433,7 +435,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv) {
433435
}
434436
}
435437
TCLAP_SET_IF_SPECIFIED(stats_flush_interval, stats_flush_interval_);
436-
TCLAP_SET_IF_SPECIFIED(response_header_with_latency_input, response_header_with_latency_input_);
438+
TCLAP_SET_IF_SPECIFIED(response_latency_header_name, response_latency_header_name_);
437439

438440
// CLI-specific tests.
439441
// TODO(oschaaf): as per mergconflicts's remark, it would be nice to aggregate
@@ -619,8 +621,8 @@ OptionsImpl::OptionsImpl(const nighthawk::client::CommandLineOptions& options) {
619621
no_duration_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(options, no_duration, no_duration_);
620622
}
621623
std::copy(options.labels().begin(), options.labels().end(), std::back_inserter(labels_));
622-
response_header_with_latency_input_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(
623-
options, response_header_with_latency_input, response_header_with_latency_input_);
624+
response_latency_header_name_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(
625+
options, response_latency_header_name, response_latency_header_name_);
624626

625627
validate();
626628
}
@@ -793,8 +795,8 @@ CommandLineOptionsPtr OptionsImpl::toCommandLineOptionsInternal() const {
793795
*command_line_options->add_stats_sinks() = stats_sink;
794796
}
795797
command_line_options->mutable_stats_flush_interval()->set_value(stats_flush_interval_);
796-
command_line_options->mutable_response_header_with_latency_input()->set_value(
797-
response_header_with_latency_input_);
798+
command_line_options->mutable_response_latency_header_name()->set_value(
799+
response_latency_header_name_);
798800
return command_line_options;
799801
}
800802

source/client/options_impl.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable<Envoy::Logger
8686
}
8787
uint32_t statsFlushInterval() const override { return stats_flush_interval_; }
8888
std::string responseHeaderWithLatencyInput() const override {
89-
return response_header_with_latency_input_;
89+
return response_latency_header_name_;
9090
};
9191

9292
private:
@@ -141,7 +141,7 @@ class OptionsImpl : public Options, public Envoy::Logger::Loggable<Envoy::Logger
141141
bool no_duration_{false};
142142
std::vector<envoy::config::metrics::v3::StatsSink> stats_sinks_;
143143
uint32_t stats_flush_interval_{5};
144-
std::string response_header_with_latency_input_;
144+
std::string response_latency_header_name_;
145145
};
146146

147147
} // namespace Client

source/client/stream_decoder.cc

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,8 @@ void StreamDecoder::decodeHeaders(Envoy::Http::ResponseHeaderMapPtr&& headers, b
1919
response_header_sizes_statistic_.addValue(response_headers_->byteSize());
2020
const uint64_t response_code = Envoy::Http::Utility::getResponseStatus(*response_headers_);
2121
stream_info_.response_code_ = static_cast<uint32_t>(response_code);
22-
if (!response_header_with_latency_input_.empty()) {
23-
const auto timing_header_name =
24-
Envoy::Http::LowerCaseString(response_header_with_latency_input_);
22+
if (!response_latency_header_name_.empty()) {
23+
const auto timing_header_name = Envoy::Http::LowerCaseString(response_latency_header_name_);
2524
const Envoy::Http::HeaderEntry* timing_header = response_headers_->get(timing_header_name);
2625
if (timing_header != nullptr) {
2726
absl::string_view timing_value = timing_header->value().getStringView();

source/client/stream_decoder.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder,
4747
HeaderMapPtr request_headers, bool measure_latencies, uint32_t request_body_size,
4848
Envoy::Random::RandomGenerator& random_generator,
4949
Envoy::Tracing::HttpTracerSharedPtr& http_tracer,
50-
std::string response_header_with_latency_input)
50+
absl::string_view response_latency_header_name)
5151
: dispatcher_(dispatcher), time_source_(time_source),
5252
decoder_completion_callback_(decoder_completion_callback),
5353
caller_completion_callback_(std::move(caller_completion_callback)),
@@ -59,7 +59,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder,
5959
complete_(false), measure_latencies_(measure_latencies),
6060
request_body_size_(request_body_size), stream_info_(time_source_),
6161
random_generator_(random_generator), http_tracer_(http_tracer),
62-
response_header_with_latency_input_(std::move(response_header_with_latency_input)) {
62+
response_latency_header_name_(response_latency_header_name) {
6363
if (measure_latencies_ && http_tracer_ != nullptr) {
6464
setupForTracing();
6565
}
@@ -121,7 +121,7 @@ class StreamDecoder : public Envoy::Http::ResponseDecoder,
121121
Envoy::Tracing::HttpTracerSharedPtr& http_tracer_;
122122
Envoy::Tracing::SpanPtr active_span_;
123123
Envoy::StreamInfo::UpstreamTiming upstream_timing_;
124-
const std::string response_header_with_latency_input_;
124+
const std::string response_latency_header_name_;
125125
};
126126

127127
} // namespace Client

test/benchmark_http_client_test.cc

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,8 +178,9 @@ class BenchmarkClientHttpTest : public Test {
178178
// verifyBenchmarkClientProcessesExpectedInflightRequests.
179179
void setupBenchmarkClient(const RequestGenerator& request_generator) {
180180
client_ = std::make_unique<Client::BenchmarkClientHttpImpl>(
181-
*api_, *dispatcher_, store_, statistic_, false, cluster_manager_, http_tracer_, "benchmark",
182-
request_generator, true, "");
181+
*api_, *dispatcher_, store_, statistic_, /*use_h2*/ false, cluster_manager_, http_tracer_,
182+
"benchmark", request_generator, /*provide_resource_backpressure*/ true,
183+
/*response_latency_header_name=*/"");
183184
}
184185

185186
uint64_t getCounter(absl::string_view name) {

test/integration/configurations/nighthawk_track_timings.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
1+
# Envoy configuration template for testing the time-tracking http filter extension.
2+
# Sets up the time-tracking extension plus the test-server extension for generating
3+
# responses.
14
admin:
25
access_log_path: $tmpdir/nighthawk-test-server-admin-access.log
36
profile_path: $tmpdir/nighthawk-test-server.prof
@@ -23,6 +26,7 @@ static_resources:
2326
domains:
2427
- "*"
2528
http_filters:
29+
# Here we set up the time-tracking extension to emit request-arrival delta timings in a response header.
2630
- name: time-tracking
2731
config:
2832
emit_previous_request_delta_in_response_header: x-origin-request-receipt-delta

0 commit comments

Comments
 (0)