// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

import "grpc/testing/payloads.proto";
import "grpc/testing/stats.proto";
import "google/protobuf/timestamp.proto";

package grpc.testing;

option java_multiple_files = true;
option java_package = "io.grpc.testing";
option java_outer_classname = "ControlProto";

enum ClientType {
  // Many languages support a basic distinction between using a
  // sync or an async client, and this allows the specification of
  // which one to use.
  SYNC_CLIENT = 0;
  ASYNC_CLIENT = 1;
  OTHER_CLIENT = 2;  // used for some language-specific variants
  CALLBACK_CLIENT = 3;
}

enum ServerType {
  SYNC_SERVER = 0;
  ASYNC_SERVER = 1;
  ASYNC_GENERIC_SERVER = 2;
  OTHER_SERVER = 3;  // used for some language-specific variants
  CALLBACK_SERVER = 4;
}

enum RpcType {
  UNARY = 0;
  STREAMING = 1;
  STREAMING_FROM_CLIENT = 2;
  STREAMING_FROM_SERVER = 3;
  STREAMING_BOTH_WAYS = 4;
}

// Parameters of poisson process distribution, which is a good representation
// of activity coming in from independent identical stationary sources.
message PoissonParams {
  // The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
  double offered_load = 1;
}
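
// Illustrative note (not part of the original schema comments): under an
// open-loop Poisson load, the gaps between RPC start times are exponentially
// distributed with mean 1/offered_load. For example, offered_load = 1000 means
// RPCs are started at an average rate of 1000 per second (mean gap of 1 ms),
// with each gap drawn as -ln(U)/offered_load for U uniform in (0, 1]. This is a
// sketch of the sampling rule, not a statement about any particular worker
// implementation.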

// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {}

message LoadParams {
  oneof load {
    ClosedLoopParams closed_loop = 1;
    PoissonParams poisson = 2;
  }
}

// presence of SecurityParams implies use of TLS
message SecurityParams {
  bool use_test_ca = 1;
  string server_host_override = 2;
  string cred_type = 3;
}
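
// Illustrative note (an assumption about typical usage, not part of the
// original comments): when benchmarking against a server that uses the gRPC
// test certificates, a driver would typically set use_test_ca = true and point
// server_host_override at the hostname baked into the test certificate so that
// hostname verification passes; cred_type selects which credential
// implementation to use and is implementation specific.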

message ChannelArg {
  string name = 1;
  oneof value {
    string str_value = 2;
    int32 int_value = 3;
  }
}
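
// Illustrative example (an assumption about typical usage, not prescribed by
// this file): each ChannelArg names one channel argument understood by the
// benchmarked implementation, e.g.
//   { name: "grpc.optimization_target"  str_value: "throughput" }
// or an integer-valued argument passed via int_value.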

message ClientConfig {
  // List of targets to connect to. At least one target needs to be specified.
  repeated string server_targets = 1;
  ClientType client_type = 2;
  SecurityParams security_params = 3;
  // How many concurrent RPCs to start for each channel.
  // For synchronous client, use a separate thread for each outstanding RPC.
  int32 outstanding_rpcs_per_channel = 4;
  // Number of independent client channels to create.
  // The i-th channel will connect to server_targets[i % server_targets.size()]
  // (see the illustrative example after this message).
  int32 client_channels = 5;
  // Only for async client. Number of threads to use to start/manage RPCs.
  int32 async_client_threads = 7;
  RpcType rpc_type = 8;
  // The requested load for the entire client (aggregated over all the threads).
  LoadParams load_params = 10;
  PayloadConfig payload_config = 11;
  HistogramParams histogram_params = 12;
  // Specify the cores we should run the client on, if desired
  repeated int32 core_list = 13;
  int32 core_limit = 14;
  // If we use an OTHER_CLIENT client_type, this string gives more detail
  string other_client_api = 15;
  repeated ChannelArg channel_args = 16;
  // Number of threads that share each completion queue
  int32 threads_per_cq = 17;
  // Number of messages on a stream before it gets finished/restarted
  int32 messages_per_stream = 18;
  // Use coalescing API when possible.
  bool use_coalesce_api = 19;
  // If 0, disabled. Else, specifies the period between gathering latency
  // medians in milliseconds.
  int32 median_latency_collection_interval_millis = 20;
  // Number of client processes. 0 indicates no restriction.
  int32 client_processes = 21;
}
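
// Illustrative example (not part of the original schema comments; addresses and
// numbers are hypothetical): with server_targets = ["10.0.0.1:10400",
// "10.0.0.2:10400"] and client_channels = 4, channels 0..3 connect to targets
// 0, 1, 0, 1 respectively. The total number of RPCs the client keeps in flight
// is client_channels * outstanding_rpcs_per_channel, e.g. 4 * 100 = 400.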

message ClientStatus { ClientStats stats = 1; }

// Request current stats
message Mark {
  // if true, the stats will be reset after taking their snapshot.
  bool reset = 1;
}
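
// Illustrative note (an assumption about typical driver behavior, not mandated
// here): a benchmark driver usually sends Mark { reset: true } at the end of
// the warmup period so that warmup traffic is excluded, and another Mark at the
// end of the benchmark period to collect stats for the measured window.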

message ClientArgs {
  oneof argtype {
    ClientConfig setup = 1;
    Mark mark = 2;
  }
}
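
// Illustrative note (an assumption based on how the worker control streams are
// commonly used; the authoritative contract lives with the WorkerService
// definition): on the client control stream, the first ClientArgs message
// carries the setup (ClientConfig) and every subsequent message carries a Mark.
// ServerArgs below follows the same first-setup-then-marks pattern for servers.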

message ServerConfig {
  ServerType server_type = 1;
  SecurityParams security_params = 2;
  // Port on which to listen. Zero means pick unused port.
  int32 port = 4;
  // Only for async server. Number of threads used to serve the requests.
  int32 async_server_threads = 7;
  // Specify the number of cores to limit server to, if desired
  int32 core_limit = 8;
  // payload config, used in generic server.
  // Note this must NOT be used in proto (non-generic) servers. For proto servers,
  // 'response sizes' must be configured from the 'response_size' field of the
  // 'SimpleRequest' objects in RPC requests.
  PayloadConfig payload_config = 9;
  // Specify the cores we should run the server on, if desired
  repeated int32 core_list = 10;
  // If we use an OTHER_SERVER server_type, this string gives more detail
  string other_server_api = 11;
  // Number of threads that share each completion queue
  int32 threads_per_cq = 12;
  // c++-only options (for now) --------------------------------
  // Buffer pool size (no buffer pool specified if unset)
  int32 resource_quota_size = 1001;
  repeated ChannelArg channel_args = 1002;
  // Number of server processes. 0 indicates no restriction.
  int32 server_processes = 21;
}

message ServerArgs {
  oneof argtype {
    ServerConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerStatus {
  ServerStats stats = 1;
  // the port bound by the server
  int32 port = 2;
  // Number of cores available to the server
  int32 cores = 3;
}
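
// Illustrative note (follows from the field comments above; the concrete number
// is hypothetical): if ServerConfig.port is 0, the worker picks an unused port
// and reports it back here, e.g. a setup with port = 0 may come back as
// ServerStatus.port = 54321, which the driver can then hand to its clients as
// part of their server_targets.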

message CoreRequest {}

message CoreResponse {
  // Number of cores available on the server
  int32 cores = 1;
}

message Void {}

// A single performance scenario: input to qps_json_driver
message Scenario {
  // Human readable name for this scenario
  string name = 1;
  // Client configuration
  ClientConfig client_config = 2;
  // Number of clients to start for the test
  int32 num_clients = 3;
  // Server configuration
  ServerConfig server_config = 4;
  // Number of servers to start for the test
  int32 num_servers = 5;
  // Warmup period, in seconds
  int32 warmup_seconds = 6;
  // Benchmark time, in seconds
  int32 benchmark_seconds = 7;
  // Number of workers to spawn locally (usually zero)
  int32 spawn_local_worker_count = 8;
}

// A set of scenarios to be run with qps_json_driver
message Scenarios {
  repeated Scenario scenarios = 1;
}
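
// Illustrative sketch of a Scenarios document as qps_json_driver would consume
// it (proto3 JSON mapping of these messages; the name and values are
// hypothetical and heavily trimmed, real scenario files set many more
// ClientConfig/ServerConfig fields):
//
//   {
//     "scenarios": [
//       {
//         "name": "cpp_protobuf_async_unary_qps_unconstrained",
//         "num_clients": 1,
//         "num_servers": 1,
//         "warmup_seconds": 5,
//         "benchmark_seconds": 30,
//         "client_config": { "client_type": "ASYNC_CLIENT", "rpc_type": "UNARY",
//                            "load_params": { "closed_loop": {} } },
//         "server_config": { "server_type": "ASYNC_SERVER" }
//       }
//     ]
//   }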

// Basic summary that can be computed from ClientStats and ServerStats
// once the scenario has finished.
message ScenarioResultSummary {
  // Total number of operations per second over all clients. What is counted as
  // one 'operation' depends on the benchmark scenario:
  // For unary benchmarks, an operation is the processing of a single unary RPC.
  // For streaming benchmarks, an operation is the processing of a single
  // ping-pong of request and response.
  double qps = 1;
  // QPS per server core.
  double qps_per_server_core = 2;
  // The total server cpu load based on system time across all server processes,
  // expressed as a percentage of a single cpu core. For example, 85 means 85%
  // of a cpu core and 125 means 125% of a cpu core. Since the cpu load is
  // accumulated across all server processes, the value can exceed 100 when
  // there are multiple servers, or a single server using multiple threads and
  // cores. The same explanation applies to the total client cpu load below.
  double server_system_time = 3;
  // The total server cpu load based on user time across all server processes,
  // expressed as a percentage of a single cpu core. (85 => 85%, 125 => 125%)
  double server_user_time = 4;
  // The total client cpu load based on system time across all client processes,
  // expressed as a percentage of a single cpu core. (85 => 85%, 125 => 125%)
  double client_system_time = 5;
  // The total client cpu load based on user time across all client processes,
  // expressed as a percentage of a single cpu core. (85 => 85%, 125 => 125%)
  double client_user_time = 6;
  // Latency percentiles, in nanoseconds.
  double latency_50 = 7;
  double latency_90 = 8;
  double latency_95 = 9;
  double latency_99 = 10;
  double latency_999 = 11;
  // server cpu usage percentage
  double server_cpu_usage = 12;
  // Number of requests that succeeded/failed, per second
  double successful_requests_per_second = 13;
  double failed_requests_per_second = 14;
  // Number of polls called inside completion queue, per request
  double client_polls_per_request = 15;
  double server_polls_per_request = 16;
  // Queries per CPU-second over all servers or clients
  double server_queries_per_cpu_sec = 17;
  double client_queries_per_cpu_sec = 18;
  // Start and end time for the test scenario
  google.protobuf.Timestamp start_time = 19;
  google.protobuf.Timestamp end_time = 20;
}
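
// Illustrative worked example (numbers are hypothetical; units follow the field
// comments above): latency_50 = 2500000 means a median latency of 2.5 ms, and
// server_system_time = 250 means the servers together consumed system-time CPU
// equivalent to 2.5 fully busy cores during the benchmark window.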

// Results of a single benchmark scenario.
message ScenarioResult {
  // Inputs used to run the scenario.
  Scenario scenario = 1;
  // Histograms from all clients merged into one histogram.
  HistogramData latencies = 2;
  // Client stats for each client
  repeated ClientStats client_stats = 3;
  // Server stats for each server
  repeated ServerStats server_stats = 4;
  // Number of cores available to each server
  repeated int32 server_cores = 5;
  // An after-the-fact computed summary
  ScenarioResultSummary summary = 6;
  // Information on success or failure of each worker
  repeated bool client_success = 7;
  repeated bool server_success = 8;
  // Number of failed requests (one row per status code seen)
  repeated RequestResultCount request_results = 9;
}