// control.proto
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. syntax = "proto3";
  15. import "grpc/testing/payloads.proto";
  16. import "grpc/testing/stats.proto";
  17. import "google/protobuf/timestamp.proto";
  18. package grpc.testing;
  19. option java_multiple_files = true;
  20. option java_package = "io.grpc.testing";
  21. option java_outer_classname = "ControlProto";
  22. enum ClientType {
  23. // Many languages support a basic distinction between using
  24. // sync or async client, and this allows the specification
  25. SYNC_CLIENT = 0;
  26. ASYNC_CLIENT = 1;
  27. OTHER_CLIENT = 2; // used for some language-specific variants
  28. CALLBACK_CLIENT = 3;
  29. }
  30. enum ServerType {
  31. SYNC_SERVER = 0;
  32. ASYNC_SERVER = 1;
  33. ASYNC_GENERIC_SERVER = 2;
  34. OTHER_SERVER = 3; // used for some language-specific variants
  35. CALLBACK_SERVER = 4;
  36. }
  37. enum RpcType {
  38. UNARY = 0;
  39. STREAMING = 1;
  40. STREAMING_FROM_CLIENT = 2;
  41. STREAMING_FROM_SERVER = 3;
  42. STREAMING_BOTH_WAYS = 4;
  43. }
  44. // Parameters of poisson process distribution, which is a good representation
  45. // of activity coming in from independent identical stationary sources.
  46. message PoissonParams {
  47. // The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
  48. double offered_load = 1;
  49. }
  50. // Once an RPC finishes, immediately start a new one.
  51. // No configuration parameters needed.
  52. message ClosedLoopParams {}
  53. message LoadParams {
  54. oneof load {
  55. ClosedLoopParams closed_loop = 1;
  56. PoissonParams poisson = 2;
  57. };
  58. }
  59. // presence of SecurityParams implies use of TLS
  60. message SecurityParams {
  61. bool use_test_ca = 1;
  62. string server_host_override = 2;
  63. string cred_type = 3;
  64. }
  65. message ChannelArg {
  66. string name = 1;
  67. oneof value {
  68. string str_value = 2;
  69. int32 int_value = 3;
  70. }
  71. }
  72. message ClientConfig {
  73. // List of targets to connect to. At least one target needs to be specified.
  74. repeated string server_targets = 1;
  75. ClientType client_type = 2;
  76. SecurityParams security_params = 3;
  77. // How many concurrent RPCs to start for each channel.
  78. // For synchronous client, use a separate thread for each outstanding RPC.
  79. int32 outstanding_rpcs_per_channel = 4;
  80. // Number of independent client channels to create.
  81. // i-th channel will connect to server_target[i % server_targets.size()]
  82. int32 client_channels = 5;
  83. // Only for async client. Number of threads to use to start/manage RPCs.
  84. int32 async_client_threads = 7;
  85. RpcType rpc_type = 8;
  86. // The requested load for the entire client (aggregated over all the threads).
  87. LoadParams load_params = 10;
  88. PayloadConfig payload_config = 11;
  89. HistogramParams histogram_params = 12;
  90. // Specify the cores we should run the client on, if desired
  91. repeated int32 core_list = 13;
  92. int32 core_limit = 14;
  93. // If we use an OTHER_CLIENT client_type, this string gives more detail
  94. string other_client_api = 15;
  95. repeated ChannelArg channel_args = 16;
  96. // Number of threads that share each completion queue
  97. int32 threads_per_cq = 17;
  98. // Number of messages on a stream before it gets finished/restarted
  99. int32 messages_per_stream = 18;
  100. // Use coalescing API when possible.
  101. bool use_coalesce_api = 19;
  102. // If 0, disabled. Else, specifies the period between gathering latency
  103. // medians in milliseconds.
  104. int32 median_latency_collection_interval_millis = 20;
  105. // Number of client processes. 0 indicates no restriction.
  106. int32 client_processes = 21;
  107. }
  108. message ClientStatus { ClientStats stats = 1; }
  109. // Request current stats
  110. message Mark {
  111. // if true, the stats will be reset after taking their snapshot.
  112. bool reset = 1;
  113. }
  114. message ClientArgs {
  115. oneof argtype {
  116. ClientConfig setup = 1;
  117. Mark mark = 2;
  118. }
  119. }
  120. message ServerConfig {
  121. ServerType server_type = 1;
  122. SecurityParams security_params = 2;
  123. // Port on which to listen. Zero means pick unused port.
  124. int32 port = 4;
  125. // Only for async server. Number of threads used to serve the requests.
  126. int32 async_server_threads = 7;
  127. // Specify the number of cores to limit server to, if desired
  128. int32 core_limit = 8;
  129. // payload config, used in generic server.
  130. // Note this must NOT be used in proto (non-generic) servers. For proto servers,
  131. // 'response sizes' must be configured from the 'response_size' field of the
  132. // 'SimpleRequest' objects in RPC requests.
  133. PayloadConfig payload_config = 9;
  134. // Specify the cores we should run the server on, if desired
  135. repeated int32 core_list = 10;
  136. // If we use an OTHER_SERVER client_type, this string gives more detail
  137. string other_server_api = 11;
  138. // Number of threads that share each completion queue
  139. int32 threads_per_cq = 12;
  140. // c++-only options (for now) --------------------------------
  141. // Buffer pool size (no buffer pool specified if unset)
  142. int32 resource_quota_size = 1001;
  143. repeated ChannelArg channel_args = 1002;
  144. // Number of server processes. 0 indicates no restriction.
  145. int32 server_processes = 21;
  146. }
  147. message ServerArgs {
  148. oneof argtype {
  149. ServerConfig setup = 1;
  150. Mark mark = 2;
  151. }
  152. }
  153. message ServerStatus {
  154. ServerStats stats = 1;
  155. // the port bound by the server
  156. int32 port = 2;
  157. // Number of cores available to the server
  158. int32 cores = 3;
  159. }
  160. message CoreRequest {
  161. }
  162. message CoreResponse {
  163. // Number of cores available on the server
  164. int32 cores = 1;
  165. }
  166. message Void {
  167. }
  168. // A single performance scenario: input to qps_json_driver
  169. message Scenario {
  170. // Human readable name for this scenario
  171. string name = 1;
  172. // Client configuration
  173. ClientConfig client_config = 2;
  174. // Number of clients to start for the test
  175. int32 num_clients = 3;
  176. // Server configuration
  177. ServerConfig server_config = 4;
  178. // Number of servers to start for the test
  179. int32 num_servers = 5;
  180. // Warmup period, in seconds
  181. int32 warmup_seconds = 6;
  182. // Benchmark time, in seconds
  183. int32 benchmark_seconds = 7;
  184. // Number of workers to spawn locally (usually zero)
  185. int32 spawn_local_worker_count = 8;
  186. }
  187. // A set of scenarios to be run with qps_json_driver
  188. message Scenarios {
  189. repeated Scenario scenarios = 1;
  190. }
  191. // Basic summary that can be computed from ClientStats and ServerStats
  192. // once the scenario has finished.
  193. message ScenarioResultSummary
  194. {
  195. // Total number of operations per second over all clients. What is counted as 1 'operation' depends on the benchmark scenarios:
  196. // For unary benchmarks, an operation is processing of a single unary RPC.
  197. // For streaming benchmarks, an operation is processing of a single ping pong of request and response.
  198. double qps = 1;
  199. // QPS per server core.
  200. double qps_per_server_core = 2;
  201. // The total server cpu load based on system time across all server processes, expressed as percentage of a single cpu core.
  202. // For example, 85 implies 85% of a cpu core, 125 implies 125% of a cpu core. Since we are accumulating the cpu load across all the server
  203. // processes, the value could > 100 when there are multiple servers or a single server using multiple threads and cores.
  204. // Same explanation for the total client cpu load below.
  205. double server_system_time = 3;
  206. // The total server cpu load based on user time across all server processes, expressed as percentage of a single cpu core. (85 => 85%, 125 => 125%)
  207. double server_user_time = 4;
  208. // The total client cpu load based on system time across all client processes, expressed as percentage of a single cpu core. (85 => 85%, 125 => 125%)
  209. double client_system_time = 5;
  210. // The total client cpu load based on user time across all client processes, expressed as percentage of a single cpu core. (85 => 85%, 125 => 125%)
  211. double client_user_time = 6;
  212. // X% latency percentiles (in nanoseconds)
  213. double latency_50 = 7;
  214. double latency_90 = 8;
  215. double latency_95 = 9;
  216. double latency_99 = 10;
  217. double latency_999 = 11;
  218. // server cpu usage percentage
  219. double server_cpu_usage = 12;
  220. // Number of requests that succeeded/failed
  221. double successful_requests_per_second = 13;
  222. double failed_requests_per_second = 14;
  223. // Number of polls called inside completion queue per request
  224. double client_polls_per_request = 15;
  225. double server_polls_per_request = 16;
  226. // Queries per CPU-sec over all servers or clients
  227. double server_queries_per_cpu_sec = 17;
  228. double client_queries_per_cpu_sec = 18;
  229. // Start and end time for the test scenario
  230. google.protobuf.Timestamp start_time = 19;
  231. google.protobuf.Timestamp end_time =20;
  232. }
  233. // Results of a single benchmark scenario.
  234. message ScenarioResult {
  235. // Inputs used to run the scenario.
  236. Scenario scenario = 1;
  237. // Histograms from all clients merged into one histogram.
  238. HistogramData latencies = 2;
  239. // Client stats for each client
  240. repeated ClientStats client_stats = 3;
  241. // Server stats for each server
  242. repeated ServerStats server_stats = 4;
  243. // Number of cores available to each server
  244. repeated int32 server_cores = 5;
  245. // An after-the-fact computed summary
  246. ScenarioResultSummary summary = 6;
  247. // Information on success or failure of each worker
  248. repeated bool client_success = 7;
  249. repeated bool server_success = 8;
  250. // Number of failed requests (one row per status code seen)
  251. repeated RequestResultCount request_results = 9;
  252. }