
Add an Async Ping Pong streaming benchmark (#1004)

Motivation:

A unary benchmark already exists - adding a streaming benchmark
completes the high-level coverage.

Modifications:

Refactor the unary benchmark to break out the parts specific to
unary calls, and add a second implementation of that broken-out
class which runs a streaming test.

Result:

Both Unary and Streaming benchmarks exist.
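
For orientation, a minimal sketch of the shape of that split. The placeholder types here (SketchConfig, SketchClient) are stand-ins for the generated gRPC benchmark types; the real definitions are RequestMaker.swift and AsyncClient.swift in the diff below.

```swift
import NIO

// Sketch only: SketchConfig and SketchClient are placeholders for the generated
// Grpc_Testing_ClientConfig and Grpc_Testing_BenchmarkServiceClient types.
struct SketchConfig {}
struct SketchClient {}

/// The per-RPC-type behaviour broken out of the old AsyncUnaryQPSClient.
protocol SketchRequestMaker {
  init(config: SketchConfig, client: SketchClient)
  /// Start one request-response sequence; the future completes when it finishes.
  func makeRequest() -> EventLoopFuture<Void>
  func requestStop()
}

/// The shared driving logic becomes generic over how each request sequence is
/// made, so unary and ping-pong streaming makers can reuse the same client.
final class SketchQPSClient<Maker: SketchRequestMaker> {
  private let maker: Maker

  init(config: SketchConfig, client: SketchClient) {
    self.maker = Maker(config: config, client: client)
  }

  /// Launch one request and hook up a completion callback (simplified).
  func launchOneRequest() {
    self.maker.makeRequest().whenComplete { _ in
      // Record stats here and repeat while capacity remains.
    }
  }
}
```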
Peter Adams · 5 years ago
commit 992cf22f15

+ 7 - 1
Performance/QPSBenchmark/README.md

@@ -16,7 +16,9 @@ You can easily run the tests locally using the C++ driver program from gRPC - no
 of running the C++ tests which can be done in a gRPC checkout with 
 `./tools/run_tests/run_performance_tests.py -l c++ -r cpp_protobuf_async_unary_qps_unconstrained_insecure`
 
-For an example of running a benchmarking tests proceed as follows
+For examples of running benchmarking tests, proceed as follows
+
+### Unary Benchmark
 1. Open a terminal window and run the QPSBenchmark - `swift run -c release QPSBenchmark --driver_port 10400`.  
 This will become the server when instructed by the driver.
 2. Open another terminal window and run QPSBenchmark - `swift run -c release QPSBenchmark --driver_port 10410`.
@@ -26,3 +28,7 @@ configure the environment with `export QPS_WORKERS="localhost:10400,localhost:10
 `cmake/build/qps_json_driver '--scenarios_json={"scenarios": [{"name": "swift_protobuf_async_unary_qps_unconstrained_insecure", "warmup_seconds": 5, "benchmark_seconds": 30, "num_servers": 1, "server_config": {"async_server_threads": 0, "channel_args": [{"str_value": "throughput", "name": "grpc.optimization_target"}], "server_type": "ASYNC_SERVER", "security_params": null, "threads_per_cq": 0, "server_processes": 0}, "client_config": {"security_params": null, "channel_args": [{"str_value": "throughput", "name": "grpc.optimization_target"}], "async_client_threads": 0, "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY", "payload_config": {"simple_params": {"resp_size": 0, "req_size": 0}}, "client_channels": 64, "threads_per_cq": 0, "load_params": {"closed_loop": {}}, "client_type": "ASYNC_CLIENT", "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}, "client_processes": 0}, "num_clients": 0}]}' --scenario_result_file=scenario_result.json`
 This will run a test of asynchronous unary client and server, using all the cores on the machine.  
 64 channels each with 100 outstanding requests.
+
+### Ping Pong Benchmark
+
+As above but drive with `cmake/build/qps_json_driver '--scenarios_json={"scenarios": [{"name": "swift_protobuf_async_streaming_ping_pong_insecure", "warmup_seconds": 5, "benchmark_seconds": 30, "num_servers": 1, "server_config": {"async_server_threads": 1, "channel_args": [{"str_value": "latency", "name": "grpc.optimization_target"}, {"int_value": 1, "name": "grpc.minimal_stack"}], "server_type": "ASYNC_SERVER", "security_params": null, "threads_per_cq": 0, "server_processes": 0}, "client_config": {"security_params": null, "channel_args": [{"str_value": "latency", "name": "grpc.optimization_target"}, {"int_value": 1, "name": "grpc.minimal_stack"}], "async_client_threads": 1, "outstanding_rpcs_per_channel": 1, "rpc_type": "STREAMING", "payload_config": {"simple_params": {"resp_size": 0, "req_size": 0}}, "client_channels": 1, "threads_per_cq": 0, "load_params": {"closed_loop": {}}, "client_type": "ASYNC_CLIENT", "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}, "client_processes": 0}, "num_clients": 1}]}' --scenario_result_file=scenario_result.json`

+ 30 - 32
Performance/QPSBenchmark/Sources/QPSBenchmark/Runtime/AsyncClient.swift

@@ -20,8 +20,8 @@ import GRPC
 import Logging
 import NIO
 
-/// Client to make a series of asynchronous unary calls.
-final class AsyncUnaryQPSClient: QPSClient {
+/// Client to make a series of asynchronous calls.
+final class AsyncQPSClient<RequestMakerType: RequestMaker>: QPSClient {
   private let eventLoopGroup: MultiThreadedEventLoopGroup
   private let threadCount: Int
 
@@ -32,7 +32,7 @@ final class AsyncUnaryQPSClient: QPSClient {
   private var statsPeriodStart: DispatchTime
   private var cpuStatsPeriodStart: CPUTime
 
-  /// Initialise a client to send unary requests.
+  /// Initialise a client to send requests.
   /// - parameters:
   ///      - config: Config from the driver specifying how the client should behave.
   init(config: Grpc_Testing_ClientConfig) throws {
@@ -51,7 +51,7 @@ final class AsyncUnaryQPSClient: QPSClient {
     self.statsPeriodStart = grpcTimeNow()
     self.cpuStatsPeriodStart = getResourceUsage()
 
-    let requestMessage = try AsyncUnaryQPSClient
+    let requestMessage = try AsyncQPSClient
       .makeClientRequest(payloadConfig: config.payloadConfig)
 
     // Start the requested number of channels.
@@ -158,12 +158,9 @@ final class AsyncUnaryQPSClient: QPSClient {
   /// Class to manage a channel.  Repeatedly makes requests on that channel and records what happens.
   private class ChannelRepeater {
     private let connection: ClientConnection
-    private let client: Grpc_Testing_BenchmarkServiceClient
-    private let requestMessage: Grpc_Testing_SimpleRequest
-    private let logger = Logger(label: "ChannelRepeater")
     private let maxPermittedOutstandingRequests: Int
 
-    private var stats: StatsWithLock
+    private let stats: StatsWithLock
 
     /// Has a stop been requested - if it has don't submit any more
     /// requests and when all existing requests are complete signal
@@ -173,6 +170,8 @@ final class AsyncUnaryQPSClient: QPSClient {
     private var stopComplete: EventLoopPromise<Void>
     private var numberOfOutstandingRequests = 0
 
+    private var requestMaker: RequestMakerType
+
     init(target: HostAndPort,
          requestMessage: Grpc_Testing_SimpleRequest,
          config: Grpc_Testing_ClientConfig,
@@ -180,17 +179,27 @@ final class AsyncUnaryQPSClient: QPSClient {
       // TODO: Support TLS if requested.
       self.connection = ClientConnection.insecure(group: eventLoopGroup)
         .connect(host: target.host, port: target.port)
-      self.client = Grpc_Testing_BenchmarkServiceClient(channel: self.connection)
-      self.requestMessage = requestMessage
+
+      let logger = Logger(label: "ChannelRepeater")
+      let client = Grpc_Testing_BenchmarkServiceClient(channel: self.connection)
       self.maxPermittedOutstandingRequests = Int(config.outstandingRpcsPerChannel)
       self.stopComplete = self.connection.eventLoop.makePromise()
       self.stats = StatsWithLock()
+
+      self.requestMaker = RequestMakerType(
+        config: config,
+        client: client,
+        requestMessage: requestMessage,
+        logger: logger,
+        stats: self.stats
+      )
     }
 
     /// Launch as many requests as allowed on the channel.
     /// This must be called from the connection eventLoop.
     private func launchRequests() {
-      precondition(self.connection.eventLoop.inEventLoop)
+      self.connection.eventLoop.preconditionInEventLoop()
+
       while self.canMakeRequest {
         self.makeRequestAndRepeat()
       }
@@ -198,40 +207,32 @@ final class AsyncUnaryQPSClient: QPSClient {
 
     /// Returns if it is permissible to make another request - ie we've not been asked to stop, and we're not at the limit of outstanding requests.
     private var canMakeRequest: Bool {
+      self.connection.eventLoop.assertInEventLoop()
       return !self.stopRequested
         && self.numberOfOutstandingRequests < self.maxPermittedOutstandingRequests
     }
 
     /// If there is spare permitted capacity make a request and repeat when it is done.
     private func makeRequestAndRepeat() {
+      self.connection.eventLoop.preconditionInEventLoop()
       // Check for capacity.
       if !self.canMakeRequest {
         return
       }
-      let startTime = grpcTimeNow()
       self.numberOfOutstandingRequests += 1
-      let result = self.client.unaryCall(self.requestMessage)
+      let resultStatus = self.requestMaker.makeRequest()
 
       // Wait for the request to complete.
-      result.status.whenSuccess { status in
-        self.requestCompleted(status: status, startTime: startTime)
+      resultStatus.whenSuccess { status in
+        self.requestCompleted(status: status)
       }
     }
 
     /// Call when a request has completed.
     /// Records stats and attempts to make more requests if there is available capacity.
-    private func requestCompleted(status: GRPCStatus, startTime: DispatchTime) {
-      precondition(self.connection.eventLoop.inEventLoop)
+    private func requestCompleted(status: GRPCStatus) {
+      self.connection.eventLoop.preconditionInEventLoop()
       self.numberOfOutstandingRequests -= 1
-      if status.isOk {
-        let endTime = grpcTimeNow()
-        self.recordLatency(endTime - startTime)
-      } else {
-        self.logger.error(
-          "Bad status from unary request",
-          metadata: ["status": "\(status)"]
-        )
-      }
       if self.stopRequested, self.numberOfOutstandingRequests == 0 {
         self.stopIsComplete()
       } else {
@@ -240,10 +241,6 @@ final class AsyncUnaryQPSClient: QPSClient {
       }
     }
 
-    private func recordLatency(_ latency: Nanoseconds) {
-      self.stats.add(latency: Double(latency.value))
-    }
-
     /// Get stats for sending to the driver.
     /// - parameters:
     ///     - reset: Should the stats reset after copying.
@@ -275,6 +272,7 @@ final class AsyncUnaryQPSClient: QPSClient {
     func stop() -> EventLoopFuture<Void> {
       self.connection.eventLoop.execute {
         self.stopRequested = true
+        self.requestMaker.requestStop()
         if self.numberOfOutstandingRequests == 0 {
           self.stopIsComplete()
         }
@@ -291,9 +289,9 @@ final class AsyncUnaryQPSClient: QPSClient {
 func makeAsyncClient(config: Grpc_Testing_ClientConfig) throws -> QPSClient {
   switch config.rpcType {
   case .unary:
-    return try AsyncUnaryQPSClient(config: config)
+    return try AsyncQPSClient<AsyncUnaryRequestMaker>(config: config)
   case .streaming:
-    throw GRPCStatus(code: .unimplemented, message: "Client Type not implemented")
+    return try AsyncQPSClient<AsyncPingPongRequestMaker>(config: config)
   case .streamingFromClient:
     throw GRPCStatus(code: .unimplemented, message: "Client Type not implemented")
   case .streamingFromServer:

+ 95 - 0
Performance/QPSBenchmark/Sources/QPSBenchmark/Runtime/AsyncPingPongRequestMaker.swift

@@ -0,0 +1,95 @@
+/*
+ * Copyright 2020, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Foundation
+import GRPC
+import Logging
+import NIO
+
+/// Makes streaming requests and listens to responses ping-pong style.
+/// Iterations can be limited by config.
+final class AsyncPingPongRequestMaker: RequestMaker {
+  private let client: Grpc_Testing_BenchmarkServiceClient
+  private let requestMessage: Grpc_Testing_SimpleRequest
+  private let logger: Logger
+  private let stats: StatsWithLock
+
+  /// If greater than zero gives a limit to how many messages are exchanged before termination.
+  private let messagesPerStream: Int
+  /// Stops more requests being made after stop is requested.
+  private var stopRequested = false
+
+  /// Initialiser to gather requirements.
+  /// - Parameters:
+  ///    - config: config from the driver describing what to do.
+  ///    - client: client interface to the server.
+  ///    - requestMessage: Pre-made request message to use possibly repeatedly.
+  ///    - logger: Where to log useful diagnostics.
+  ///    - stats: Where to record statistics on latency.
+  init(config: Grpc_Testing_ClientConfig,
+       client: Grpc_Testing_BenchmarkServiceClient,
+       requestMessage: Grpc_Testing_SimpleRequest,
+       logger: Logger,
+       stats: StatsWithLock) {
+    self.client = client
+    self.requestMessage = requestMessage
+    self.logger = logger
+    self.stats = stats
+
+    self.messagesPerStream = Int(config.messagesPerStream)
+  }
+
+  /// Initiate a request sequence to the server - in this case the sequence is streaming requests to the server and waiting
+  /// to see responses before repeating ping-pong style.  The number of iterations can be limited by config.
+  /// - returns: A future which completes when the request-response sequence is complete.
+  func makeRequest() -> EventLoopFuture<GRPCStatus> {
+    var startTime = grpcTimeNow()
+    var messagesSent = 1
+    var streamingCall: BidirectionalStreamingCall<
+      Grpc_Testing_SimpleRequest,
+      Grpc_Testing_SimpleResponse
+    >?
+
+    /// Handle a response from the server - potentially triggers making another request.
+    /// Will execute on the event loop which deals with thread safety concerns.
+    func handleResponse(response: Grpc_Testing_SimpleResponse) {
+      streamingCall!.eventLoop.preconditionInEventLoop()
+      let endTime = grpcTimeNow()
+      self.stats.add(latency: endTime - startTime)
+      if !self.stopRequested,
+        self.messagesPerStream == 0 || messagesSent < self.messagesPerStream {
+        messagesSent += 1
+        startTime = endTime // Use end of previous request as the start of the next.
+        streamingCall!.sendMessage(self.requestMessage, promise: nil)
+      } else {
+        streamingCall!.sendEnd(promise: nil)
+      }
+    }
+
+    // Setup the call.
+    streamingCall = self.client.streamingCall(handler: handleResponse)
+    // Kick start with initial request
+    streamingCall!.sendMessage(self.requestMessage, promise: nil)
+
+    return streamingCall!.status
+  }
+
+  /// Request termination of the request-response sequence.
+  func requestStop() {
+    // Flag stop as requested - this will prevent any more requests being made.
+    self.stopRequested = true
+  }
+}

+ 70 - 0
Performance/QPSBenchmark/Sources/QPSBenchmark/Runtime/AsyncUnaryRequestMaker.swift

@@ -0,0 +1,70 @@
+/*
+ * Copyright 2020, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import GRPC
+import Logging
+import NIO
+
+/// Makes unary requests to the server and records performance statistics.
+final class AsyncUnaryRequestMaker: RequestMaker {
+  private let client: Grpc_Testing_BenchmarkServiceClient
+  private let requestMessage: Grpc_Testing_SimpleRequest
+  private let logger: Logger
+  private let stats: StatsWithLock
+
+  /// Initialiser to gather requirements.
+  /// - Parameters:
+  ///    - config: config from the driver describing what to do.
+  ///    - client: client interface to the server.
+  ///    - requestMessage: Pre-made request message to use possibly repeatedly.
+  ///    - logger: Where to log useful diagnostics.
+  ///    - stats: Where to record statistics on latency.
+  init(config: Grpc_Testing_ClientConfig,
+       client: Grpc_Testing_BenchmarkServiceClient,
+       requestMessage: Grpc_Testing_SimpleRequest,
+       logger: Logger,
+       stats: StatsWithLock) {
+    self.client = client
+    self.requestMessage = requestMessage
+    self.logger = logger
+    self.stats = stats
+  }
+
+/// Initiate a request sequence to the server - in this case a single unary request, waiting for a response.
+  /// - returns: A future which completes when the request-response sequence is complete.
+  func makeRequest() -> EventLoopFuture<GRPCStatus> {
+    let startTime = grpcTimeNow()
+    let result = self.client.unaryCall(self.requestMessage)
+    // Log latency stats on completion.
+    result.status.whenSuccess { status in
+      if status.isOk {
+        let endTime = grpcTimeNow()
+        self.stats.add(latency: endTime - startTime)
+      } else {
+        self.logger.error(
+          "Bad status from unary request",
+          metadata: ["status": "\(status)"]
+        )
+      }
+    }
+    return result.status
+  }
+
+  /// Request termination of the request-response sequence.
+  func requestStop() {
+  // No action here - we could potentially try to cancel the request, but it is easiest to just wait.
+  }
+}

+ 42 - 0
Performance/QPSBenchmark/Sources/QPSBenchmark/Runtime/RequestMaker.swift

@@ -0,0 +1,42 @@
+/*
+ * Copyright 2020, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import GRPC
+import Logging
+import NIO
+
+/// Implement to provide a method of making requests to a server from a client.
+protocol RequestMaker {
+  /// Initialiser to gather requirements.
+  /// - Parameters:
+  ///    - config: config from the driver describing what to do.
+  ///    - client: client interface to the server.
+  ///    - requestMessage: Pre-made request message to use possibly repeatedly.
+  ///    - logger: Where to log useful diagnostics.
+  ///    - stats: Where to record statistics on latency.
+  init(config: Grpc_Testing_ClientConfig,
+       client: Grpc_Testing_BenchmarkServiceClient,
+       requestMessage: Grpc_Testing_SimpleRequest,
+       logger: Logger,
+       stats: StatsWithLock)
+
+  /// Initiate a request sequence to the server.
+  /// - returns: A future which completes when the request-response sequence is complete.
+  func makeRequest() -> EventLoopFuture<GRPCStatus>
+
+  /// Request termination of the request-response sequence.
+  func requestStop()
+}

+ 7 - 3
Performance/QPSBenchmark/Sources/QPSBenchmark/Runtime/Stats.swift

@@ -28,22 +28,26 @@ struct Stats {
 /// Stats with access controlled by a lock -
 /// Needs locking rather than event loop hopping as the driver refuses to wait shutting
 /// the connection immediately after the request.
-struct StatsWithLock {
+class StatsWithLock {
   private var data = Stats()
   private let lock = Lock()
 
   /// Record a latency value into the stats.
   /// - parameters:
   ///     - latency: The value to record.
-  mutating func add(latency: Double) {
+  func add(latency: Double) {
     self.lock.withLockVoid { self.data.latencies.add(value: latency) }
   }
 
+  func add(latency: Nanoseconds) {
+    self.add(latency: Double(latency.value))
+  }
+
   /// Copy the data out.
   /// - parameters:
   ///     - reset: If the statistics should be reset after collection or not.
   /// - returns: A copy of the statistics.
-  mutating func copyData(reset: Bool) -> Stats {
+  func copyData(reset: Bool) -> Stats {
     return self.lock.withLock {
       let result = self.data
       if reset {