Просмотр исходного кода

Refactor benchmarks to be more flexible (#641)

Motivation:

Benchmarks are useful, but ours aren't very flexible since they require
having a gRPC connection. This makes it hard to benchmark smaller pieces
of code.

Modifications:

- Refactor benchmarks to make them more flexible.
- Added a benchmark to do 10k unary requests over an embedded channel

Result:

Easier to write benchmarks.
George Barnett 6 лет назад
Родитель
Commit
1399e9d863

+ 6 - 1
Sources/GRPC/Box.swift

@@ -18,10 +18,15 @@ import Foundation
 /// Provides a "box" to put a value in.
 ///
 /// Allows large values to be passed around without being copied.
+///
+/// - Important: This is **NOT** part of the public API.
 public final class _Box<T> {
   let value: T
 
-  init(_ value: T) {
+  /// Constructs a box for a value.
+  ///
+  /// - Important: This is **NOT** part of the public API.
+  public init(_ value: T) {
     self.value = value
   }
 }

+ 1 - 1
Sources/GRPC/ClientCalls/BaseClientCall.swift

@@ -76,7 +76,7 @@ public class BaseClientCall<Request: Message, Response: Message>: ClientCall {
     multiplexer: EventLoopFuture<HTTP2StreamMultiplexer>,
     callType: GRPCCallType,
     responseHandler: GRPCClientResponseChannelHandler<Response>,
-    requestHandler: ClientRequestChannelHandler<Request>,
+    requestHandler: _ClientRequestChannelHandler<Request>,
     logger: Logger
   ) {
     self.logger = logger

+ 1 - 1
Sources/GRPC/ClientCalls/BidirectionalStreamingCall.swift

@@ -63,7 +63,7 @@ public final class BidirectionalStreamingCall<RequestMessage: Message, ResponseM
       options: callOptions
     )
 
-    let requestHandler = StreamingRequestChannelHandler<RequestMessage>(requestHead: requestHead)
+    let requestHandler = _StreamingRequestChannelHandler<RequestMessage>(requestHead: requestHead)
 
     super.init(
       eventLoop: connection.eventLoop,

+ 1 - 1
Sources/GRPC/ClientCalls/ClientStreamingCall.swift

@@ -67,7 +67,7 @@ public final class ClientStreamingCall<RequestMessage: Message, ResponseMessage:
       options: callOptions
     )
 
-    let requestHandler = StreamingRequestChannelHandler<RequestMessage>(requestHead: requestHead)
+    let requestHandler = _StreamingRequestChannelHandler<RequestMessage>(requestHead: requestHead)
 
     super.init(
       eventLoop: connection.eventLoop,

+ 1 - 1
Sources/GRPC/ClientCalls/ServerStreamingCall.swift

@@ -55,7 +55,7 @@ public final class ServerStreamingCall<RequestMessage: Message, ResponseMessage:
       options: callOptions
     )
 
-    let requestHandler = UnaryRequestChannelHandler<RequestMessage>(
+    let requestHandler = _UnaryRequestChannelHandler<RequestMessage>(
       requestHead: requestHead,
       request: .init(request)
     )

+ 1 - 1
Sources/GRPC/ClientCalls/UnaryCall.swift

@@ -64,7 +64,7 @@ public final class UnaryCall<RequestMessage: Message, ResponseMessage: Message>
       options: callOptions
     )
 
-    let requestHandler = UnaryRequestChannelHandler<RequestMessage>(
+    let requestHandler = _UnaryRequestChannelHandler<RequestMessage>(
       requestHead: requestHead,
       request: .init(request)
     )

+ 12 - 6
Sources/GRPC/ClientRequestChannelHandler.swift → Sources/GRPC/_ClientRequestChannelHandler.swift

@@ -19,9 +19,11 @@ import NIO
 import NIOHTTP1
 
 /// A base channel handler for client requests.
-internal class ClientRequestChannelHandler<RequestMessage: Message>: ChannelInboundHandler {
-  typealias InboundIn = Never
-  typealias OutboundOut = GRPCClientRequestPart<RequestMessage>
+///
+/// - Important: This is **NOT** part of the public API.
+public class _ClientRequestChannelHandler<RequestMessage: Message>: ChannelInboundHandler {
+  public typealias InboundIn = Never
+  public typealias OutboundOut = GRPCClientRequestPart<RequestMessage>
 
   /// The request head to send.
   internal let requestHead: GRPCRequestHead
@@ -39,11 +41,13 @@ internal class ClientRequestChannelHandler<RequestMessage: Message>: ChannelInbo
 /// A channel handler for unary client requests.
 ///
 /// Sends the request head, message and end on `channelActive(context:)`.
-internal final class UnaryRequestChannelHandler<RequestMessage: Message>: ClientRequestChannelHandler<RequestMessage> {
+///
+/// - Important: This is **NOT** part of the public API.
+public final class _UnaryRequestChannelHandler<RequestMessage: Message>: _ClientRequestChannelHandler<RequestMessage> {
   /// The request to send.
   internal let request: _Box<RequestMessage>
 
-  init(requestHead: GRPCRequestHead, request: _Box<RequestMessage>) {
+  public init(requestHead: GRPCRequestHead, request: _Box<RequestMessage>) {
     self.request = request
     super.init(requestHead: requestHead)
   }
@@ -59,7 +63,9 @@ internal final class UnaryRequestChannelHandler<RequestMessage: Message>: Client
 /// A channel handler for client calls which stream requests.
 ///
 /// Sends the request head on `channelActive(context:)`.
-internal final class StreamingRequestChannelHandler<RequestMessage: Message>: ClientRequestChannelHandler<RequestMessage> {
+///
+/// - Important: This is **NOT** part of the public API.
+public final class _StreamingRequestChannelHandler<RequestMessage: Message>: _ClientRequestChannelHandler<RequestMessage> {
   override public func channelActive(context: ChannelHandlerContext) {
     context.writeAndFlush(self.wrapOutboundOut(.head(self.requestHead)), promise: nil)
     context.fireChannelActive()

+ 88 - 0
Sources/GRPCPerformanceTests/Benchmark.swift

@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import Dispatch
+
+protocol Benchmark: class {
+  func setUp() throws
+  func tearDown() throws
+  func run() throws
+}
+
+/// The results of a benchmark.
+struct BenchmarkResults {
+  /// The description of the benchmark.
+  var desc: String
+
+  /// The duration of each run of the benchmark in milliseconds.
+  var milliseconds: [UInt64]
+}
+
+extension BenchmarkResults: CustomStringConvertible {
+  var description: String {
+    return "\(self.desc): \(self.milliseconds.map(String.init).joined(separator: ","))"
+  }
+}
+
+/// Runs the benchmark and prints the duration in milliseconds for each run.
+///
+/// - Parameters:
+///   - description: A description of the benchmark.
+///   - benchmark: The benchmark which should be run.
+///   - spec: The specification for the test run.
+func measureAndPrint(description: String, benchmark: Benchmark, spec: TestSpec) {
+  switch spec.action {
+  case .list:
+    print(description)
+  case .run(let filter):
+    guard filter.shouldRun(description) else {
+      return
+    }
+    print(measure(description, benchmark: benchmark, repeats: spec.repeats))
+  }
+}
+
+/// Runs the given benchmark multiple times, recording the wall time for each iteration.
+///
+/// - Parameters:
+///   - description: A description of the benchmark.
+///   - benchmark: The benchmark to run.
+///   - repeats: the number of times to run the benchmark.
+func measure(_ description: String, benchmark: Benchmark, repeats: Int) -> BenchmarkResults {
+  var milliseconds: [UInt64] = []
+  for _ in 0..<repeats {
+    do {
+      try benchmark.setUp()
+
+      let start = DispatchTime.now().uptimeNanoseconds
+      try benchmark.run()
+      let end = DispatchTime.now().uptimeNanoseconds
+
+      milliseconds.append((end - start) / 1_000_000)
+    } catch {
+      // If tearDown fails now then there's not a lot we can do!
+      try? benchmark.tearDown()
+      return BenchmarkResults(desc: description, milliseconds: [])
+    }
+
+    do {
+      try benchmark.tearDown()
+    } catch {
+      return BenchmarkResults(desc: description, milliseconds: [])
+    }
+  }
+
+  return BenchmarkResults(desc: description, milliseconds: milliseconds)
+}

+ 116 - 0
Sources/GRPCPerformanceTests/Benchmarks/EmbeddedClientThroughput.swift

@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import NIO
+import NIOHTTP2
+import NIOHPACK
+import GRPC
+import EchoModel
+import Logging
+
+/// Tests the throughput on the client side by firing a unary request through an embedded channel
+/// and writing back enough gRPC as HTTP/2 frames to get through the state machine.
+///
+/// This only measures the handlers in the child channel.
+class EmbeddedClientThroughput: Benchmark {
+  private let requestCount: Int
+  private let requestText: String
+
+  private var logger: Logger!
+  private var requestHead: GRPCRequestHead!
+  private var request: Echo_EchoRequest!
+
+  init(requests: Int, text: String) {
+    self.requestCount = requests
+    self.requestText = text
+  }
+
+  func setUp() throws {
+    self.logger = Logger(label: "io.grpc.testing")
+
+    self.requestHead = GRPCRequestHead(
+      method: "POST",
+      scheme: "http",
+      path: "/echo.Echo/Get",
+      host: "localhost",
+      timeout: .infinite,
+      customMetadata: [:]
+    )
+
+    self.request = .with {
+      $0.text = self.requestText
+    }
+  }
+
+  func tearDown() throws {
+  }
+
+  func run() throws {
+    for _ in 0..<self.requestCount {
+      let channel = EmbeddedChannel()
+      try channel.pipeline.addHandlers([
+        GRPCClientChannelHandler<Echo_EchoRequest, Echo_EchoResponse>(streamID: .init(1), callType: .unary, logger: self.logger),
+        _UnaryRequestChannelHandler(requestHead: self.requestHead, request: .init(self.request))
+      ]).wait()
+
+      // Trigger the request handler.
+      channel.pipeline.fireChannelActive()
+
+      // Read out the request frames.
+      var requestFrames = 0
+      while let _ = try channel.readOutbound(as: HTTP2Frame.self) {
+        requestFrames += 1
+      }
+      assert(requestFrames == 3)  // headers, data, empty data (end-stream)
+
+      // Okay, let's build a response.
+
+      // Required headers.
+      let responseHeaders: HPACKHeaders = [
+        ":status": "200",
+        "content-type": "application/grpc+proto"
+      ]
+      let headerFrame = HTTP2Frame(streamID: .init(1), payload: .headers(.init(headers: responseHeaders)))
+
+      // Some data.
+      let response = try Echo_EchoResponse.with { $0.text = self.requestText }.serializedData()
+      var buffer = channel.allocator.buffer(capacity: response.count + 5)
+      buffer.writeInteger(UInt8(0))  // compression byte
+      buffer.writeInteger(UInt32(response.count))
+      buffer.writeBytes(response)
+      let dataFrame = HTTP2Frame(streamID: .init(1), payload: .data(.init(data: .byteBuffer(buffer))))
+
+      // Required trailers.
+      let responseTrailers: HPACKHeaders = [
+        "grpc-status": "0",
+        "grpc-message": "ok"
+      ]
+      let trailersFrame = HTTP2Frame(streamID: .init(1), payload: .headers(.init(headers: responseTrailers)))
+
+      // Now write the response frames back into the channel.
+      try channel.writeInbound(headerFrame)
+      try channel.writeInbound(dataFrame)
+      try channel.writeInbound(trailersFrame)
+
+      // And read them back out.
+      var responseParts = 0
+      while let _ = try channel.readOutbound(as: GRPCClientResponsePart<Echo_EchoResponse>.self) {
+        responseParts += 1
+      }
+
+      assert(responseParts == 4, "received \(responseParts) response parts")
+    }
+  }
+}

+ 49 - 0
Sources/GRPCPerformanceTests/Benchmarks/ServerProvidingBenchmark.swift

@@ -0,0 +1,49 @@
+/*
+ * Copyright 2019, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import Foundation
+import GRPC
+import NIO
+
+class ServerProvidingBenchmark: Benchmark {
+  private let providers: [CallHandlerProvider]
+  private let threadCount: Int
+  private var group: EventLoopGroup!
+  private(set) var server: Server!
+
+  init(providers: [CallHandlerProvider], threadCount: Int = 1) {
+    self.providers = providers
+    self.threadCount = threadCount
+  }
+
+  func setUp() throws {
+    self.group = MultiThreadedEventLoopGroup(numberOfThreads: self.threadCount)
+    let configuration = Server.Configuration(
+      target: .hostAndPort("", 0),
+      eventLoopGroup: self.group,
+      serviceProviders: self.providers
+    )
+    self.server = try Server.start(configuration: configuration).wait()
+  }
+
+  func tearDown() throws {
+    try self.server.close().wait()
+    try self.group.syncShutdownGracefully()
+  }
+
+  func run() throws {
+    // no-op
+  }
+}

+ 96 - 0
Sources/GRPCPerformanceTests/Benchmarks/UnaryThroughput.swift

@@ -0,0 +1,96 @@
+/*
+ * Copyright 2019, gRPC Authors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import Foundation
+import EchoModel
+import EchoImplementation
+import GRPC
+import NIO
+
+/// Tests unary throughput by sending requests on a single connection.
+///
+/// Requests are sent in batches of (up-to) 100 requests. This is due to
+/// https://github.com/apple/swift-nio-http2/issues/87#issuecomment-483542401.
+class Unary: ServerProvidingBenchmark {
+  private var group: EventLoopGroup!
+  private(set) var client: Echo_EchoServiceClient!
+
+  let requestCount: Int
+  let requestText: String
+
+  init(requests: Int, text: String) {
+    self.requestCount = requests
+    self.requestText = text
+    super.init(providers: [EchoProvider()])
+  }
+
+  override func setUp() throws {
+    try super.setUp()
+    self.group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
+
+    let configuration = ClientConnection.Configuration(
+      target: .socketAddress(self.server.channel.localAddress!),
+      eventLoopGroup: self.group
+    )
+
+    let connection = ClientConnection(configuration: configuration)
+    self.client = .init(connection: connection)
+  }
+
+  override func run() throws {
+    let batchSize = 100
+
+    for lowerBound in stride(from: 0, to: self.requestCount, by: batchSize) {
+      let upperBound = min(lowerBound + batchSize, self.requestCount)
+
+      let requests = (lowerBound..<upperBound).map { _ in
+        client.get(Echo_EchoRequest.with { $0.text = self.requestText }).response
+      }
+
+      try EventLoopFuture.andAllSucceed(requests, on: self.client.connection.eventLoop).wait()
+    }
+  }
+
+  override func tearDown() throws {
+    try self.client.connection.close().wait()
+    try self.group.syncShutdownGracefully()
+    try super.tearDown()
+  }
+}
+
+
+/// Tests bidirectional throughput by sending requests over a single stream.
+class Bidi: Unary {
+  let batchSize: Int
+
+  init(requests: Int, text: String, batchSize: Int) {
+    self.batchSize = batchSize
+    super.init(requests: requests, text: text)
+  }
+
+  override func run() throws {
+    let update = self.client.update { _ in }
+
+    for _ in stride(from: 0, to: self.requestCount, by: self.batchSize) {
+      let batch = (0..<self.batchSize).map { _ in
+        Echo_EchoRequest.with { $0.text = self.requestText }
+      }
+      update.sendMessages(batch, promise: nil)
+    }
+    update.sendEnd(promise: nil)
+
+    _ = try update.status.wait()
+  }
+}

+ 90 - 445
Sources/GRPCPerformanceTests/main.swift

@@ -21,487 +21,132 @@ import EchoImplementation
 import EchoModel
 import Logging
 
-struct ConnectionFactory {
-  var configuration: ClientConnection.Configuration
-
-  func makeConnection() -> ClientConnection {
-    return ClientConnection(configuration: self.configuration)
-  }
-
-  func makeEchoClient() -> Echo_EchoServiceClient {
-    return Echo_EchoServiceClient(connection: self.makeConnection())
-  }
-}
-
-protocol Benchmark: class {
-  func setUp() throws
-  func tearDown() throws
-  func run() throws
-}
-
-/// Tests unary throughput by sending requests on a single connection.
-///
-/// Requests are sent in batches of (up-to) 100 requests. This is due to
-/// https://github.com/apple/swift-nio-http2/issues/87#issuecomment-483542401.
-class UnaryThroughput: Benchmark {
-  let factory: ConnectionFactory
-  let requests: Int
-  let requestLength: Int
-  var client: Echo_EchoServiceClient!
-  var request: String!
-
-  init(factory: ConnectionFactory, requests: Int, requestLength: Int) {
-    self.factory = factory
-    self.requests = requests
-    self.requestLength = requestLength
-  }
-
-  func setUp() throws {
-    self.client = self.factory.makeEchoClient()
-    self.request = String(repeating: "0", count: self.requestLength)
-  }
-
-  func run() throws {
-    let batchSize = 100
-
-    for lowerBound in stride(from: 0, to: self.requests, by: batchSize) {
-      let upperBound = min(lowerBound + batchSize, self.requests)
-
-      let requests = (lowerBound..<upperBound).map { _ in
-        client.get(Echo_EchoRequest.with { $0.text = self.request }).response
-      }
-
-      try EventLoopFuture.andAllSucceed(requests, on: self.client.connection.eventLoop).wait()
-    }
-  }
-
-  func tearDown() throws {
-    try self.client.connection.close().wait()
-  }
-}
-
-/// Tests bidirectional throughput by sending requests over a single stream.
-///
-/// Requests are sent in batches of (up-to) 100 requests. This is due to
-/// https://github.com/apple/swift-nio-http2/issues/87#issuecomment-483542401.
-class BidirectionalThroughput: UnaryThroughput {
-  override func run() throws {
-    let update = self.client.update { _ in }
-
-    for _ in 0..<self.requests {
-      update.sendMessage(Echo_EchoRequest.with { $0.text = self.request }, promise: nil)
-    }
-    update.sendEnd(promise: nil)
-
-    _ = try update.status.wait()
-  }
-}
-
-/// Tests the number of connections that can be created.
-final class ConnectionCreationThroughput: Benchmark {
-  let factory: ConnectionFactory
-  let connections: Int
-  var createdConnections: [ClientConnection] = []
-
-  class ConnectionReadinessDelegate: ConnectivityStateDelegate {
-    let promise: EventLoopPromise<Void>
-
-    var ready: EventLoopFuture<Void> {
-      return promise.futureResult
-    }
-
-    init(promise: EventLoopPromise<Void>) {
-      self.promise = promise
-    }
-
-    func connectivityStateDidChange(from oldState: ConnectivityState, to newState: ConnectivityState) {
-      switch newState {
-      case .ready:
-        promise.succeed(())
-
-      case .shutdown:
-        promise.fail(GRPCStatus(code: .unavailable, message: nil))
-
-      default:
-        break
-      }
-    }
-  }
-
-  init(factory: ConnectionFactory, connections: Int) {
-    self.factory = factory
-    self.connections = connections
-  }
-
-  func setUp() throws { }
-
-  func run() throws {
-    let connectionsAndDelegates: [(ClientConnection, ConnectionReadinessDelegate)] = (0..<connections).map { _ in
-      let promise = self.factory.configuration.eventLoopGroup.next().makePromise(of: Void.self)
-      var configuration = self.factory.configuration
-      let delegate = ConnectionReadinessDelegate(promise: promise)
-      configuration.connectivityStateDelegate = delegate
-      return (ClientConnection(configuration: configuration), delegate)
-    }
-
-    self.createdConnections = connectionsAndDelegates.map { connection, _ in connection }
-    let futures = connectionsAndDelegates.map { _, delegate in delegate.ready }
-    try EventLoopFuture.andAllSucceed(
-      futures,
-      on: self.factory.configuration.eventLoopGroup.next()
-    ).wait()
-  }
-
-  func tearDown() throws {
-    let connectionClosures = self.createdConnections.map {
-      $0.close()
-    }
-
-    try EventLoopFuture.andAllSucceed(
-      connectionClosures,
-      on: self.factory.configuration.eventLoopGroup.next()).wait()
-  }
-}
-
-/// The results of a benchmark.
-struct BenchmarkResults {
-  let benchmarkDescription: String
-  let durations: [TimeInterval]
-
-  /// Returns the results as a comma separated string.
-  ///
-  /// The format of the string is as such:
-  /// <name>, <number of results> [, <duration>]
-  var asCSV: String {
-    let items = [self.benchmarkDescription, String(self.durations.count)] + self.durations.map { String($0) }
-    return items.joined(separator: ", ")
-  }
-}
-
-/// Runs the given benchmark multiple times, recording the wall time for each iteration.
-///
-/// - Parameter description: A description of the benchmark.
-/// - Parameter benchmark: The benchmark to run.
-/// - Parameter repeats: The number of times to run the benchmark.
-func measure(description: String, benchmark: Benchmark, repeats: Int) -> BenchmarkResults {
-  var durations: [TimeInterval] = []
-  for _ in 0..<repeats {
-    do {
-      try benchmark.setUp()
-
-      let start = Date()
-      try benchmark.run()
-      let end = Date()
-
-      durations.append(end.timeIntervalSince(start))
-    } catch {
-      // If tearDown fails now then there's not a lot we can do!
-      try? benchmark.tearDown()
-      return BenchmarkResults(benchmarkDescription: description, durations: [])
-    }
-
-    do {
-      try benchmark.tearDown()
-    } catch {
-      return BenchmarkResults(benchmarkDescription: description, durations: [])
-    }
-  }
-
-  return BenchmarkResults(benchmarkDescription: description, durations: durations)
-}
-
-/// Makes an SSL context if one is required. Note that the CLI tool doesn't support optional values,
-/// so we use empty strings for the paths if we don't require SSL.
-///
-/// This function will terminate the program if it is not possible to create an SSL context.
-///
-/// - Parameter caCertificatePath: The path to the CA certificate PEM file.
-/// - Parameter certificatePath: The path to the certificate.
-/// - Parameter privateKeyPath: The path to the private key.
-/// - Parameter server: Whether this is for the server or not.
-private func makeServerTLSConfiguration(caCertificatePath: String, certificatePath: String, privateKeyPath: String) throws -> Server.Configuration.TLS? {
-  // Commander doesn't have Optional options; we use empty strings to indicate no value.
-  guard certificatePath.isEmpty == privateKeyPath.isEmpty &&
-    privateKeyPath.isEmpty == caCertificatePath.isEmpty else {
-      print("Paths for CA certificate, certificate and private key must be provided")
-      exit(1)
-  }
+// Add benchmarks here!
+func runBenchmarks(spec: TestSpec) {
+  let smallRequest = String(repeating: "x", count: 8)
+  let largeRequest = String(repeating: "x", count: 1 << 16)  // 65k
+
+  measureAndPrint(
+    description: "unary_10k_small_requests",
+    benchmark: Unary(requests: 10_000, text: smallRequest),
+    spec: spec
+  )
 
-  // No need to check them all because of the guard statement above.
-  if caCertificatePath.isEmpty {
-    return nil
-  }
+  measureAndPrint(
+    description: "unary_10k_long_requests",
+    benchmark: Unary(requests: 10_000, text: largeRequest),
+    spec: spec
+  )
 
-  return .init(
-    certificateChain: try NIOSSLCertificate.fromPEMFile(certificatePath).map { .certificate($0) },
-    privateKey: .file(privateKeyPath),
-    trustRoots: .file(caCertificatePath)
+  measureAndPrint(
+    description: "bidi_10k_small_requests_in_batches_of_1",
+    benchmark: Bidi(requests: 10_000, text: smallRequest, batchSize: 1),
+    spec: spec
   )
-}
 
-private func makeClientTLSConfiguration(
-  caCertificatePath: String,
-  certificatePath: String,
-  privateKeyPath: String
-) throws -> ClientConnection.Configuration.TLS? {
-  // Commander doesn't have Optional options; we use empty strings to indicate no value.
-  guard certificatePath.isEmpty == privateKeyPath.isEmpty &&
-    privateKeyPath.isEmpty == caCertificatePath.isEmpty else {
-      print("Paths for CA certificate, certificate and private key must be provided")
-      exit(1)
-  }
+  measureAndPrint(
+    description: "bidi_10k_small_requests_in_batches_of_5",
+    benchmark: Bidi(requests: 10_000, text: smallRequest, batchSize: 5),
+    spec: spec
+  )
 
-  // No need to check them all because of the guard statement above.
-  if caCertificatePath.isEmpty {
-    return nil
-  }
+  measureAndPrint(
+    description: "bidi_1k_large_requests_in_batches_of_1",
+    benchmark: Bidi(requests: 1_000, text: largeRequest, batchSize: 1),
+    spec: spec
+  )
 
-  return .init(
-    certificateChain: try NIOSSLCertificate.fromPEMFile(certificatePath).map { .certificate($0) },
-    privateKey: .file(privateKeyPath),
-    trustRoots: .file(caCertificatePath)
+  measureAndPrint(
+    description: "embedded_client_unary_10k_small_requests",
+    benchmark: EmbeddedClientThroughput(requests: 10_000, text: smallRequest),
+    spec: spec
   )
 }
 
-enum Benchmarks: String, CaseIterable {
-  case unaryThroughputSmallRequests = "unary_throughput_small"
-  case unaryThroughputLargeRequests = "unary_throughput_large"
-  case bidirectionalThroughputSmallRequests = "bidi_throughput_small"
-  case bidirectionalThroughputLargeRequests = "bidi_throughput_large"
-  case connectionThroughput = "connection_throughput"
+struct TestSpec {
+  var action: Action
+  var repeats: Int
 
-  static let smallRequest = 8
-  static let largeRequest = 1 << 16
-
-  var description: String {
-    switch self {
-    case .unaryThroughputSmallRequests:
-      return "10k unary requests of size \(Benchmarks.smallRequest)"
-
-    case .unaryThroughputLargeRequests:
-      return "10k unary requests of size \(Benchmarks.largeRequest)"
-
-    case .bidirectionalThroughputSmallRequests:
-      return "20k bidirectional messages of size \(Benchmarks.smallRequest)"
-
-    case .bidirectionalThroughputLargeRequests:
-      return "10k bidirectional messages of size \(Benchmarks.largeRequest)"
-
-    case .connectionThroughput:
-      return "100 connections created"
-    }
+  init(action: Action, repeats: Int = 10) {
+    self.action = action
+    self.repeats = repeats
   }
 
-  func makeBenchmark(factory: ConnectionFactory) -> Benchmark {
-    switch self {
-    case .unaryThroughputSmallRequests:
-      return UnaryThroughput(factory: factory, requests: 10_000, requestLength: Benchmarks.smallRequest)
-
-    case .unaryThroughputLargeRequests:
-      return UnaryThroughput(factory: factory, requests: 10_000, requestLength: Benchmarks.largeRequest)
-
-    case .bidirectionalThroughputSmallRequests:
-      return BidirectionalThroughput(factory: factory, requests: 20_000, requestLength: Benchmarks.smallRequest)
-
-    case .bidirectionalThroughputLargeRequests:
-      return BidirectionalThroughput(factory: factory, requests: 10_000, requestLength: Benchmarks.largeRequest)
-
-    case .connectionThroughput:
-      return ConnectionCreationThroughput(factory: factory, connections: 100)
-    }
+  enum Action {
+    /// Run the benchmark with the given filter.
+    case run(Filter)
+    /// List all benchmarks.
+    case list
   }
 
-  func run(using factory: ConnectionFactory, repeats: Int = 10) -> BenchmarkResults {
-    let benchmark = self.makeBenchmark(factory: factory)
-    return measure(description: self.description, benchmark: benchmark, repeats: repeats)
-  }
-}
-
-enum Command {
-  case listBenchmarks
-  case benchmark(name: String, host: String, port: Int, tls: (ca: String, cert: String)?)
-  case server(port: Int, tls: (ca: String, cert: String, key: String)?)
-
-  init?(from args: [String]) {
-    guard !args.isEmpty else {
-      return nil
-    }
-
-    var args = args
-    let command = args.removeFirst()
-    switch command {
-    case "server":
-      guard let port = args.popLast().flatMap(Int.init) else {
-        return nil
-      }
-
-      let caPath = args.suffixOfFirst(prefixedWith: "--caPath=")
-      let certPath = args.suffixOfFirst(prefixedWith: "--certPath=")
-      let keyPath = args.suffixOfFirst(prefixedWith: "--keyPath=")
-
-      // We need all or nothing here:
-      switch (caPath, certPath, keyPath) {
-      case let (.some(ca), .some(cert), .some(key)):
-        self = .server(port: port, tls: (ca: ca, cert: cert, key: key))
-      case (.none, .none, .none):
-        self = .server(port: port, tls: nil)
-      default:
-        return nil
-      }
-
-    case "benchmark":
-      guard let name = args.popLast(),
-        let port = args.popLast().flatMap(Int.init),
-        let host = args.popLast()
-        else {
-          return nil
-      }
+  enum Filter {
+    /// Run all tests.
+    case all
+    /// Run the tests which match the given descriptions.
+    case some([String])
 
-      let caPath = args.suffixOfFirst(prefixedWith: "--caPath=")
-      let certPath = args.suffixOfFirst(prefixedWith: "--certPath=")
-      // We need all or nothing here:
-      switch (caPath, certPath) {
-      case let (.some(ca), .some(cert)):
-        self = .benchmark(name: name, host: host, port: port, tls: (ca: ca, cert: cert))
-      case (.none, .none):
-        self = .benchmark(name: name, host: host, port: port, tls: nil)
-      default:
-        return nil
+    func shouldRun(_ description: String) -> Bool {
+      switch self {
+      case .all:
+        return true
+      case .some(let whitelist):
+        return whitelist.contains(description)
       }
-
-    case "list_benchmarks":
-      self = .listBenchmarks
-
-    default:
-      return nil
     }
   }
 }
 
-func printUsageAndExit(program: String) -> Never {
-  print("""
-  Usage: \(program) COMMAND [OPTIONS...]
-
-  benchmark:
-    Run the given benchmark (see 'list_benchmarks' for possible options) against a server on the
-    specified host and port. TLS may be used by spefifying the path to the PEM formatted
-    certificate and CA certificate.
-
-      benchmark [--ca=CA --cert=CERT] HOST PORT BENCHMARK_NAME
+func usage(program: String) -> String {
+  return """
+  USAGE: \(program) [-alh] [BENCHMARK ...]
 
-    Note: eiether all or none of CA and CERT must be provided.
+  OPTIONS:
 
-  list_benchmarks:
-    List the available benchmarks to run.
+    The following options are available:
 
-  server:
-    Start the server on the given PORT. TLS may be used by specifying the paths to the PEM formatted
-    certificate, private key and CA certificate.
+    -a  Run all benchmarks. (Also: '--all')
 
-      server [--ca=CA --cert=CERT --key=KEY] PORT
+    -l  List all benchmarks. (Also: '--list')
 
-    Note: eiether all or none of CA, CERT and KEY must be provided.
-  """)
-  exit(1)
-}
-
-fileprivate extension Array where Element == String {
-  func suffixOfFirst(prefixedWith prefix: String) -> String? {
-    return self.first {
-      $0.hasPrefix(prefix)
-    }.map {
-      String($0.dropFirst(prefix.count))
-    }
-  }
+    -h  Prints this message. (Also: '--help')
+  """
 }
 
 func main(args: [String]) {
-  var args = args
-  let program = args.removeFirst()
-  guard let command = Command(from: args) else {
-    printUsageAndExit(program: program)
+  // Quieten the logs.
+  LoggingSystem.bootstrap {
+    var handler = StreamLogHandler.standardOutput(label: $0)
+    handler.logLevel = .critical
+    return handler
   }
 
-  switch command {
-  case let .server(port: port, tls: tls):
-    let group = MultiThreadedEventLoopGroup(numberOfThreads: System.coreCount)
-    defer {
-      try! group.syncShutdownGracefully()
-    }
-
-    // Quieten the logs.
-    LoggingSystem.bootstrap {
-      var handler = StreamLogHandler.standardOutput(label: $0)
-      handler.logLevel = .warning
-      return handler
-    }
-
-    do {
-      let configuration = try Server.Configuration(
-        target: .hostAndPort("localhost", port),
-        eventLoopGroup: group,
-        serviceProviders: [EchoProvider()],
-        tls: tls.map { tlsArgs in
-          return .init(
-            certificateChain: try NIOSSLCertificate.fromPEMFile(tlsArgs.cert).map { .certificate($0) },
-            privateKey: .file(tlsArgs.key),
-            trustRoots: .file(tlsArgs.ca)
-          )
-        }
-      )
+  let program = args.first!
+  let arg0 = args.dropFirst().first
 
-      let server = try Server.start(configuration: configuration).wait()
-      print("server started on port: \(server.channel.localAddress?.port ?? port)")
+  switch arg0 {
+  case "-h", "--help":
+    print(usage(program: program))
 
-      // Stop the program from exiting.
-      try? server.onClose.wait()
-    } catch {
-      print("unable to start server: \(error)")
-      exit(1)
-    }
+  case "-l", "--list":
+    runBenchmarks(spec: TestSpec(action: .list))
 
-  case let .benchmark(name: name, host: host, port: port, tls: tls):
-    guard let benchmark = Benchmarks(rawValue: name) else {
-      printUsageAndExit(program: program)
-    }
+  case "-a", "--all":
+    runBenchmarks(spec: TestSpec(action: .run(.all)))
 
-    // Quieten the logs.
-    LoggingSystem.bootstrap {
-      var handler = StreamLogHandler.standardOutput(label: $0)
-      handler.logLevel = .critical
-      return handler
-    }
-
-    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
-    defer {
-      try! group.syncShutdownGracefully()
-    }
-
-    do {
-      let configuration = try ClientConnection.Configuration(
-        target: .hostAndPort(host, port),
-        eventLoopGroup: group,
-        tls: tls.map { tlsArgs in
-          return .init(
-            certificateChain: try NIOSSLCertificate.fromPEMFile(tlsArgs.cert).map { .certificate($0) },
-            trustRoots: .file(tlsArgs.ca)
-          )
-        }
-      )
-
-      let factory = ConnectionFactory(configuration: configuration)
-      let results = benchmark.run(using: factory)
-      print(results.asCSV)
-    } catch {
-      print("unable to run benchmark: \(error)")
-      exit(1)
-    }
-
-  case .listBenchmarks:
-    Benchmarks.allCases.forEach {
-      print($0.rawValue)
+  default:
+    // This must be a list of benchmarks to run.
+    let tests = Array(args.dropFirst())
+    if tests.isEmpty {
+      print(usage(program: program))
+    } else {
+      runBenchmarks(spec: TestSpec(action: .run(.some(tests))))
     }
   }
 }
 
+assert({
+  print("⚠️ WARNING: YOU ARE RUNNING IN DEBUG MODE ⚠️")
+  return true
+}())
+
 main(args: CommandLine.arguments)