
Log some HTTP/2 information (#938)

Motivation:

Sometimes it's useful to know what's going on at the HTTP/2 layer, but none
of our logs currently surface this information.

Modifications:

- Add logs for stream created/closed events (including the open stream count)
- Add logs for the initial HTTP/2 settings
- Add more logging around the connection lifecycle
- Add trace logs to the stream for read/written frames
- Prefix the request and connection ID metadata keys with "grpc_"
- Remove duplicated log messages
- Treat `.distantFuture` deadlines as infinite timeouts

Result:

- Better insight into what's happening at the HTTP/2 layer
George Barnett · 5 years ago · parent commit 07432c4266
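
The new logs sit at the `.debug` and `.trace` levels, so they only show up when a suitably verbose logger is supplied. A minimal sketch of surfacing them on a call; this assumes `CallOptions` exposes a settable `logger` (as the `populatedLoggerAndRequestID(from:)` calls below suggest) and uses a hypothetical RPC name:

```swift
import GRPC
import Logging

// Sketch only: raise the log level so the new debug/trace logs appear.
var logger = Logger(label: "io.grpc.example")
logger.logLevel = .trace // .debug covers lifecycle events; .trace adds per-frame logs

// Assumption: CallOptions carries a settable `logger`, as the
// populatedLoggerAndRequestID(from:) calls in ClientConnection suggest.
var options = CallOptions()
options.logger = logger

// let call = client.someUnaryRPC(request, callOptions: options) // hypothetical RPC
```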

+ 2 - 3
Sources/GRPC/ClientCalls/ClientCallTransport.swift

@@ -169,9 +169,7 @@ internal class ChannelTransport<Request, Response> {
         switch result {
         case let .success(mux):
           mux.createStreamChannel(promise: streamPromise) { stream in
-            logger.trace("created http/2 stream", source: "GRPC")
-
-            return stream.pipeline.addHandlers([
+            stream.pipeline.addHandlers([
               _GRPCClientChannelHandler(callType: callType, logger: logger),
               GRPCClientCodecHandler(serializer: serializer, deserializer: deserializer),
               GRPCClientCallHandler(call: call),
@@ -574,6 +572,7 @@ extension ChannelTransport: ClientCallInbound {
   /// Must be called on the event loop.
   internal func activate(stream: Channel) {
     self.eventLoop.preconditionInEventLoop()
+    self.logger.debug("activated stream channel", source: "GRPC")
 
     // The channel has become active: what now?
     switch self.state {

+ 5 - 5
Sources/GRPC/ClientConnection.swift

@@ -166,7 +166,6 @@ extension ClientConnection: GRPCChannel {
     callOptions: CallOptions
   ) -> UnaryCall<Serializer.Input, Deserializer.Output> {
     let (logger, requestID) = self.populatedLoggerAndRequestID(from: callOptions)
-    logger.debug("starting rpc", metadata: ["path": "\(path)"])
 
     let call = UnaryCall<Serializer.Input, Deserializer.Output>.makeOnHTTP2Stream(
       multiplexer: self.multiplexer,
@@ -229,7 +228,6 @@ extension ClientConnection {
     callOptions: CallOptions
   ) -> ClientStreamingCall<Serializer.Input, Deserializer.Output> {
     let (logger, requestID) = self.populatedLoggerAndRequestID(from: callOptions)
-    logger.debug("starting rpc", metadata: ["path": "\(path)"])
 
     let call = ClientStreamingCall<Serializer.Input, Deserializer.Output>.makeOnHTTP2Stream(
       multiplexer: self.multiplexer,
@@ -290,7 +288,6 @@ extension ClientConnection {
     handler: @escaping (Deserializer.Output) -> Void
   ) -> ServerStreamingCall<Serializer.Input, Deserializer.Output> {
     let (logger, requestID) = self.populatedLoggerAndRequestID(from: callOptions)
-    logger.debug("starting rpc", metadata: ["path": "\(path)"])
 
     let call = ServerStreamingCall<Serializer.Input, Deserializer.Output>.makeOnHTTP2Stream(
       multiplexer: self.multiplexer,
@@ -362,7 +359,6 @@ extension ClientConnection {
     handler: @escaping (Deserializer.Output) -> Void
   ) -> BidirectionalStreamingCall<Serializer.Input, Deserializer.Output> {
     let (logger, requestID) = self.populatedLoggerAndRequestID(from: callOptions)
-    logger.debug("starting rpc", metadata: ["path": "\(path)"])
 
     let call = BidirectionalStreamingCall<Serializer.Input, Deserializer.Output>.makeOnHTTP2Stream(
       multiplexer: self.multiplexer,
@@ -688,7 +684,11 @@ extension Channel {
         self.pipeline.addHandlers(
           [
             GRPCClientKeepaliveHandler(configuration: connectionKeepalive),
-            GRPCIdleHandler(mode: .client(connectionManager), idleTimeout: connectionIdleTimeout),
+            GRPCIdleHandler(
+              mode: .client(connectionManager),
+              logger: logger,
+              idleTimeout: connectionIdleTimeout
+            ),
           ],
           position: .after(http2Handler)
         )

+ 66 - 12
Sources/GRPC/ConnectionManager.swift

@@ -140,6 +140,23 @@ internal class ConnectionManager {
 
     /// We never want another `Channel`: this state is terminal.
     case shutdown(ShutdownState)
+
+    fileprivate var label: String {
+      switch self {
+      case .idle:
+        return "idle"
+      case .connecting:
+        return "connecting"
+      case .active:
+        return "active"
+      case .ready:
+        return "ready"
+      case .transientFailure:
+        return "transientFailure"
+      case .shutdown:
+        return "shutdown"
+      }
+    }
   }
 
   private var state: State {
@@ -240,6 +257,8 @@ internal class ConnectionManager {
   /// Returns a future for a connected channel.
   internal func getChannel() -> EventLoopFuture<Channel> {
     return self.eventLoop.flatSubmit {
+      let channel: EventLoopFuture<Channel>
+
       switch self.state {
       case .idle:
         self.startConnecting()
@@ -247,23 +266,29 @@ internal class ConnectionManager {
         guard case let .connecting(connecting) = self.state else {
           self.invalidState()
         }
-        return connecting.readyChannelPromise.futureResult
+        channel = connecting.readyChannelPromise.futureResult
 
       case let .connecting(state):
-        return state.readyChannelPromise.futureResult
+        channel = state.readyChannelPromise.futureResult
 
       case let .active(state):
-        return state.readyChannelPromise.futureResult
+        channel = state.readyChannelPromise.futureResult
 
       case let .ready(state):
-        return state.channel.eventLoop.makeSucceededFuture(state.channel)
+        channel = state.channel.eventLoop.makeSucceededFuture(state.channel)
 
       case let .transientFailure(state):
-        return state.readyChannelPromise.futureResult
+        channel = state.readyChannelPromise.futureResult
 
       case .shutdown:
-        return self.eventLoop.makeFailedFuture(GRPCStatus(code: .unavailable, message: nil))
+        channel = self.eventLoop.makeFailedFuture(GRPCStatus(code: .unavailable, message: nil))
       }
+
+      self.logger.debug("vending channel future", metadata: [
+        "connectivity_state": "\(self.state.label)",
+      ])
+
+      return channel
     }
   }
 
@@ -273,6 +298,8 @@ internal class ConnectionManager {
   /// Note: if the state is 'transientFailure' or 'shutdown' then a failed future will be returned.
   internal func getOptimisticChannel() -> EventLoopFuture<Channel> {
     return self.eventLoop.flatSubmit {
+      let channel: EventLoopFuture<Channel>
+
       switch self.state {
       case .idle:
         self.startConnecting()
@@ -280,29 +307,38 @@ internal class ConnectionManager {
         guard case let .connecting(connecting) = self.state else {
           self.invalidState()
         }
-        return connecting.candidate
+        channel = connecting.candidate
 
       case let .connecting(state):
-        return state.candidate
+        channel = state.candidate
 
       case let .active(state):
-        return state.candidate.eventLoop.makeSucceededFuture(state.candidate)
+        channel = state.candidate.eventLoop.makeSucceededFuture(state.candidate)
 
       case let .ready(state):
-        return state.channel.eventLoop.makeSucceededFuture(state.channel)
+        channel = state.channel.eventLoop.makeSucceededFuture(state.channel)
 
       case .transientFailure:
-        return self.eventLoop.makeFailedFuture(ChannelError.ioOnClosedChannel)
+        channel = self.eventLoop.makeFailedFuture(ChannelError.ioOnClosedChannel)
 
       case .shutdown:
-        return self.eventLoop.makeFailedFuture(GRPCStatus(code: .unavailable, message: nil))
+        channel = self.eventLoop.makeFailedFuture(GRPCStatus(code: .unavailable, message: nil))
       }
+
+      self.logger.debug("vending fast-failing channel future", metadata: [
+        "connectivity_state": "\(self.state.label)",
+      ])
+
+      return channel
     }
   }
 
   /// Shutdown any connection which exists. This is a request from the application.
   internal func shutdown() -> EventLoopFuture<Void> {
     return self.eventLoop.flatSubmit {
+      self.logger.debug("shutting down connection", metadata: [
+        "connectivity_state": "\(self.state.label)",
+      ])
       let shutdown: ShutdownState
 
       switch self.state {
@@ -375,6 +411,9 @@ internal class ConnectionManager {
   /// The connecting channel became `active`. Must be called on the `EventLoop`.
   internal func channelActive(channel: Channel) {
     self.eventLoop.preconditionInEventLoop()
+    self.logger.debug("activating connection", metadata: [
+      "connectivity_state": "\(self.state.label)",
+    ])
 
     switch self.state {
     case let .connecting(connecting):
@@ -393,6 +432,9 @@ internal class ConnectionManager {
   /// Must be called on the `EventLoop`.
   internal func channelInactive() {
     self.eventLoop.preconditionInEventLoop()
+    self.logger.debug("deactivating connection", metadata: [
+      "connectivity_state": "\(self.state.label)",
+    ])
 
     switch self.state {
     // The channel is `active` but not `ready`. Should we try again?
@@ -400,6 +442,7 @@ internal class ConnectionManager {
       switch active.reconnect {
       // No, shutdown instead.
       case .none:
+        self.logger.debug("shutting down connection")
         self.state = .shutdown(ShutdownState(closeFuture: self.eventLoop.makeSucceededFuture(())))
         active.readyChannelPromise.fail(GRPCStatus(code: .unavailable, message: nil))
 
@@ -408,6 +451,7 @@ internal class ConnectionManager {
         let scheduled = self.eventLoop.scheduleTask(in: .seconds(timeInterval: delay)) {
           self.startConnecting()
         }
+        self.logger.debug("scheduling connection attempt", metadata: ["delay_secs": "\(delay)"])
         self.state = .transientFailure(TransientFailureState(from: active, scheduled: scheduled))
       }
 
@@ -416,12 +460,14 @@ internal class ConnectionManager {
     case let .ready(ready):
       // No, no backoff is configured.
       if ready.configuration.connectionBackoff == nil {
+        self.logger.debug("shutting down connection, no reconnect configured/remaining")
         self.state = .shutdown(ShutdownState(closeFuture: ready.channel.closeFuture))
       } else {
         // Yes, start connecting now. We should go via `transientFailure`, however.
         let scheduled = self.eventLoop.scheduleTask(in: .nanoseconds(0)) {
           self.startConnecting()
         }
+        self.logger.debug("scheduling connection attempt", metadata: ["delay": "0"])
         self.state = .transientFailure(TransientFailureState(from: ready, scheduled: scheduled))
       }
 
@@ -442,6 +488,9 @@ internal class ConnectionManager {
   /// called on the `EventLoop`.
   internal func ready() {
     self.eventLoop.preconditionInEventLoop()
+    self.logger.debug("connection ready", metadata: [
+      "connectivity_state": "\(self.state.label)",
+    ])
 
     switch self.state {
     case let .active(connected):
@@ -460,6 +509,9 @@ internal class ConnectionManager {
   /// the `EventLoop`.
   internal func idle() {
     self.eventLoop.preconditionInEventLoop()
+    self.logger.debug("idling connection", metadata: [
+      "connectivity_state": "\(self.state.label)",
+    ])
 
     switch self.state {
     case let .ready(state):
@@ -482,11 +534,13 @@ extension ConnectionManager {
       switch connecting.reconnect {
       // No, shutdown.
       case .none:
+        self.logger.debug("shutting down connection, no reconnect configured/remaining")
         connecting.readyChannelPromise.fail(error)
         self.state = .shutdown(ShutdownState(closeFuture: self.eventLoop.makeSucceededFuture(())))
 
       // Yes, after a delay.
       case let .after(delay):
+        self.logger.debug("scheduling connection attempt", metadata: ["delay": "\(delay)"])
         let scheduled = self.eventLoop.scheduleTask(in: .seconds(timeInterval: delay)) {
           self.startConnecting()
         }
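
The hand-written `label` above presumably exists because the default description of an enum case carries its associated value, which is far too noisy for a `connectivity_state` metadata entry. A standalone sketch of the behaviour being avoided:

```swift
// Sketch: String(describing:) on an enum case includes the associated
// value, so logging the state directly would dump the whole payload.
enum DemoState {
  case ready(channel: String)
}

let state = DemoState.ready(channel: "<Channel 0x7f…>")
print(String(describing: state)) // ready(channel: "<Channel 0x7f…>"), not just "ready"
```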

+ 56 - 7
Sources/GRPC/GRPCIdleHandler.swift

@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import Logging
 import NIO
 import NIOHTTP2
 
@@ -31,6 +32,9 @@ internal class GRPCIdleHandler: ChannelInboundHandler {
   /// Client and server have slightly different behaviours; track which we are following.
   private var mode: Mode
 
+  /// A logger.
+  private let logger: Logger
+
   /// The mode of operation: the client tracks additional connection state in the connection
   /// manager.
   internal enum Mode {
@@ -52,22 +56,33 @@ internal class GRPCIdleHandler: ChannelInboundHandler {
     case closed
   }
 
-  init(mode: Mode, idleTimeout: TimeAmount = .minutes(5)) {
+  init(mode: Mode, logger: Logger, idleTimeout: TimeAmount = .minutes(5)) {
     self.mode = mode
     self.idleTimeout = idleTimeout
+    self.logger = logger
   }
 
   func userInboundEventTriggered(context: ChannelHandlerContext, event: Any) {
     switch self.state {
     case .notReady, .ready:
-      if event is NIOHTTP2StreamCreatedEvent {
+      if let created = event as? NIOHTTP2StreamCreatedEvent {
         // We have a stream: don't go idle
         self.scheduledIdle?.cancel()
         self.scheduledIdle = nil
-
         self.activeStreams += 1
-      } else if event is StreamClosedEvent {
+
+        self.logger.debug("HTTP2 stream created", metadata: [
+          MetadataKey.h2StreamID: "\(created.streamID)",
+          MetadataKey.h2ActiveStreams: "\(self.activeStreams)",
+        ])
+      } else if let closed = event as? StreamClosedEvent {
         self.activeStreams -= 1
+
+        self.logger.debug("HTTP2 stream closed", metadata: [
+          MetadataKey.h2StreamID: "\(closed.streamID)",
+          MetadataKey.h2ActiveStreams: "\(self.activeStreams)",
+        ])
+
         // No active streams: go idle soon.
         if self.activeStreams == 0 {
           self.scheduleIdleTimeout(context: context)
@@ -128,15 +143,15 @@ internal class GRPCIdleHandler: ChannelInboundHandler {
     if frame.streamID == .rootStream {
       switch (self.state, frame.payload) {
       // We only care about SETTINGS as long as we are in state `.notReady`.
-      case (.notReady, .settings):
+      case let (.notReady, .settings(content)):
         self.state = .ready
 
         switch self.mode {
         case let .client(manager):
           let remoteAddressDescription = context.channel.remoteAddress.map { "\($0)" } ?? "n/a"
           manager.logger.info("gRPC connection ready", metadata: [
-            "remote_address": "\(remoteAddressDescription)",
-            "event_loop": "\(context.eventLoop)",
+            MetadataKey.remoteAddress: "\(remoteAddressDescription)",
+            MetadataKey.eventLoop: "\(context.eventLoop)",
           ])
 
           // Let the manager know we're ready.
@@ -146,6 +161,15 @@ internal class GRPCIdleHandler: ChannelInboundHandler {
           ()
         }
 
+        if case let .settings(settings) = content {
+          self.logger.debug(
+            "received initial HTTP2 settings",
+            metadata: Dictionary(settings.map {
+              ("\($0.parameter.loggingMetadataKey)", "\($0.value)")
+            }, uniquingKeysWith: { a, _ in a })
+          )
+        }
+
         // Start the idle timeout.
         self.scheduleIdleTimeout(context: context)
 
@@ -187,6 +211,8 @@ internal class GRPCIdleHandler: ChannelInboundHandler {
       case .server:
         ()
       }
+
+      self.logger.debug("Closing idle channel")
       context.close(mode: .all, promise: nil)
 
     // We need to guard against double closure here. We may go idle as a result of receiving a
@@ -196,3 +222,26 @@ internal class GRPCIdleHandler: ChannelInboundHandler {
     }
   }
 }
+
+extension HTTP2SettingsParameter {
+  fileprivate var loggingMetadataKey: String {
+    switch self {
+    case .headerTableSize:
+      return "h2_settings_header_table_size"
+    case .enablePush:
+      return "h2_settings_enable_push"
+    case .maxConcurrentStreams:
+      return "h2_settings_max_concurrent_streams"
+    case .initialWindowSize:
+      return "h2_settings_initial_window_size"
+    case .maxFrameSize:
+      return "h2_settings_max_frame_size"
+    case .maxHeaderListSize:
+      return "h2_settings_max_header_list_size"
+    case .enableConnectProtocol:
+      return "h2_settings_enable_connect_protocol"
+    default:
+      return String(describing: self)
+    }
+  }
+}
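
The settings are folded into metadata with `Dictionary(_:uniquingKeysWith:)` because a SETTINGS frame may repeat a parameter, and `Dictionary(uniqueKeysWithValues:)` would trap on the duplicate key; the closure keeps the first occurrence. A standalone sketch with simplified keys (the commit uses `loggingMetadataKey`):

```swift
import Logging
import NIOHTTP2

// Sketch: build Logger.Metadata from HTTP/2 settings, keeping the first
// value when a parameter appears more than once in the frame.
let settings: [HTTP2Setting] = [
  HTTP2Setting(parameter: .maxConcurrentStreams, value: 100),
  HTTP2Setting(parameter: .initialWindowSize, value: 65535),
  HTTP2Setting(parameter: .maxConcurrentStreams, value: 250), // repeated parameter
]

let metadata: Logger.Metadata = Dictionary(
  settings.map { ("\($0.parameter)", Logger.MetadataValue.string("\($0.value)")) },
  uniquingKeysWith: { first, _ in first }
)
// The repeated maxConcurrentStreams keeps its first value, 100.
```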

+ 7 - 2
Sources/GRPC/GRPCTimeout.swift

@@ -42,8 +42,13 @@ public struct GRPCTimeout: CustomStringConvertible, Equatable {
   ///
   /// - Parameter deadline: The deadline to create a timeout from.
   internal init(deadline: NIODeadline, testingOnlyNow: NIODeadline? = nil) {
-    let timeAmountUntilDeadline = deadline - (testingOnlyNow ?? .now())
-    self.init(rounding: timeAmountUntilDeadline.nanoseconds, unit: .nanoseconds)
+    switch deadline {
+    case .distantFuture:
+      self = .infinite
+    default:
+      let timeAmountUntilDeadline = deadline - (testingOnlyNow ?? .now())
+      self.init(rounding: timeAmountUntilDeadline.nanoseconds, unit: .nanoseconds)
+    }
   }
 
   private init(nanoseconds: Int64, wireEncoding: String) {
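
The `.distantFuture` special case matters because a deadline is backed by a finite nanosecond count, so the old subtraction produced an enormous but finite timeout rather than anything recognisable as "no deadline". A sketch of the arithmetic (assumed rationale, not stated in the commit):

```swift
import NIO

// Sketch: NIODeadline.distantFuture is a finite (clamped) nanosecond value,
// so subtracting "now" yields a huge finite TimeAmount, not an infinity.
let remaining = NIODeadline.distantFuture - NIODeadline.now()
print(remaining.nanoseconds) // enormous finite Int64
```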

+ 7 - 3
Sources/GRPC/HTTPProtocolSwitcher.swift

@@ -150,10 +150,10 @@ extension HTTPProtocolSwitcher: ChannelInboundHandler, RemovableChannelHandler {
 
           // Grab the streamID from the channel.
           return streamChannel.getOption(HTTP2StreamChannelOptions.streamID).map { streamID in
-            logger[metadataKey: MetadataKey.streamID] = "\(streamID)"
+            logger[metadataKey: MetadataKey.h2StreamID] = "\(streamID)"
             return logger
           }.recover { _ in
-            logger[metadataKey: MetadataKey.streamID] = "<unknown>"
+            logger[metadataKey: MetadataKey.h2StreamID] = "<unknown>"
             return logger
           }.flatMap { logger in
             streamChannel.pipeline.addHandler(HTTP2FramePayloadToHTTP1ServerCodec()).flatMap {
@@ -163,7 +163,11 @@ extension HTTPProtocolSwitcher: ChannelInboundHandler, RemovableChannelHandler {
         }.flatMap { multiplexer -> EventLoopFuture<Void> in
           // Add a keepalive and idle handlers between the two HTTP2 handlers.
           let keepaliveHandler = GRPCServerKeepaliveHandler(configuration: self.keepAlive)
-          let idleHandler = GRPCIdleHandler(mode: .server, idleTimeout: self.idleTimeout)
+          let idleHandler = GRPCIdleHandler(
+            mode: .server,
+            logger: self.logger,
+            idleTimeout: self.idleTimeout
+          )
           return context.channel.pipeline.addHandlers(
             [keepaliveHandler, idleHandler],
             position: .before(multiplexer)

+ 13 - 3
Sources/GRPC/Logger.swift

@@ -17,8 +17,18 @@ import Logging
 
 /// Keys for `Logger` metadata.
 enum MetadataKey {
-  static let streamID = "http2_stream_id"
-  static let requestID = "request_id"
-  static let connectionID = "connection_id"
+  static let requestID = "grpc_request_id"
+  static let connectionID = "grpc_connection_id"
+
+  static let eventLoop = "event_loop"
+  static let remoteAddress = "remote_address"
+
+  static let h2StreamID = "h2_stream_id"
+  static let h2ActiveStreams = "h2_active_streams"
+  static let h2EndStream = "h2_end_stream"
+  static let h2Payload = "h2_payload"
+  static let h2Headers = "h2_headers"
+  static let h2DataBytes = "h2_data_bytes"
+
   static let error = "error"
 }
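
Consumers that filter or index on the old key names need updating to the prefixed ones; a hypothetical consumer-side filter:

```swift
import Logging

// Hypothetical filter: the request and connection ID keys are now
// namespaced with "grpc_" to avoid clashing with application metadata.
func isGRPCCallLog(_ metadata: Logger.Metadata?) -> Bool {
  guard let metadata = metadata else { return false }
  return metadata["grpc_request_id"] != nil || metadata["grpc_connection_id"] != nil
}
```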

+ 6 - 1
Sources/GRPC/Server.swift

@@ -103,12 +103,17 @@ public final class Server {
       )
       // Set the handlers that are applied to the accepted Channels
       .childChannelInitializer { channel in
+        var logger = configuration.logger
+        logger[metadataKey: MetadataKey.connectionID] = "\(UUID().uuidString)"
+        logger[metadataKey: MetadataKey.remoteAddress] = channel.remoteAddress
+          .map { "\($0)" } ?? "n/a"
+
         let protocolSwitcher = HTTPProtocolSwitcher(
           errorDelegate: configuration.errorDelegate,
           httpTargetWindowSize: configuration.httpTargetWindowSize,
           keepAlive: configuration.connectionKeepalive,
           idleTimeout: configuration.connectionIdleTimeout,
-          logger: configuration.logger
+          logger: logger
         ) { (channel, logger) -> EventLoopFuture<Void> in
           let handler = GRPCServerRequestRoutingHandler(
             servicesByName: configuration.serviceProvidersByName,

+ 27 - 0
Sources/GRPC/_GRPCClientChannelHandler.swift

@@ -328,6 +328,12 @@ extension _GRPCClientChannelHandler: ChannelInboundHandler {
     content: HTTP2Frame.FramePayload.Headers,
     context: ChannelHandlerContext
   ) {
+    self.logger.trace("received HTTP2 frame", metadata: [
+      MetadataKey.h2Payload: "HEADERS",
+      MetadataKey.h2Headers: "\(content.headers)",
+      MetadataKey.h2EndStream: "\(content.endStream)",
+    ])
+
     // In the case of a "Trailers-Only" response there's no guarantee that end-of-stream will be set
     // on the headers frame: end stream may be sent on an empty data frame as well. If the headers
     // contain a gRPC status code then they must be for a "Trailers-Only" response.
@@ -396,6 +402,12 @@ extension _GRPCClientChannelHandler: ChannelInboundHandler {
       preconditionFailure("Received DATA frame with non-ByteBuffer IOData")
     }
 
+    self.logger.trace("received HTTP2 frame", metadata: [
+      MetadataKey.h2Payload: "DATA",
+      MetadataKey.h2DataBytes: "\(content.data.readableBytes)",
+      MetadataKey.h2EndStream: "\(content.endStream)",
+    ])
+
     // Do we have bytes to read? If there are no bytes to read then we can't do anything. This may
     // happen if the end-of-stream flag is not set on the trailing headers frame (i.e. the one
     // containing the gRPC status code) and an additional empty data frame is sent with the
@@ -453,6 +465,11 @@ extension _GRPCClientChannelHandler: ChannelOutboundHandler {
       case let .success(headers):
         // We're clear to write some headers. Create an appropriate frame and write it.
         let framePayload = HTTP2Frame.FramePayload.headers(.init(headers: headers))
+        self.logger.trace("writing HTTP2 frame", metadata: [
+          MetadataKey.h2Payload: "HEADERS",
+          MetadataKey.h2Headers: "\(headers)",
+          MetadataKey.h2EndStream: "false",
+        ])
         context.write(self.wrapOutboundOut(framePayload), promise: promise)
 
       case let .failure(sendRequestHeadersError):
@@ -475,6 +492,11 @@ extension _GRPCClientChannelHandler: ChannelOutboundHandler {
       case let .success(buffer):
         // We're clear to send a message; wrap it up in an HTTP/2 frame.
         let framePayload = HTTP2Frame.FramePayload.data(.init(data: .byteBuffer(buffer)))
+        self.logger.trace("writing HTTP2 frame", metadata: [
+          MetadataKey.h2Payload: "DATA",
+          MetadataKey.h2DataBytes: "\(buffer.readableBytes)",
+          MetadataKey.h2EndStream: "false",
+        ])
         context.write(self.wrapOutboundOut(framePayload), promise: promise)
 
       case let .failure(writeError):
@@ -503,6 +525,11 @@ extension _GRPCClientChannelHandler: ChannelOutboundHandler {
         let empty = context.channel.allocator.buffer(capacity: 0)
         let framePayload = HTTP2Frame.FramePayload
           .data(.init(data: .byteBuffer(empty), endStream: true))
+        self.logger.trace("writing HTTP2 frame", metadata: [
+          MetadataKey.h2Payload: "DATA",
+          MetadataKey.h2DataBytes: "0",
+          MetadataKey.h2EndStream: "true",
+        ])
         context.write(self.wrapOutboundOut(framePayload), promise: promise)
 
       case let .failure(error):

+ 11 - 11
Tests/GRPCTests/ConnectionManagerTests.swift

@@ -124,7 +124,7 @@ extension ConnectionManagerTests {
 
     // Setup the real channel and activate it.
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -165,7 +165,7 @@ extension ConnectionManagerTests {
 
     // Setup the channel.
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -215,7 +215,7 @@ extension ConnectionManagerTests {
 
     // Setup the channel.
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -278,7 +278,7 @@ extension ConnectionManagerTests {
 
     // Setup the channel.
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -337,7 +337,7 @@ extension ConnectionManagerTests {
 
     // Setup the actual channel and complete the promise.
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -444,7 +444,7 @@ extension ConnectionManagerTests {
 
     // Prepare the channel
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -508,7 +508,7 @@ extension ConnectionManagerTests {
 
     // Prepare the channel
     let firstChannel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(firstChannel)
@@ -572,7 +572,7 @@ extension ConnectionManagerTests {
 
     // Prepare the first channel
     let firstChannel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     firstChannelPromise.succeed(firstChannel)
@@ -602,7 +602,7 @@ extension ConnectionManagerTests {
 
     // Prepare the second channel
     let secondChannel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     secondChannelPromise.succeed(secondChannel)
@@ -643,7 +643,7 @@ extension ConnectionManagerTests {
 
     // Setup the channel.
     let channel = EmbeddedChannel(
-      handler: GRPCIdleHandler(mode: .client(manager)),
+      handler: GRPCIdleHandler(mode: .client(manager), logger: self.logger),
       loop: self.loop
     )
     channelPromise.succeed(channel)
@@ -791,7 +791,7 @@ extension ConnectionManagerTests {
     let channel = EmbeddedChannel(loop: self.loop)
     XCTAssertNoThrow(try channel.pipeline.addHandlers([
       CloseDroppingHandler(),
-      GRPCIdleHandler(mode: .client(manager)),
+      GRPCIdleHandler(mode: .client(manager), logger: manager.logger),
     ]).wait())
     channelPromise.succeed(channel)
     self.loop.run()

+ 1 - 1
Tests/GRPCTests/GRPCTestCase.swift

@@ -116,7 +116,7 @@ class GRPCTestCase: XCTestCase {
         .map { key, value in "\(key)=\(value)" }
         .joined(separator: " ")
 
-      print("\(date) \(log.label) \(level):", log.message, formattedMetadata)
+      print("\(date) \(log.label) \(level):", log.message, "{", formattedMetadata, "}")
     }
 
     print("Test Case '\(self.name)' logs finished")

+ 4 - 0
Tests/GRPCTests/GRPCTimeoutTests.swift

@@ -124,4 +124,8 @@ class GRPCTimeoutTests: GRPCTestCase {
     let timeout = GRPCTimeout(deadline: deadline, testingOnlyNow: .uptimeNanoseconds(200))
     XCTAssertEqual(timeout.nanoseconds, 0)
   }
+
+  func testTimeoutFromDistantFuture() throws {
+    XCTAssertEqual(GRPCTimeout(deadline: .distantFuture), .infinite)
+  }
 }

+ 1 - 0
Tests/GRPCTests/XCTestManifests.swift

@@ -626,6 +626,7 @@ extension GRPCTimeoutTests {
         ("testRoundingNegativeTimeout", testRoundingNegativeTimeout),
         ("testRoundingSecondsTimeout", testRoundingSecondsTimeout),
         ("testTimeoutFromDeadline", testTimeoutFromDeadline),
+        ("testTimeoutFromDistantFuture", testTimeoutFromDistantFuture),
         ("testTimeoutFromPastDeadline", testTimeoutFromPastDeadline),
     ]
 }