Просмотр исходного кода

Remove GRPCLogger (#1853)

Motivation:

GRPCLogger was added so that the source of logs was always set to
'GRPC'. This was done because swift-log would set the source to the
parent directory of the source file, assuming it would be the module,
which isn't always the case. This was fixed in swift-log some time ago,
making GRPCLogger redundant.

Modifications:

- Remove GRPCLogger

Result:

Less code
George Barnett 1 год назад
Родитель
Commit
6ee1ed29e1

+ 8 - 8
Sources/GRPC/ConnectionPool/ConnectionPool.swift

@@ -108,9 +108,9 @@ internal final class ConnectionPool {
   @usableFromInline
   internal var delegate: GRPCConnectionPoolDelegate?
 
-  /// A logger which always sets "GRPC" as its source.
+  /// A logger.
   @usableFromInline
-  internal let logger: GRPCLogger
+  internal let logger: Logger
 
   /// Returns `NIODeadline` representing 'now'. This is useful for testing.
   @usableFromInline
@@ -175,7 +175,7 @@ internal final class ConnectionPool {
     channelProvider: ConnectionManagerChannelProvider,
     streamLender: StreamLender,
     delegate: GRPCConnectionPoolDelegate?,
-    logger: GRPCLogger,
+    logger: Logger,
     now: @escaping () -> NIODeadline = NIODeadline.now
   ) {
     precondition(
@@ -241,7 +241,7 @@ internal final class ConnectionPool {
       connectionBackoff: self.connectionBackoff,
       connectivityDelegate: self,
       http2Delegate: self,
-      logger: self.logger.unwrapped
+      logger: self.logger
     )
     let id = manager.id
     self._connections[id] = PerConnectionState(manager: manager)
@@ -274,7 +274,7 @@ internal final class ConnectionPool {
   internal func makeStream(
     deadline: NIODeadline,
     promise: EventLoopPromise<Channel>,
-    logger: GRPCLogger,
+    logger: Logger,
     initializer: @escaping @Sendable (Channel) -> EventLoopFuture<Void>
   ) {
     if self.eventLoop.inEventLoop {
@@ -300,7 +300,7 @@ internal final class ConnectionPool {
   @inlinable
   internal func makeStream(
     deadline: NIODeadline,
-    logger: GRPCLogger,
+    logger: Logger,
     initializer: @escaping @Sendable (Channel) -> EventLoopFuture<Void>
   ) -> EventLoopFuture<Channel> {
     let promise = self.eventLoop.makePromise(of: Channel.self)
@@ -336,7 +336,7 @@ internal final class ConnectionPool {
   internal func _makeStream(
     deadline: NIODeadline,
     promise: EventLoopPromise<Channel>,
-    logger: GRPCLogger,
+    logger: Logger,
     initializer: @escaping @Sendable (Channel) -> EventLoopFuture<Void>
   ) {
     self.eventLoop.assertInEventLoop()
@@ -403,7 +403,7 @@ internal final class ConnectionPool {
   internal func _enqueueWaiter(
     deadline: NIODeadline,
     promise: EventLoopPromise<Channel>,
-    logger: GRPCLogger,
+    logger: Logger,
     initializer: @escaping @Sendable (Channel) -> EventLoopFuture<Void>
   ) {
     // Don't overwhelm the pool with too many waiters.

+ 4 - 4
Sources/GRPC/ConnectionPool/PoolManager.swift

@@ -136,7 +136,7 @@ internal final class PoolManager {
   internal static func makeInitializedPoolManager(
     using group: EventLoopGroup,
     perPoolConfiguration: PerPoolConfiguration,
-    logger: GRPCLogger
+    logger: Logger
   ) -> PoolManager {
     let manager = PoolManager(privateButUsableFromInline_group: group)
     manager.initialize(perPoolConfiguration: perPoolConfiguration, logger: logger)
@@ -181,7 +181,7 @@ internal final class PoolManager {
   ///   - logger: A logger.
   private func initialize(
     perPoolConfiguration configuration: PerPoolConfiguration,
-    logger: GRPCLogger
+    logger: Logger
   ) {
     var logger = logger
     logger[metadataKey: Metadata.id] = "\(self.id)"
@@ -244,7 +244,7 @@ internal final class PoolManager {
   /// - Returns: An array of `ConnectionPool`s.
   private func makePools(
     perPoolConfiguration configuration: PerPoolConfiguration,
-    logger: GRPCLogger
+    logger: Logger
   ) -> [ConnectionPool] {
     let eventLoops = self.group.makeIterator()
     return eventLoops.map { eventLoop in
@@ -311,7 +311,7 @@ internal final class PoolManager {
   internal func makeStream(
     preferredEventLoop: EventLoop?,
     deadline: NIODeadline,
-    logger: GRPCLogger,
+    logger: Logger,
     streamInitializer initializer: @escaping @Sendable (Channel) -> EventLoopFuture<Void>
   ) -> PooledStreamChannel {
     let preferredEventLoopID = preferredEventLoop.map { EventLoopID($0) }

+ 2 - 2
Sources/GRPC/ConnectionPool/PooledChannel.swift

@@ -104,7 +104,7 @@ internal final class PooledChannel: GRPCChannel {
         delegate: configuration.delegate,
         statsPeriod: configuration.statsPeriod
       ),
-      logger: configuration.backgroundActivityLogger.wrapped
+      logger: configuration.backgroundActivityLogger
     )
   }
 
@@ -119,7 +119,7 @@ internal final class PooledChannel: GRPCChannel {
     let streamChannel = self._pool.makeStream(
       preferredEventLoop: preferredEventLoop,
       deadline: deadline,
-      logger: GRPCLogger(wrapping: callOptions.logger)
+      logger: callOptions.logger
     ) { channel in
       return channel.eventLoop.makeSucceededVoidFuture()
     }

+ 2 - 2
Sources/GRPC/GRPCClientChannelHandler.swift

@@ -284,7 +284,7 @@ public enum GRPCCallType: Hashable, Sendable {
 /// }
 /// ```
 internal final class GRPCClientChannelHandler {
-  private let logger: GRPCLogger
+  private let logger: Logger
   private var stateMachine: GRPCClientStateMachine
   private let maximumReceiveMessageLength: Int
 
@@ -297,7 +297,7 @@ internal final class GRPCClientChannelHandler {
   internal init(
     callType: GRPCCallType,
     maximumReceiveMessageLength: Int,
-    logger: GRPCLogger
+    logger: Logger
   ) {
     self.logger = logger
     self.maximumReceiveMessageLength = maximumReceiveMessageLength

+ 0 - 129
Sources/GRPC/GRPCLogger.swift

@@ -1,129 +0,0 @@
-/*
- * Copyright 2021, gRPC Authors All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import Logging
-import NIOCore
-
-/// Wraps `Logger` to always provide the source as "GRPC".
-///
-/// See https://github.com/apple/swift-log/issues/145 for rationale.
-@usableFromInline
-internal struct GRPCLogger {
-  @usableFromInline
-  internal var logger: Logger
-
-  internal var unwrapped: Logger {
-    return self.logger
-  }
-
-  @inlinable
-  internal init(wrapping logger: Logger) {
-    self.logger = logger
-  }
-
-  internal subscript(metadataKey metadataKey: String) -> Logger.Metadata.Value? {
-    get {
-      return self.logger[metadataKey: metadataKey]
-    }
-    set {
-      self.logger[metadataKey: metadataKey] = newValue
-    }
-  }
-
-  @usableFromInline
-  internal func trace(
-    _ message: @autoclosure () -> Logger.Message,
-    metadata: @autoclosure () -> Logger.Metadata? = nil,
-    file: String = #fileID,
-    function: String = #function,
-    line: UInt = #line
-  ) {
-    self.logger.trace(
-      message(),
-      metadata: metadata(),
-      source: "GRPC",
-      file: file,
-      function: function,
-      line: line
-    )
-  }
-
-  @usableFromInline
-  internal func debug(
-    _ message: @autoclosure () -> Logger.Message,
-    metadata: @autoclosure () -> Logger.Metadata? = nil,
-    file: String = #fileID,
-    function: String = #function,
-    line: UInt = #line
-  ) {
-    self.logger.debug(
-      message(),
-      metadata: metadata(),
-      source: "GRPC",
-      file: file,
-      function: function,
-      line: line
-    )
-  }
-
-  @usableFromInline
-  internal func notice(
-    _ message: @autoclosure () -> Logger.Message,
-    metadata: @autoclosure () -> Logger.Metadata? = nil,
-    file: String = #fileID,
-    function: String = #function,
-    line: UInt = #line
-  ) {
-    self.logger.notice(
-      message(),
-      metadata: metadata(),
-      source: "GRPC",
-      file: file,
-      function: function,
-      line: line
-    )
-  }
-
-  @usableFromInline
-  internal func warning(
-    _ message: @autoclosure () -> Logger.Message,
-    metadata: @autoclosure () -> Logger.Metadata? = nil,
-    file: String = #fileID,
-    function: String = #function,
-    line: UInt = #line
-  ) {
-    self.logger.warning(
-      message(),
-      metadata: metadata(),
-      source: "GRPC",
-      file: file,
-      function: function,
-      line: line
-    )
-  }
-}
-
-extension GRPCLogger {
-  internal mutating func addIPAddressMetadata(local: SocketAddress?, remote: SocketAddress?) {
-    self.logger.addIPAddressMetadata(local: local, remote: remote)
-  }
-}
-
-extension Logger {
-  @inlinable
-  internal var wrapped: GRPCLogger {
-    return GRPCLogger(wrapping: self)
-  }
-}

+ 1 - 1
Sources/GRPC/Interceptor/ClientInterceptorContext.swift

@@ -36,7 +36,7 @@ public struct ClientInterceptorContext<Request, Response> {
 
   /// A logger.
   public var logger: Logger {
-    return self._pipeline.logger.unwrapped
+    return self._pipeline.logger
   }
 
   /// The type of the RPC, e.g. "unary".

+ 4 - 4
Sources/GRPC/Interceptor/ClientInterceptorPipeline.swift

@@ -61,7 +61,7 @@ import NIOHTTP2
 internal final class ClientInterceptorPipeline<Request, Response> {
   /// A logger.
   @usableFromInline
-  internal var logger: GRPCLogger
+  internal var logger: Logger
 
   /// The `EventLoop` this RPC is being executed on.
   @usableFromInline
@@ -135,7 +135,7 @@ internal final class ClientInterceptorPipeline<Request, Response> {
   internal init(
     eventLoop: EventLoop,
     details: CallDetails,
-    logger: GRPCLogger,
+    logger: Logger,
     interceptors: [ClientInterceptor<Request, Response>],
     errorDelegate: ClientErrorDelegate?,
     onError: @escaping (Error) -> Void,
@@ -289,13 +289,13 @@ internal final class ClientInterceptorPipeline<Request, Response> {
       unwrappedError = errorContext.error
       self._errorDelegate?.didCatchError(
         errorContext.error,
-        logger: self.logger.unwrapped,
+        logger: self.logger,
         file: errorContext.file,
         line: errorContext.line
       )
     } else {
       unwrappedError = error
-      self._errorDelegate?.didCatchErrorWithoutContext(error, logger: self.logger.unwrapped)
+      self._errorDelegate?.didCatchErrorWithoutContext(error, logger: self.logger)
     }
 
     // Emit the unwrapped error.

+ 3 - 4
Sources/GRPC/Interceptor/ClientTransport.swift

@@ -68,7 +68,7 @@ internal final class ClientTransport<Request, Response> {
   internal let callDetails: CallDetails
 
   /// A logger.
-  internal var logger: GRPCLogger
+  internal var logger: Logger
 
   /// Is the call streaming requests?
   private var isStreamingRequests: Bool {
@@ -119,15 +119,14 @@ internal final class ClientTransport<Request, Response> {
     self.callEventLoop = eventLoop
     self.callDetails = details
     self.onStart = onStart
-    let logger = GRPCLogger(wrapping: details.options.logger)
-    self.logger = logger
+    self.logger = details.options.logger
     self.serializer = serializer
     self.deserializer = deserializer
     // The references to self held by the pipeline are dropped when it is closed.
     self._pipeline = ClientInterceptorPipeline(
       eventLoop: eventLoop,
       details: details,
-      logger: logger,
+      logger: details.options.logger,
       interceptors: interceptors,
       errorDelegate: errorDelegate,
       onError: onError,

+ 1 - 1
Sources/GRPC/_EmbeddedThroughput.swift

@@ -33,7 +33,7 @@ extension EmbeddedChannel {
       GRPCClientChannelHandler(
         callType: callType,
         maximumReceiveMessageLength: .max,
-        logger: GRPCLogger(wrapping: logger)
+        logger: logger
       ),
       GRPCClientCodecHandler(
         serializer: ProtobufSerializer<Request>(),

+ 1 - 1
Tests/GRPCTests/ClientInterceptorPipelineTests.swift

@@ -45,7 +45,7 @@ class ClientInterceptorPipelineTests: GRPCTestCase {
     return ClientInterceptorPipeline(
       eventLoop: self.embeddedEventLoop,
       details: callDetails,
-      logger: callDetails.options.logger.wrapped,
+      logger: callDetails.options.logger,
       interceptors: interceptors,
       errorDelegate: errorDelegate,
       onError: onError,

+ 36 - 36
Tests/GRPCTests/ConnectionPool/ConnectionPoolTests.swift

@@ -75,7 +75,7 @@ final class ConnectionPoolTests: GRPCTestCase {
         onUpdateMaxAvailableStreams: onMaximumReservationsChange
       ),
       delegate: delegate,
-      logger: self.logger.wrapped,
+      logger: self.logger,
       now: now
     )
   }
@@ -158,7 +158,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     }
     XCTAssertNoThrow(try pool.shutdown().wait())
 
-    let stream = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let stream = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -174,12 +174,12 @@ final class ConnectionPoolTests: GRPCTestCase {
     }
 
     let waiting = (0 ..< maxWaiters).map { _ in
-      return pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+      return pool.makeStream(deadline: .distantFuture, logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
     }
 
-    let tooManyWaiters = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let tooManyWaiters = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -201,7 +201,7 @@ final class ConnectionPoolTests: GRPCTestCase {
       self.noChannelExpected($0, $1)
     }
 
-    let waiter = pool.makeStream(deadline: .uptimeNanoseconds(10), logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .uptimeNanoseconds(10), logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     XCTAssertEqual(pool.sync.waiters, 1)
@@ -221,7 +221,7 @@ final class ConnectionPoolTests: GRPCTestCase {
 
     self.eventLoop.advanceTime(to: .uptimeNanoseconds(10))
 
-    let waiter = pool.makeStream(deadline: .uptimeNanoseconds(5), logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .uptimeNanoseconds(5), logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     XCTAssertEqual(pool.sync.waiters, 1)
@@ -242,7 +242,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     // No channels yet.
     XCTAssertEqual(controller.count, 0)
 
-    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -277,7 +277,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     let (pool, controller) = self.setUpPoolAndController()
     pool.initialize(connections: 1)
 
-    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -297,7 +297,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     // connection we won't have to wait.
     XCTAssertEqual(pool.sync.waiters, 0)
     XCTAssertEqual(pool.sync.reservedStreams, 1)
-    let notWaiting = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let notWaiting = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Still no waiters.
@@ -320,7 +320,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     // Enqueue twice as many waiters as the connection will be able to handle.
     let maxConcurrentStreams = 10
     let waiters = (0 ..< maxConcurrentStreams * 2).map { _ in
-      return pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+      return pool.makeStream(deadline: .distantFuture, logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
     }
@@ -373,7 +373,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     )
     pool.initialize(connections: 1)
 
-    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -392,7 +392,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     // Create a handful of streams.
     XCTAssertEqual(pool.sync.availableStreams, 9)
     for _ in 0 ..< 5 {
-      let notWaiting = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+      let notWaiting = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
       self.eventLoop.run()
@@ -422,7 +422,7 @@ final class ConnectionPoolTests: GRPCTestCase {
 
     // Reserve a bunch of streams.
     let waiters = (0 ..< 10).map { _ in
-      return pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+      return pool.makeStream(deadline: .distantFuture, logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
     }
@@ -443,7 +443,7 @@ final class ConnectionPoolTests: GRPCTestCase {
 
     // Add a waiter.
     XCTAssertEqual(pool.sync.waiters, 0)
-    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     XCTAssertEqual(pool.sync.waiters, 1)
@@ -484,11 +484,11 @@ final class ConnectionPoolTests: GRPCTestCase {
     })
     pool.initialize(connections: 1)
 
-    let waiter1 = pool.makeStream(deadline: .uptimeNanoseconds(10), logger: self.logger.wrapped) {
+    let waiter1 = pool.makeStream(deadline: .uptimeNanoseconds(10), logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
-    let waiter2 = pool.makeStream(deadline: .uptimeNanoseconds(15), logger: self.logger.wrapped) {
+    let waiter2 = pool.makeStream(deadline: .uptimeNanoseconds(15), logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -533,7 +533,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     // No demand so all three connections are idle.
     XCTAssertEqual(pool.sync.idleConnections, 3)
 
-    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -550,7 +550,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     XCTAssertNoThrow(try w1.wait())
     controller.openStreamInChannel(atIndex: 0)
 
-    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -562,7 +562,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     XCTAssertEqual(pool.sync.idleConnections, 1)
 
     // Add more demand before the second connection comes up.
-    let w3 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w3 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -586,7 +586,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     pool.initialize(connections: 1)
     XCTAssertEqual(pool.sync.connections, 1)
 
-    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -619,7 +619,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     XCTAssertEqual(pool.sync.idleConnections, 1)
 
     // Ask for another stream: this will be on the new idle connection.
-    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     self.eventLoop.run()
@@ -664,7 +664,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     pool.initialize(connections: 1)
     XCTAssertEqual(pool.sync.connections, 1)
 
-    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -686,13 +686,13 @@ final class ConnectionPoolTests: GRPCTestCase {
     XCTAssertEqual(pool.sync.idleConnections, 0)
 
     // Enqueue two waiters. One to time out before the reconnect happens.
-    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
     let w3 = pool.makeStream(
       deadline: .uptimeNanoseconds(UInt64(TimeAmount.milliseconds(500).nanoseconds)),
-      logger: self.logger.wrapped
+      logger: self.logger
     ) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
@@ -742,11 +742,11 @@ final class ConnectionPoolTests: GRPCTestCase {
     //   passed but no connection has previously failed)
     // - w2 will fail because of a timeout but after the underlying channel has failed to connect so
     //   should have that additional failure information.
-    let w1 = pool.makeStream(deadline: .uptimeNanoseconds(10), logger: self.logger.wrapped) {
+    let w1 = pool.makeStream(deadline: .uptimeNanoseconds(10), logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
-    let w2 = pool.makeStream(deadline: .uptimeNanoseconds(20), logger: self.logger.wrapped) {
+    let w2 = pool.makeStream(deadline: .uptimeNanoseconds(20), logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -803,7 +803,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     // These streams should succeed when the new connection is up. We'll limit the connection to 10
     // streams when we bring it up.
     let streams = (0 ..< 10).map { _ in
-      pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+      pool.makeStream(deadline: .distantFuture, logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
     }
@@ -832,14 +832,14 @@ final class ConnectionPoolTests: GRPCTestCase {
     let now = NIODeadline.now()
     self.eventLoop.advanceTime(to: now)
     let waiters = (0 ..< 10).map { _ in
-      pool.makeStream(deadline: now + .seconds(1), logger: self.logger.wrapped) {
+      pool.makeStream(deadline: now + .seconds(1), logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
     }
 
     // This is one waiter more than is allowed so it should hit too-many-waiters. We don't expect
     // an inner error though, the connection is just busy.
-    let tooManyWaiters = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let tooManyWaiters = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     XCTAssertThrowsError(try tooManyWaiters.wait()) { error in
@@ -893,7 +893,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     })
     pool.initialize(connections: 1)
 
-    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -950,7 +950,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     let connID1 = try assertConnectionAdded(recorder.popFirst())
     let connID2 = try assertConnectionAdded(recorder.popFirst())
 
-    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let waiter = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     // Start creating the channel.
@@ -984,7 +984,7 @@ final class ConnectionPoolTests: GRPCTestCase {
 
     // Okay, more utilization!
     for n in 2 ... 8 {
-      let w = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+      let w = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
         $0.eventLoop.makeSucceededVoidFuture()
       }
 
@@ -996,7 +996,7 @@ final class ConnectionPoolTests: GRPCTestCase {
 
     // The utilisation threshold before bringing up a new connection is 0.9; we have 8 open streams
     // (out of 10) now so opening the next should trigger a connect on the other connection.
-    let w9 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w9 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
     XCTAssertEqual(recorder.popFirst(), .startedConnecting(secondConn))
@@ -1013,7 +1013,7 @@ final class ConnectionPoolTests: GRPCTestCase {
     XCTAssertEqual(recorder.popFirst(), .connectSucceeded(secondConn, 10))
 
     // The next stream should be on the new connection.
-    let w10 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w10 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 
@@ -1107,10 +1107,10 @@ final class ConnectionPoolTests: GRPCTestCase {
 
     // Open two streams, which, because the maxConcurrentStreams is 1, will
     // create two channels.
-    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w1 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
-    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger.wrapped) {
+    let w2 = pool.makeStream(deadline: .distantFuture, logger: self.logger) {
       $0.eventLoop.makeSucceededVoidFuture()
     }
 

+ 1 - 1
Tests/GRPCTests/ConnectionPool/PoolManagerStateMachineTests.swift

@@ -43,7 +43,7 @@ class PoolManagerStateMachineTests: GRPCTestCase {
         onUpdateMaxAvailableStreams: { _ in }
       ),
       delegate: nil,
-      logger: self.logger.wrapped
+      logger: self.logger
     )
   }
 

+ 1 - 1
Tests/GRPCTests/GRPCClientChannelHandlerTests.swift

@@ -39,7 +39,7 @@ class GRPCClientChannelHandlerTests: GRPCTestCase {
     let handler = GRPCClientChannelHandler(
       callType: .unary,
       maximumReceiveMessageLength: .max,
-      logger: GRPCLogger(wrapping: self.clientLogger)
+      logger: self.clientLogger
     )
 
     let channel = EmbeddedChannel(handler: handler)

+ 0 - 40
Tests/GRPCTests/GRPCLoggerTests.swift

@@ -1,40 +0,0 @@
-/*
- * Copyright 2021, gRPC Authors All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Logging
-import XCTest
-
-@testable import GRPC
-
-final class GRPCLoggerTests: GRPCTestCase {
-  func testLogSourceIsGRPC() {
-    let recorder = CapturingLogHandlerFactory(printWhenCaptured: false)
-    let logger = Logger(label: "io.grpc.testing", factory: recorder.make(_:))
-
-    var gRPCLogger = GRPCLogger(wrapping: logger)
-    gRPCLogger[metadataKey: "foo"] = "bar"
-
-    gRPCLogger.debug("foo")
-    gRPCLogger.trace("bar")
-
-    let logs = recorder.clearCapturedLogs()
-    XCTAssertEqual(logs.count, 2)
-    for log in logs {
-      XCTAssertEqual(log.source, "GRPC")
-      XCTAssertEqual(gRPCLogger[metadataKey: "foo"], "bar")
-    }
-  }
-}

+ 1 - 1
Tests/GRPCTests/GRPCStatusCodeTests.swift

@@ -34,7 +34,7 @@ class GRPCStatusCodeTests: GRPCTestCase {
     let handler = GRPCClientChannelHandler(
       callType: .unary,
       maximumReceiveMessageLength: .max,
-      logger: GRPCLogger(wrapping: self.logger)
+      logger: self.logger
     )
     self.channel = EmbeddedChannel(handler: handler)
   }

+ 9 - 24
Tests/GRPCTests/InterceptedRPCCancellationTests.swift

@@ -15,6 +15,7 @@
  */
 import EchoImplementation
 import EchoModel
+import Logging
 import NIOCore
 import NIOPosix
 import XCTest
@@ -86,10 +87,10 @@ final class MagicRequiredServerInterceptor<
     switch part {
     case let .metadata(metadata):
       if metadata.contains(name: "magic") {
-        context.log.debug("metadata contains magic; accepting rpc")
+        context.logger.debug("metadata contains magic; accepting rpc")
         context.receive(part)
       } else {
-        context.log.debug("metadata does not contains magic; rejecting rpc")
+        context.logger.debug("metadata does not contains magic; rejecting rpc")
         let status = GRPCStatus(code: .permissionDenied, message: nil)
         context.send(.end(status, [:]), promise: nil)
       }
@@ -116,7 +117,7 @@ final class MagicAddingClientInterceptor<
     context: ClientInterceptorContext<Request, Response>
   ) {
     if let retry = self.retry {
-      context.log.debug("cancelling retry RPC")
+      context.logger.debug("cancelling retry RPC")
       retry.cancel(promise: promise)
     } else {
       context.cancel(promise: promise)
@@ -129,7 +130,7 @@ final class MagicAddingClientInterceptor<
     context: ClientInterceptorContext<Request, Response>
   ) {
     if let retry = self.retry {
-      context.log.debug("retrying part \(part)")
+      context.logger.debug("retrying part \(part)")
       retry.send(part, promise: promise)
     } else {
       switch part {
@@ -161,7 +162,7 @@ final class MagicAddingClientInterceptor<
 
       XCTAssertNil(self.retry)
 
-      context.log.debug("initial rpc failed, retrying")
+      context.logger.debug("initial rpc failed, retrying")
 
       self.retry = self.channel.makeCall(
         path: context.path,
@@ -171,33 +172,17 @@ final class MagicAddingClientInterceptor<
       )
 
       self.retry!.invoke {
-        context.log.debug("intercepting error from retried rpc")
+        context.logger.debug("intercepting error from retried rpc")
         context.errorCaught($0)
       } onResponsePart: { responsePart in
-        context.log.debug("intercepting response part from retried rpc")
+        context.logger.debug("intercepting response part from retried rpc")
         context.receive(responsePart)
       }
 
       while let requestPart = self.requestParts.popFirst() {
-        context.log.debug("replaying \(requestPart) on new rpc")
+        context.logger.debug("replaying \(requestPart) on new rpc")
         self.retry!.send(requestPart, promise: nil)
       }
     }
   }
 }
-
-// MARK: - GRPC Logger
-
-// Our tests also check the "Source" of a logger is "GRPC". That assertion fails when we log from
-// tests so we'll use our internal logger instead.
-extension ClientInterceptorContext {
-  var log: GRPCLogger {
-    return GRPCLogger(wrapping: self.logger)
-  }
-}
-
-extension ServerInterceptorContext {
-  var log: GRPCLogger {
-    return GRPCLogger(wrapping: self.logger)
-  }
-}