2
0

ConnectionPool.swift 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883
  1. /*
  2. * Copyright 2021, gRPC Authors All rights reserved.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. import Logging
  17. import NIOConcurrencyHelpers
  18. import NIOCore
  19. import NIOHTTP2
@usableFromInline
internal final class ConnectionPool {
  /// The event loop all connections in this pool are running on.
  ///
  /// All pool state (`_state`, `_connections`, `waiters`, `_mostRecentError`) is confined to this
  /// event loop; public entry points hop onto it before touching state.
  @usableFromInline
  internal let eventLoop: EventLoop

  /// The lifecycle state of the pool.
  @usableFromInline
  internal enum State {
    /// The pool is running and accepting stream requests.
    case active
    /// The pool is shutting down; the future completes when shutdown has finished.
    case shuttingDown(EventLoopFuture<Void>)
    /// The pool has shut down; all subsequent stream requests fail immediately.
    case shutdown
  }

  /// The state of the connection pool.
  @usableFromInline
  internal var _state: State = .active

  /// The most recent connection error we have observed.
  ///
  /// This error is used to provide additional context to failed waiters. A waiter may, for example,
  /// timeout because the pool is busy, or because no connection can be established because of an
  /// underlying connection error. In the latter case it's useful for the caller to know why the
  /// connection is failing at the RPC layer.
  ///
  /// This value is cleared when a connection becomes 'available'. That is, when we receive an
  /// HTTP/2 SETTINGS frame.
  ///
  /// This value is set whenever an underlying connection transitions to the transient failure state
  /// or to the idle state and has an associated error.
  @usableFromInline
  internal var _mostRecentError: Error? = nil

  /// Connection managers and their stream availability state keyed by the ID of the connection
  /// manager.
  ///
  /// Connections are accessed by their ID for connection state changes (infrequent) and when
  /// streams are closed (frequent). However when choosing which connection to succeed a waiter
  /// with (frequent) requires the connections to be ordered by their availability. A dictionary
  /// might not be the most efficient data structure (a queue prioritised by stream availability may
  /// be a better choice given the number of connections is likely to be very low in practice).
  @usableFromInline
  internal var _connections: [ConnectionManagerID: PerConnectionState]

  /// The threshold which if exceeded when creating a stream determines whether the pool will
  /// start connecting an idle connection (if one exists).
  ///
  /// The 'load' is calculated as the ratio of demand for streams (the sum of the number of waiters
  /// and the number of reserved streams) and the total number of streams which non-idle connections
  /// could support (this includes the streams that a connection in the connecting state could
  /// support).
  @usableFromInline
  internal let reservationLoadThreshold: Double

  /// The assumed value for the maximum number of concurrent streams a connection can support. We
  /// assume a connection will support this many streams until we know better.
  @usableFromInline
  internal let assumedMaxConcurrentStreams: Int

  /// A queue of waiters which may or may not get a stream in the future.
  @usableFromInline
  internal var waiters: CircularBuffer<Waiter>

  /// The maximum number of waiters allowed, the size of `waiters` must not exceed this value. If
  /// there are this many waiters in the queue then the next waiter will be failed immediately.
  @usableFromInline
  internal let maxWaiters: Int

  /// Configuration for backoff between subsequent connection attempts.
  @usableFromInline
  internal let connectionBackoff: ConnectionBackoff

  /// Provides a channel factory to the `ConnectionManager`.
  @usableFromInline
  internal let channelProvider: ConnectionManagerChannelProvider

  /// The object to notify about changes to stream reservations; in practice this is usually
  /// the `PoolManager`.
  @usableFromInline
  internal let streamLender: StreamLender

  /// An optional observer of connection-level events (added/removed/quiescing/closed, etc.).
  /// Cleared when the pool finishes shutting down.
  @usableFromInline
  internal var delegate: GRPCConnectionPoolDelegate?

  /// A logger which always sets "GRPC" as its source.
  @usableFromInline
  internal let logger: GRPCLogger

  /// Returns `NIODeadline` representing 'now'. This is useful for testing.
  @usableFromInline
  internal let now: () -> NIODeadline

  /// Logging metadata keys.
  @usableFromInline
  internal enum Metadata {
    /// The ID of this pool.
    @usableFromInline
    static let id = "pool.id"
    /// The number of stream reservations (i.e. number of open streams + number of waiters).
    @usableFromInline
    static let reservationsCount = "pool.reservations.count"
    /// The number of streams this pool can support with non-idle connections at this time.
    @usableFromInline
    static let reservationsCapacity = "pool.reservations.capacity"
    /// The current reservation load (i.e. reservation count / reservation capacity)
    @usableFromInline
    static let reservationsLoad = "pool.reservations.load"
    /// The reservation load threshold, above which a new connection will be created (if possible).
    @usableFromInline
    static let reservationsLoadThreshold = "pool.reservations.loadThreshold"
    /// The current number of waiters in the pool.
    @usableFromInline
    static let waitersCount = "pool.waiters.count"
    /// The maximum number of waiters the pool is configured to allow.
    @usableFromInline
    static let waitersMax = "pool.waiters.max"
    /// The number of waiters which were successfully serviced.
    @usableFromInline
    static let waitersServiced = "pool.waiters.serviced"
    /// The ID of waiter.
    @usableFromInline
    static let waiterID = "pool.waiter.id"
  }

  /// Create a connection pool.
  ///
  /// - Parameters:
  ///   - eventLoop: The event loop all connections in this pool run on.
  ///   - maxWaiters: Maximum number of queued waiters before new waiters are failed immediately.
  ///   - reservationLoadThreshold: Load ratio at or above which an idle connection is started.
  ///     Must be in the range `0.0 ... 1.0`.
  ///   - assumedMaxConcurrentStreams: Streams assumed per connection until a SETTINGS frame tells
  ///     us otherwise.
  ///   - connectionBackoff: Backoff configuration for reconnect attempts.
  ///   - channelProvider: Factory providing channels to each `ConnectionManager`.
  ///   - streamLender: Object notified about stream reservation changes (usually the pool manager).
  ///   - delegate: Optional observer of connection-level events.
  ///   - logger: Pool logger.
  ///   - now: Clock function returning the current `NIODeadline`; injectable for testing.
  @usableFromInline
  init(
    eventLoop: EventLoop,
    maxWaiters: Int,
    reservationLoadThreshold: Double,
    assumedMaxConcurrentStreams: Int,
    connectionBackoff: ConnectionBackoff,
    channelProvider: ConnectionManagerChannelProvider,
    streamLender: StreamLender,
    delegate: GRPCConnectionPoolDelegate?,
    logger: GRPCLogger,
    now: @escaping () -> NIODeadline = NIODeadline.now
  ) {
    precondition(
      (0.0 ... 1.0).contains(reservationLoadThreshold),
      "reservationLoadThreshold must be within the range 0.0 ... 1.0"
    )
    self.reservationLoadThreshold = reservationLoadThreshold
    self.assumedMaxConcurrentStreams = assumedMaxConcurrentStreams
    self._connections = [:]
    self.maxWaiters = maxWaiters
    self.waiters = CircularBuffer(initialCapacity: 16)
    self.eventLoop = eventLoop
    self.connectionBackoff = connectionBackoff
    self.channelProvider = channelProvider
    self.streamLender = streamLender
    self.delegate = delegate
    self.logger = logger
    self.now = now
  }

  /// Initialize the connection pool.
  ///
  /// - Parameter connections: The number of connections to add to the pool.
  internal func initialize(connections: Int) {
    assert(self._connections.isEmpty)
    self._connections.reserveCapacity(connections)
    while self._connections.count < connections {
      self.addConnectionToPool()
    }
  }

  /// Make and add a new connection to the pool.
  ///
  /// The new connection starts idle ('.waitsForConnectivity') and is only connected on demand.
  /// The pool registers itself as both the connectivity and HTTP/2 delegate of the manager.
  private func addConnectionToPool() {
    let manager = ConnectionManager(
      eventLoop: self.eventLoop,
      channelProvider: self.channelProvider,
      callStartBehavior: .waitsForConnectivity,
      connectionBackoff: self.connectionBackoff,
      connectivityDelegate: self,
      http2Delegate: self,
      logger: self.logger.unwrapped
    )
    let id = manager.id
    self._connections[id] = PerConnectionState(manager: manager)
    self.delegate?.connectionAdded(id: .init(id))
  }

  // MARK: - Called from the pool manager

  /// Make and initialize an HTTP/2 stream `Channel`.
  ///
  /// - Parameters:
  ///   - deadline: The point in time by which the `promise` must have been resolved.
  ///   - promise: A promise for a `Channel`.
  ///   - logger: A request logger.
  ///   - initializer: A closure to initialize the `Channel` with.
  @inlinable
  internal func makeStream(
    deadline: NIODeadline,
    promise: EventLoopPromise<Channel>,
    logger: GRPCLogger,
    initializer: @escaping (Channel) -> EventLoopFuture<Void>
  ) {
    // Hop to the pool's event loop if necessary; all state access happens there.
    if self.eventLoop.inEventLoop {
      self._makeStream(
        deadline: deadline,
        promise: promise,
        logger: logger,
        initializer: initializer
      )
    } else {
      self.eventLoop.execute {
        self._makeStream(
          deadline: deadline,
          promise: promise,
          logger: logger,
          initializer: initializer
        )
      }
    }
  }

  /// See `makeStream(deadline:promise:logger:initializer:)`.
  @inlinable
  internal func makeStream(
    deadline: NIODeadline,
    logger: GRPCLogger,
    initializer: @escaping (Channel) -> EventLoopFuture<Void>
  ) -> EventLoopFuture<Channel> {
    let promise = self.eventLoop.makePromise(of: Channel.self)
    self.makeStream(deadline: deadline, promise: promise, logger: logger, initializer: initializer)
    return promise.futureResult
  }

  /// Shutdown the connection pool.
  ///
  /// Existing waiters will be failed and all underlying connections will be shutdown. Subsequent
  /// calls to `makeStream` will be failed immediately.
  ///
  /// - Parameter mode: The mode to use when shutting down.
  /// - Returns: A future indicated when shutdown has been completed.
  internal func shutdown(mode: ConnectionManager.ShutdownMode) -> EventLoopFuture<Void> {
    let promise = self.eventLoop.makePromise(of: Void.self)
    if self.eventLoop.inEventLoop {
      self._shutdown(mode: mode, promise: promise)
    } else {
      self.eventLoop.execute {
        self._shutdown(mode: mode, promise: promise)
      }
    }
    return promise.futureResult
  }

  /// See `makeStream(deadline:promise:logger:initializer:)`.
  ///
  /// - Important: Must be called on the pool's `EventLoop`.
  @inlinable
  internal func _makeStream(
    deadline: NIODeadline,
    promise: EventLoopPromise<Channel>,
    logger: GRPCLogger,
    initializer: @escaping (Channel) -> EventLoopFuture<Void>
  ) {
    self.eventLoop.assertInEventLoop()
    guard case .active = self._state else {
      // Fail the promise right away if we're shutting down or already shut down.
      promise.fail(ConnectionPoolError.shutdown)
      return
    }
    // Try to make a stream on an existing connection.
    let streamCreated = self._tryMakeStream(promise: promise, initializer: initializer)
    if !streamCreated {
      // No stream was created, wait for one.
      self._enqueueWaiter(
        deadline: deadline,
        promise: promise,
        logger: logger,
        initializer: initializer
      )
    }
  }

  /// Try to find an existing connection on which we can make a stream.
  ///
  /// - Parameters:
  ///   - promise: A promise to succeed if we can make a stream.
  ///   - initializer: A closure to initialize the stream with.
  /// - Returns: A boolean value indicating whether the stream was created or not.
  @inlinable
  internal func _tryMakeStream(
    promise: EventLoopPromise<Channel>,
    initializer: @escaping (Channel) -> EventLoopFuture<Void>
  ) -> Bool {
    // We shouldn't jump the queue.
    guard self.waiters.isEmpty else {
      return false
    }
    // Reserve a stream, if we can.
    guard let multiplexer = self._reserveStreamFromMostAvailableConnection() else {
      return false
    }
    multiplexer.createStreamChannel(promise: promise, initializer)
    // Has reserving another stream tipped us over the limit for needing another connection?
    if self._shouldBringUpAnotherConnection() {
      self._startConnectingIdleConnection()
    }
    return true
  }

  /// Enqueue a waiter to be provided with a stream at some point in the future.
  ///
  /// - Parameters:
  ///   - deadline: The point in time by which the promise should have been completed.
  ///   - promise: The promise to complete with the `Channel`.
  ///   - logger: A logger.
  ///   - initializer: A closure to initialize the `Channel` with.
  @inlinable
  internal func _enqueueWaiter(
    deadline: NIODeadline,
    promise: EventLoopPromise<Channel>,
    logger: GRPCLogger,
    initializer: @escaping (Channel) -> EventLoopFuture<Void>
  ) {
    // Don't overwhelm the pool with too many waiters.
    guard self.waiters.count < self.maxWaiters else {
      logger.trace("connection pool has too many waiters", metadata: [
        Metadata.waitersMax: "\(self.maxWaiters)",
      ])
      promise.fail(ConnectionPoolError.tooManyWaiters(connectionError: self._mostRecentError))
      return
    }
    let waiter = Waiter(deadline: deadline, promise: promise, channelInitializer: initializer)
    // Fail the waiter and punt it from the queue when it times out. It's okay that we schedule the
    // timeout before appending it to the waiters, it won't run until the next event loop tick at
    // the earliest (even if the deadline has already passed).
    waiter.scheduleTimeout(on: self.eventLoop) {
      waiter.fail(ConnectionPoolError.deadlineExceeded(connectionError: self._mostRecentError))
      if let index = self.waiters.firstIndex(where: { $0.id == waiter.id }) {
        self.waiters.remove(at: index)
        logger.trace("timed out waiting for a connection", metadata: [
          Metadata.waiterID: "\(waiter.id)",
          Metadata.waitersCount: "\(self.waiters.count)",
        ])
      }
    }
    // request logger
    logger.debug("waiting for a connection to become available", metadata: [
      Metadata.waiterID: "\(waiter.id)",
      Metadata.waitersCount: "\(self.waiters.count)",
    ])
    self.waiters.append(waiter)
    // pool logger
    self.logger.trace("enqueued connection waiter", metadata: [
      Metadata.waitersCount: "\(self.waiters.count)",
    ])
    // Adding a waiter increases demand; this may warrant spinning up an idle connection.
    if self._shouldBringUpAnotherConnection() {
      self._startConnectingIdleConnection()
    }
  }

  /// Compute the current demand and capacity for streams.
  ///
  /// The 'demand' for streams is the number of reserved streams and the number of waiters. The
  /// capacity for streams is the product of max concurrent streams and the number of non-idle
  /// connections.
  ///
  /// - Returns: A tuple of the demand and capacity for streams.
  @usableFromInline
  internal func _computeStreamDemandAndCapacity() -> (demand: Int, capacity: Int) {
    let demand = self.sync.reservedStreams + self.sync.waiters
    // TODO: make this cheaper by storing and incrementally updating the number of idle connections
    let capacity = self._connections.values.reduce(0) { sum, state in
      if state.manager.sync.isIdle || state.isQuiescing {
        // Idle connection or quiescing (so the capacity should be ignored).
        return sum
      } else if let knownMaxAvailableStreams = state.maxAvailableStreams {
        // A known value of max concurrent streams, i.e. the connection is active.
        return sum + knownMaxAvailableStreams
      } else {
        // Not idle and no known value, the connection must be connecting so use our assumed value.
        return sum + self.assumedMaxConcurrentStreams
      }
    }
    return (demand, capacity)
  }

  /// Returns whether the pool should start connecting an idle connection (if one exists).
  @usableFromInline
  internal func _shouldBringUpAnotherConnection() -> Bool {
    let (demand, capacity) = self._computeStreamDemandAndCapacity()
    // Infinite -- i.e. all connections are idle or no connections exist -- is okay here as it
    // will always be greater than the threshold and a new connection will be spun up.
    let load = Double(demand) / Double(capacity)
    let loadExceedsThreshold = load >= self.reservationLoadThreshold
    if loadExceedsThreshold {
      self.logger.debug(
        "stream reservation load factor greater than or equal to threshold, bringing up additional connection if available",
        metadata: [
          Metadata.reservationsCount: "\(demand)",
          Metadata.reservationsCapacity: "\(capacity)",
          Metadata.reservationsLoad: "\(load)",
          Metadata.reservationsLoadThreshold: "\(self.reservationLoadThreshold)",
        ]
      )
    }
    return loadExceedsThreshold
  }

  /// Starts connecting an idle connection, if one exists.
  @usableFromInline
  internal func _startConnectingIdleConnection() {
    if let index = self._connections.values.firstIndex(where: { $0.manager.sync.isIdle }) {
      self._connections.values[index].manager.sync.startConnecting()
    }
  }

  /// Returns the index in `self.connections.values` of the connection with the most available
  /// streams. Returns `self.connections.endIndex` if no connection has at least one stream
  /// available.
  ///
  /// - Note: this is linear in the number of connections.
  @usableFromInline
  internal func _mostAvailableConnectionIndex(
  ) -> Dictionary<ConnectionManagerID, PerConnectionState>.Index {
    var index = self._connections.values.startIndex
    var selectedIndex = self._connections.values.endIndex
    var mostAvailableStreams = 0
    while index != self._connections.values.endIndex {
      let availableStreams = self._connections.values[index].availableStreams
      if availableStreams > mostAvailableStreams {
        mostAvailableStreams = availableStreams
        selectedIndex = index
      }
      self._connections.values.formIndex(after: &index)
    }
    return selectedIndex
  }

  /// Reserves a stream from the connection with the most available streams, if one exists.
  ///
  /// - Returns: The `HTTP2StreamMultiplexer` from the connection the stream was reserved from,
  ///   or `nil` if no stream could be reserved.
  @usableFromInline
  internal func _reserveStreamFromMostAvailableConnection() -> HTTP2StreamMultiplexer? {
    let index = self._mostAvailableConnectionIndex()
    if index != self._connections.endIndex {
      // '!' is okay here; the most available connection must have at least one stream available
      // to reserve.
      return self._connections.values[index].reserveStream()!
    } else {
      return nil
    }
  }

  /// See `shutdown(mode:)`.
  ///
  /// - Parameter promise: A `promise` to complete when the pool has been shutdown.
  @usableFromInline
  internal func _shutdown(mode: ConnectionManager.ShutdownMode, promise: EventLoopPromise<Void>) {
    self.eventLoop.assertInEventLoop()
    switch self._state {
    case .active:
      self.logger.debug("shutting down connection pool")
      // We're shutting down now and when that's done we'll be fully shutdown.
      self._state = .shuttingDown(promise.futureResult)
      promise.futureResult.whenComplete { _ in
        self._state = .shutdown
        self.delegate = nil
        self.logger.trace("finished shutting down connection pool")
      }
      // Shutdown all the connections and remove them from the pool.
      let connections = self._connections
      self._connections.removeAll()
      let allShutdown: [EventLoopFuture<Void>] = connections.values.map {
        let id = $0.manager.id
        let manager = $0.manager
        return manager.eventLoop.flatSubmit {
          // If the connection was idle/shutdown before calling shutdown then we shouldn't tell
          // the delegate the connection closed (because it either never connected or was already
          // informed about this).
          let connectionIsInactive = manager.sync.isIdle || manager.sync.isShutdown
          return manager.shutdown(mode: mode).always { _ in
            if !connectionIsInactive {
              self.delegate?.connectionClosed(id: .init(id), error: nil)
            }
            self.delegate?.connectionRemoved(id: .init(id))
          }
        }
      }
      // Fail the outstanding waiters.
      while let waiter = self.waiters.popFirst() {
        waiter.fail(ConnectionPoolError.shutdown)
      }
      // Cascade the result of the shutdown into the promise.
      EventLoopFuture.andAllSucceed(allShutdown, promise: promise)
    case let .shuttingDown(future):
      // We're already shutting down, cascade the result.
      promise.completeWith(future)
    case .shutdown:
      // Already shutdown, fine.
      promise.succeed(())
    }
  }
}
extension ConnectionPool: ConnectionManagerConnectivityDelegate {
  // We're interested in a few different situations here:
  //
  // 1. The connection was usable ('ready') and is no longer usable (either it became idle or
  //    encountered an error. If this happens we need to notify any connections of the change as
  //    they may no longer be used for new RPCs.
  // 2. The connection was not usable but moved to a different unusable state. If this happens and
  //    we know the cause of the state transition (i.e. the error) then we need to update our most
  //    recent error with the error. This information is used when failing waiters to provide some
  //    context as to why they may be failing.

  /// Handle a connectivity state transition for one of the pool's connections.
  ///
  /// Updates `_mostRecentError` and stream availability first, then forwards relevant events to
  /// the pool delegate (if one is set).
  func connectionStateDidChange(
    _ manager: ConnectionManager,
    from oldState: _ConnectivityState,
    to newState: _ConnectivityState
  ) {
    switch (oldState, newState) {
    case let (.ready, .transientFailure(error)),
      let (.ready, .idle(.some(error))):
      self.updateMostRecentError(error)
      self.connectionUnavailable(manager.id)
    case (.ready, .idle(.none)),
      (.ready, .shutdown):
      self.connectionUnavailable(manager.id)
    case let (_, .transientFailure(error)),
      let (_, .idle(.some(error))):
      self.updateMostRecentError(error)
    default:
      ()
    }

    // The remaining work is purely delegate notification; bail out early if there's no delegate.
    guard let delegate = self.delegate else { return }
    switch (oldState, newState) {
    case (.idle, .connecting),
      (.transientFailure, .connecting):
      delegate.startedConnecting(id: .init(manager.id))
    case (.connecting, .ready):
      // The connection becoming ready is handled by 'receivedSettingsMaxConcurrentStreams'.
      ()
    case (.ready, .idle):
      delegate.connectionClosed(id: .init(manager.id), error: nil)
    case let (.ready, .transientFailure(error)):
      delegate.connectionClosed(id: .init(manager.id), error: error)
    case let (.connecting, .transientFailure(error)):
      delegate.connectFailed(id: .init(manager.id), error: error)
    default:
      ()
    }
  }

  /// Handle a connection beginning to quiesce (e.g. the server sent GOAWAY).
  ///
  /// The quiescing connection stops accepting new streams; a fresh idle connection is added in
  /// its place and reserved streams on the quiescing connection are returned to the lender.
  func connectionIsQuiescing(_ manager: ConnectionManager) {
    self.eventLoop.assertInEventLoop()
    // Find the relevant connection.
    guard let index = self._connections.index(forKey: manager.id) else {
      return
    }
    // Drop the connectivity delegate, we're no longer interested in its events now.
    manager.sync.connectivityDelegate = nil
    // Started quiescing; update our state and notify the pool delegate.
    self._connections.values[index].isQuiescing = true
    self.delegate?.connectionQuiescing(id: .init(manager.id))
    // As the connection is quiescing, we need to know when the current connection it's managing
    // has closed. When that happens drop the H2 delegate and update the pool delegate.
    manager.onCurrentConnectionClose { hadActiveConnection in
      assert(hadActiveConnection)
      if let removed = self._connections.removeValue(forKey: manager.id) {
        removed.manager.sync.http2Delegate = nil
        self.delegate?.connectionClosed(id: .init(removed.manager.id), error: nil)
        self.delegate?.connectionRemoved(id: .init(removed.manager.id))
      }
    }
    // Grab the number of reserved streams (before invalidating the index by adding a connection).
    let reservedStreams = self._connections.values[index].reservedStreams
    // Replace the connection with a new idle one.
    self.addConnectionToPool()
    // Since we're removing this connection from the pool (and no new streams can be created on
    // the connection), the pool manager can ignore any streams reserved against this connection.
    // We do still care about the number of reserved streams for the connection though
    //
    // Note: we don't need to adjust the number of available streams as the effective number of
    // connections hasn't changed.
    self.streamLender.returnStreams(reservedStreams, to: self)
  }

  /// Record `error` as the most recent connection error.
  private func updateMostRecentError(_ error: Error) {
    self.eventLoop.assertInEventLoop()
    // Update the last known error if there is one. We will use it to provide some context to
    // waiters which may fail.
    self._mostRecentError = error
  }

  /// A connection has become unavailable.
  private func connectionUnavailable(_ id: ConnectionManagerID) {
    self.eventLoop.assertInEventLoop()
    // The connection is no longer available: any streams which haven't been closed will be counted
    // as a dropped reservation, we need to tell the pool manager about them.
    if let droppedReservations = self._connections[id]?.unavailable(), droppedReservations > 0 {
      self.streamLender.returnStreams(droppedReservations, to: self)
    }
  }
}
extension ConnectionPool: ConnectionManagerHTTP2Delegate {
  /// A stream was opened on `manager`'s connection; update utilization and notify the delegate.
  internal func streamOpened(_ manager: ConnectionManager) {
    self.eventLoop.assertInEventLoop()
    if let utilization = self._connections[manager.id]?.openedStream(),
      let delegate = self.delegate {
      delegate.connectionUtilizationChanged(
        id: .init(manager.id),
        streamsUsed: utilization.used,
        streamCapacity: utilization.capacity
      )
    }
  }

  /// A stream was closed on `manager`'s connection; return the stream and try to service waiters.
  internal func streamClosed(_ manager: ConnectionManager) {
    self.eventLoop.assertInEventLoop()
    guard let index = self._connections.index(forKey: manager.id) else {
      return
    }
    // Return the stream to the connection and to the pool manager.
    if let utilization = self._connections.values[index].returnStream(),
      let delegate = self.delegate {
      delegate.connectionUtilizationChanged(
        id: .init(manager.id),
        streamsUsed: utilization.used,
        streamCapacity: utilization.capacity
      )
    }
    // Don't return the stream to the pool manager if the connection is quiescing, they were
    // returned when the connection started quiescing.
    if !self._connections.values[index].isQuiescing {
      self.streamLender.returnStreams(1, to: self)
      // A stream was returned: we may be able to service a waiter now.
      self.tryServiceWaiters()
    }
  }

  /// An HTTP/2 SETTINGS frame updated the connection's max concurrent streams.
  ///
  /// Receiving SETTINGS also marks the connection as healthy (clears `_mostRecentError`). Any
  /// change in capacity is propagated to the stream lender, and waiters are serviced since a new
  /// connection may now have streams available.
  internal func receivedSettingsMaxConcurrentStreams(
    _ manager: ConnectionManager,
    maxConcurrentStreams: Int
  ) {
    self.eventLoop.assertInEventLoop()
    // Find the relevant connection.
    guard let index = self._connections.index(forKey: manager.id) else {
      return
    }
    // When the connection is quiescing, the pool manager is not interested in updates to the
    // connection, bail out early.
    if self._connections.values[index].isQuiescing {
      return
    }
    // If we received a SETTINGS update then a connection is okay: drop the last known error.
    self._mostRecentError = nil
    let previous = self._connections.values[index].updateMaxConcurrentStreams(maxConcurrentStreams)
    let delta: Int
    if let previousValue = previous {
      // There was a previous value of max concurrent streams, i.e. a change in value for an
      // existing connection.
      delta = maxConcurrentStreams - previousValue
    } else {
      // There was no previous value so this must be a new connection. We'll compare against our
      // assumed default.
      delta = maxConcurrentStreams - self.assumedMaxConcurrentStreams
      // Notify the delegate.
      self.delegate?.connectSucceeded(id: .init(manager.id), streamCapacity: maxConcurrentStreams)
    }
    if delta != 0 {
      self.streamLender.changeStreamCapacity(by: delta, for: self)
    }
    // We always check, even if `delta` isn't greater than zero as this might be a new connection.
    self.tryServiceWaiters()
  }
}
  653. extension ConnectionPool {
  654. // MARK: - Waiters
  655. /// Try to service as many waiters as possible.
  656. ///
  657. /// This an expensive operation, in the worst case it will be `O(W ⨉ N)` where `W` is the number
  658. /// of waiters and `N` is the number of connections.
  659. private func tryServiceWaiters() {
  660. if self.waiters.isEmpty { return }
  661. self.logger.trace("servicing waiters", metadata: [
  662. Metadata.waitersCount: "\(self.waiters.count)",
  663. ])
  664. let now = self.now()
  665. var serviced = 0
  666. while !self.waiters.isEmpty {
  667. if self.waiters.first!.deadlineIsAfter(now) {
  668. if let multiplexer = self._reserveStreamFromMostAvailableConnection() {
  669. // The waiter's deadline is in the future, and we have a suitable connection. Remove and
  670. // succeed the waiter.
  671. let waiter = self.waiters.removeFirst()
  672. serviced &+= 1
  673. waiter.succeed(with: multiplexer)
  674. } else {
  675. // There are waiters but no available connections, we're done.
  676. break
  677. }
  678. } else {
  679. // The waiter's deadline has already expired, there's no point completing it. Remove it and
  680. // let its scheduled timeout fail the promise.
  681. self.waiters.removeFirst()
  682. }
  683. }
  684. self.logger.trace("done servicing waiters", metadata: [
  685. Metadata.waitersCount: "\(self.waiters.count)",
  686. Metadata.waitersServiced: "\(serviced)",
  687. ])
  688. }
  689. }
  690. extension ConnectionPool {
  691. /// Synchronous operations for the pool, mostly used by tests.
  692. internal struct Sync {
  693. private let pool: ConnectionPool
  694. fileprivate init(_ pool: ConnectionPool) {
  695. self.pool = pool
  696. }
  697. /// The number of outstanding connection waiters.
  698. internal var waiters: Int {
  699. self.pool.eventLoop.assertInEventLoop()
  700. return self.pool.waiters.count
  701. }
  702. /// The number of connection currently in the pool (in any state).
  703. internal var connections: Int {
  704. self.pool.eventLoop.assertInEventLoop()
  705. return self.pool._connections.count
  706. }
  707. /// The number of idle connections in the pool.
  708. internal var idleConnections: Int {
  709. self.pool.eventLoop.assertInEventLoop()
  710. return self.pool._connections.values.reduce(0) { $0 &+ ($1.manager.sync.isIdle ? 1 : 0) }
  711. }
  712. /// The number of streams currently available to reserve across all connections in the pool.
  713. internal var availableStreams: Int {
  714. self.pool.eventLoop.assertInEventLoop()
  715. return self.pool._connections.values.reduce(0) { $0 + $1.availableStreams }
  716. }
  717. /// The number of streams which have been reserved across all connections in the pool.
  718. internal var reservedStreams: Int {
  719. self.pool.eventLoop.assertInEventLoop()
  720. return self.pool._connections.values.reduce(0) { $0 + $1.reservedStreams }
  721. }
  722. }
  723. internal var sync: Sync {
  724. return Sync(self)
  725. }
  726. }
  727. @usableFromInline
  728. internal enum ConnectionPoolError: Error {
  729. /// The pool is shutdown or shutting down.
  730. case shutdown
  731. /// There are too many waiters in the pool.
  732. case tooManyWaiters(connectionError: Error?)
  733. /// The deadline for creating a stream has passed.
  734. case deadlineExceeded(connectionError: Error?)
  735. }
  736. extension ConnectionPoolError: GRPCStatusTransformable {
  737. @usableFromInline
  738. internal func makeGRPCStatus() -> GRPCStatus {
  739. switch self {
  740. case .shutdown:
  741. return GRPCStatus(
  742. code: .unavailable,
  743. message: "The connection pool is shutdown"
  744. )
  745. case let .tooManyWaiters(error):
  746. return GRPCStatus(
  747. code: .resourceExhausted,
  748. message: "The connection pool has no capacity for new RPCs or RPC waiters",
  749. cause: error
  750. )
  751. case let .deadlineExceeded(error):
  752. return GRPCStatus(
  753. code: .deadlineExceeded,
  754. message: "Timed out waiting for an HTTP/2 stream from the connection pool",
  755. cause: error
  756. )
  757. }
  758. }
  759. }