use std::future::{poll_fn, Future, IntoFuture};
use std::io::Error;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::Poll;
use std::thread;

use tokio::net::TcpStream;
use tokio::sync::mpsc::error::TrySendError;
use tokio::task::{JoinError, JoinSet, LocalSet};

use crate::connection::ConnectionInfo;
use crate::server::configuration::ServerConfiguration;
use crate::server::worker::{ConnectionMessage, Worker, WorkerHandle};

use super::{IncomingStream, ShutdownMode};

/// A handle to a running [`Server`](super::Server).
///
/// # Example: waiting for the server to shut down
///
/// You can just `.await` the [`ServerHandle`] to wait for the server to shut down:
///
/// ```rust
/// use std::net::SocketAddr;
/// use pavex::server::Server;
///
/// # #[derive(Clone)] struct ApplicationState;
/// # async fn router(_req: hyper::Request<hyper::body::Incoming>, _conn_info: Option<pavex::connection::ConnectionInfo>, _state: ApplicationState) -> pavex::response::Response { todo!() }
/// # async fn t() -> std::io::Result<()> {
/// # let application_state = ApplicationState;
/// let addr = SocketAddr::from(([127, 0, 0, 1], 8080));
///
/// let server_handle = Server::new()
///     .bind(addr)
///     .await?
///     .serve(router, application_state);
/// // Wait until the server shuts down.
/// server_handle.await;
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct ServerHandle {
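    /// The sending side of the channel used to deliver commands to the acceptor thread.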
    command_outbox: tokio::sync::mpsc::Sender<ServerCommand>,
}

impl ServerHandle {
    pub(super) fn new<HandlerFuture, ApplicationState>(
        config: ServerConfiguration,
        incoming: Vec<IncomingStream>,
        handler: fn(
            http::Request<hyper::body::Incoming>,
            Option<ConnectionInfo>,
            ApplicationState,
        ) -> HandlerFuture,
        application_state: ApplicationState,
    ) -> Self
    where
        HandlerFuture: Future<Output = crate::response::Response> + 'static,
        ApplicationState: Clone + Send + Sync + 'static,
    {
        let (command_outbox, command_inbox) = tokio::sync::mpsc::channel(32);
        let acceptor = Acceptor::new(config, incoming, handler, application_state, command_inbox);
        let _ = acceptor.spawn();
        Self { command_outbox }
    }

    /// Instruct the [`Server`](super::Server) to stop accepting new connections and to shut
    /// down its workers according to the chosen [`ShutdownMode`].
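    ///
    /// # Example: graceful shutdown
    ///
    /// A minimal sketch of a graceful shutdown with a timeout, assuming [`ShutdownMode`] is
    /// exposed alongside [`Server`](super::Server) in `pavex::server`:
    ///
    /// ```rust
    /// use std::net::SocketAddr;
    /// use std::time::Duration;
    /// use pavex::server::{Server, ShutdownMode};
    ///
    /// # #[derive(Clone)] struct ApplicationState;
    /// # async fn router(_req: hyper::Request<hyper::body::Incoming>, _conn_info: Option<pavex::connection::ConnectionInfo>, _state: ApplicationState) -> pavex::response::Response { todo!() }
    /// # async fn t() -> std::io::Result<()> {
    /// # let application_state = ApplicationState;
    /// let addr = SocketAddr::from(([127, 0, 0, 1], 8080));
    /// let server_handle = Server::new()
    ///     .bind(addr)
    ///     .await?
    ///     .serve(router, application_state);
    ///
    /// // Stop accepting new connections and give in-flight requests
    /// // up to 10 seconds to complete before shutting the workers down.
    /// server_handle
    ///     .shutdown(ShutdownMode::Graceful {
    ///         timeout: Duration::from_secs(10),
    ///     })
    ///     .await;
    /// # Ok(())
    /// # }
    /// ```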
    #[doc(alias("stop"))]
    pub async fn shutdown(self, mode: ShutdownMode) {
        let (completion_notifier, completion) = tokio::sync::oneshot::channel();
        // If sending fails, the other end of the channel has already been dropped, which
        // implies that the acceptor thread has already shut down: there is nothing left to do.
        if self
            .command_outbox
            .send(ServerCommand::Shutdown {
                completion_notifier,
                mode,
            })
            .await
            .is_ok()
        {
            let _ = completion.await;
        }
    }
}

impl IntoFuture for ServerHandle {
    type Output = ();
    type IntoFuture = Pin<Box<dyn Future<Output = ()> + Send + Sync + 'static>>;

    fn into_future(self) -> Self::IntoFuture {
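        // `closed()` resolves once the acceptor drops the receiving end of the command channel,
        // i.e. once its event loop has exited. That is our signal that the server has shut down.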
        Box::pin(async move { self.command_outbox.closed().await })
    }
}

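/// A command sent from a [`ServerHandle`] to the acceptor thread.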
enum ServerCommand {
    Shutdown {
        completion_notifier: tokio::sync::oneshot::Sender<()>,
        mode: ShutdownMode,
    },
}

#[must_use]
struct Acceptor<HandlerFuture, ApplicationState> {
    command_inbox: tokio::sync::mpsc::Receiver<ServerCommand>,
    incoming: Vec<IncomingStream>,
    worker_handles: Vec<WorkerHandle>,
    #[allow(dead_code)]
    config: ServerConfiguration,
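    /// The index of the worker that will be offered the next incoming connection (round-robin).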
    next_worker: usize,
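    /// The maximum capacity of each worker's connection inbox.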
    max_queue_length: usize,
    handler: fn(
        http::Request<hyper::body::Incoming>,
        Option<ConnectionInfo>,
        ApplicationState,
    ) -> HandlerFuture,
    application_state: ApplicationState,
    // We just need to stash the generic `HandlerFuture` type *somewhere*.
    // We use `PhantomData<fn() -> HandlerFuture>` rather than `PhantomData<HandlerFuture>`
    // because the function pointer form is always `Send` and `Sync`, which `Acceptor` must be.
    handler_output_future: PhantomData<fn() -> HandlerFuture>,
}

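/// A unit of work for the acceptor's event loop: either a command coming from a
/// [`ServerHandle`] or the outcome of one of the connection-accepting tasks.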
enum AcceptorInboxMessage {
    ServerCommand(ServerCommand),
    Connection(Option<Result<(IncomingStream, TcpStream, SocketAddr), JoinError>>),
}

impl<HandlerFuture, ApplicationState> Acceptor<HandlerFuture, ApplicationState>
where
    HandlerFuture: Future<Output = crate::response::Response> + 'static,
    ApplicationState: Clone + Send + Sync + 'static,
{
    fn new(
        config: ServerConfiguration,
        incoming: Vec<IncomingStream>,
        handler: fn(
            http::Request<hyper::body::Incoming>,
            Option<ConnectionInfo>,
            ApplicationState,
        ) -> HandlerFuture,
        application_state: ApplicationState,
        command_inbox: tokio::sync::mpsc::Receiver<ServerCommand>,
    ) -> Self {
        // TODO: make this configurable
        let max_queue_length = 15;
        let n_workers = config.n_workers.get();
        let mut worker_handles = Vec::with_capacity(n_workers);
        for i in 0..n_workers {
            let (worker, handle) =
                Worker::new(i, max_queue_length, handler, application_state.clone());
            worker_handles.push(handle);
            // TODO: should we panic here?
            worker.spawn().expect("Failed to spawn worker thread");
        }
        Self {
            command_inbox,
            incoming,
            worker_handles,
            config,
            max_queue_length,
            handler,
            handler_output_future: Default::default(),
            next_worker: 0,
            application_state,
        }
    }

    /// Run the acceptor: accept incoming connections and dispatch them to workers.
    ///
    /// Constraint: this method **must not panic**.
    async fn run(self) {
        /// Accept a connection from the given [`IncomingStream`].
        /// If an accept attempt fails, log the error and retry until a connection is successfully
        /// accepted.
        async fn accept_connection(
            incoming: IncomingStream,
        ) -> (IncomingStream, TcpStream, SocketAddr) {
            #[allow(deprecated)]
            // This has been inlined from `tokio`'s codebase, since it's not public API.
            fn is_rt_shutdown_err(err: &Error) -> bool {
                // Bring the `std::error::Error` trait into scope for `source()`/`description()`.
                use std::error::Error as _;

                const RT_SHUTDOWN_ERR: &str =
                    "A Tokio 1.x context was found, but it is being shutdown.";
                if err.kind() != std::io::ErrorKind::Other {
                    return false;
                }
                let Some(inner) = err.get_ref() else {
                    return false;
                };
                // Using `Error::description()` is more efficient than `format!("{inner}")`,
                // so we use it here even if it is deprecated.
                inner.source().is_none() && inner.description() == RT_SHUTDOWN_ERR
            }

            loop {
                match incoming.accept().await {
                    Ok((connection, remote_peer)) => return (incoming, connection, remote_peer),
                    Err(e) => {
                        if is_rt_shutdown_err(&e) {
                            tracing::debug!(error.msg = %e, error.details = ?e, "Failed to accept connection");
                        } else {
                            tracing::error!(error.msg = %e, error.details = ?e, "Failed to accept connection");
                        }
                        continue;
                    }
                }
            }
        }

        let Self {
            mut command_inbox,
            mut next_worker,
            mut worker_handles,
            incoming,
            config: _,
            max_queue_length,
            handler,
            application_state,
            handler_output_future: _,
        } = self;

        let n_workers = worker_handles.len();

        let mut incoming_join_set = JoinSet::new();
        for incoming in incoming.into_iter() {
            incoming_join_set.spawn(accept_connection(incoming));
        }

        let error = 'event_loop: loop {
            // Check if there is work to be done.
            let message =
                poll_fn(|cx| Self::poll_inboxes(cx, &mut command_inbox, &mut incoming_join_set))
                    .await;
            match message {
                AcceptorInboxMessage::ServerCommand(command) => match command {
                    ServerCommand::Shutdown {
                        completion_notifier,
                        mode,
                    } => {
                        Self::shutdown(
                            completion_notifier,
                            mode,
                            incoming_join_set,
                            worker_handles,
                        )
                        .await;
                        return;
                    }
                },
                AcceptorInboxMessage::Connection(msg) => {
                    let (incoming, connection, remote_peer) = match msg {
                        Some(Ok((incoming, connection, remote_peer))) => {
                            (incoming, connection, remote_peer)
                        }
                        Some(Err(e)) => {
                            // This only ever happens if the task accepting connections panicked
                            // or was somehow cancelled.
                            // Neither should ever happen, but we handle the error anyway so that,
                            // if we ever introduce such a bug, its details get logged before the
                            // acceptor exits.
                            break 'event_loop e;
                        }
                        None => {
                            // When we succeed in accepting a connection, we always spawn a new task to
                            // accept the next connection from the same socket.
                            // If we fail to accept a connection, we exit the acceptor thread.
                            // Therefore, the JoinSet should never be empty.
                            unreachable!(
                                "The JoinSet for incoming connections cannot ever be empty"
                            )
                        }
                    };
                    // Re-spawn the task to keep accepting connections from the same socket.
                    incoming_join_set.spawn(accept_connection(incoming));

                    // A flag to track if the connection has been successfully sent to a worker.
                    let mut has_been_handled = false;
                    // We try to send the connection to a worker (`ConnectionMessage`).
                    // If the worker's inbox is full, we try the next worker until we find one that can
                    // accept the connection or we've tried all workers.
                    let mut connection_message = ConnectionMessage {
                        connection,
                        peer_addr: remote_peer,
                    };
                    for _ in 0..n_workers {
                        // The id of the worker we just tried to dispatch to, if its thread turned
                        // out to have crashed.
                        let mut has_crashed: Option<usize> = None;
                        let worker_handle = &worker_handles[next_worker];
                        if let Err(e) = worker_handle.dispatch(connection_message) {
                            connection_message = match e {
                                TrySendError::Full(message) => message,
                                // A closed channel implies that the worker thread is no longer running,
                                // therefore we need to restart it.
                                TrySendError::Closed(conn) => {
                                    has_crashed = Some(worker_handle.id());
                                    conn
                                }
                            };
                            next_worker = (next_worker + 1) % n_workers;
                        } else {
                            // We've successfully sent the connection to a worker, so we can stop trying
                            // to send it to other workers.
                            has_been_handled = true;
                            break;
                        }

                        // Restart the crashed worker thread.
                        if let Some(worker_id) = has_crashed {
                            tracing::warn!(worker_id = worker_id, "Worker crashed, restarting it");
                            let (worker, worker_handle) = Worker::new(
                                worker_id,
                                max_queue_length,
                                handler,
                                application_state.clone(),
                            );
                            // TODO: what if we fail to spawn the worker thread? We don't want to panic here!
                            worker.spawn().expect("Failed to spawn worker thread");
                            worker_handles[worker_id] = worker_handle;
                        }
                    }

                    if !has_been_handled {
                        tracing::error!(
                            remote_peer = %remote_peer,
                            "All workers are busy, dropping connection",
                        );
                    }
                }
            }
        };

        tracing::error!(
            error.msg = %error,
            error.details = ?error,
            "Failed to accept new connections. The acceptor thread will exit now."
        );
    }

    /// Check if there is work to be done.
    fn poll_inboxes(
        cx: &mut std::task::Context<'_>,
        server_command_inbox: &mut tokio::sync::mpsc::Receiver<ServerCommand>,
        incoming_join_set: &mut JoinSet<(IncomingStream, TcpStream, SocketAddr)>,
    ) -> Poll<AcceptorInboxMessage> {
        // Order matters here: we want to prioritize shutdown messages over incoming connections.
        if let Poll::Ready(Some(message)) = server_command_inbox.poll_recv(cx) {
            return Poll::Ready(AcceptorInboxMessage::ServerCommand(message));
        }
        if let Poll::Ready(message) = incoming_join_set.poll_join_next(cx) {
            return Poll::Ready(AcceptorInboxMessage::Connection(message));
        }
        Poll::Pending
    }

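    /// Run the acceptor on a dedicated thread, backed by its own single-threaded `tokio` runtime.
    ///
    /// The event loop is driven inside a [`LocalSet`] so that `!Send` futures (e.g. the worker
    /// shutdown futures spawned via `spawn_local`) can run on this thread.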
    fn spawn(self) -> thread::JoinHandle<()> {
        thread::Builder::new()
            .name("pavex-acceptor".to_string())
            .spawn(move || {
                let rt = tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .expect("Failed to build single-threaded Tokio runtime for acceptor thread");
                LocalSet::new().block_on(&rt, self.run());
            })
            .expect("Failed to spawn acceptor thread")
    }

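    /// Stop accepting new connections and shut down the workers according to the requested
    /// [`ShutdownMode`], then notify the caller via `completion_notifier`.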
    async fn shutdown(
        completion_notifier: tokio::sync::oneshot::Sender<()>,
        mode: ShutdownMode,
        incoming_join_set: JoinSet<(IncomingStream, TcpStream, SocketAddr)>,
        worker_handles: Vec<WorkerHandle>,
    ) {
        // This drops the `JoinSet`, which will cause all the tasks that are still running to
        // be cancelled.
        // It will in turn cause the `Incoming` to be dropped, which will cause the `TcpListener`
        // to be dropped, thus closing the socket and stopping acceptance of new connections.
        drop(incoming_join_set);

        let mut shutdown_join_set = JoinSet::new();
        for worker_handle in worker_handles {
            // The shutdown command is enqueued as soon as `shutdown` is invoked, before the
            // returned future is polled for the first time.
            let future = worker_handle.shutdown(mode.clone());
            if mode.is_graceful() {
                shutdown_join_set.spawn_local(future);
            }
        }

        if let ShutdownMode::Graceful { timeout } = mode {
            // Wait for all workers to shut down, or for the timeout to expire,
            // whichever happens first.
            let _ = tokio::time::timeout(timeout, async move {
                while shutdown_join_set.join_next().await.is_some() {}
            })
            .await;
        }

        // Notify the caller that the server has shut down.
        let _ = completion_notifier.send(());
    }
}