1
//! Executor for running tests with mocked environment
2
//!
3
//! See [`MockExecutor`]
4

            
5
use std::any::Any;
6
use std::cell::Cell;
7
use std::collections::VecDeque;
8
use std::fmt::{self, Debug, Display};
9
use std::future::Future;
10
use std::io::{self, Write as _};
11
use std::iter;
12
use std::panic::{AssertUnwindSafe, catch_unwind, panic_any};
13
use std::pin::{Pin, pin};
14
use std::sync::{Arc, Mutex, MutexGuard, Weak};
15
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
16

            
17
use futures::FutureExt as _;
18
use futures::pin_mut;
19
use futures::task::{FutureObj, Spawn, SpawnError};
20

            
21
use assert_matches::assert_matches;
22
use educe::Educe;
23
use itertools::Either::{self, *};
24
use itertools::{chain, izip};
25
use slotmap_careful::DenseSlotMap;
26
use std::backtrace::Backtrace;
27
use strum::EnumIter;
28

            
29
// NB: when using traced_test, the trace! and error! output here is generally suppressed
30
// in tests of other crates.  To see it, you can write something like this
31
// (in the dev-dependencies of the crate whose tests you're running):
32
//    tracing-test = { version = "0.2.4", features = ["no-env-filter"] }
33
use tracing::{error, trace};
34

            
35
use oneshot_fused_workaround::{self as oneshot, Canceled};
36
use tor_error::error_report;
37
use tor_rtcompat::{Blocking, ToplevelBlockOn};
38

            
39
use Poll::*;
40
use TaskState::*;
41

            
42
/// Type-erased future, one for each of our (normal) tasks
type TaskFuture = FutureObj<'static, ()>;

/// Future for the argument to `block_on`, which is handled specially
///
/// (It is passed down the call stack rather than stored in `Data`,
/// since it need not be `'static` — see the notes on [`Data`].)
type MainFuture<'m> = Pin<&'m mut dyn Future<Output = ()>>;
47

            
48
//---------- principal data structures ----------
49

            
50
/// Executor for running tests with mocked environment
///
/// For test cases which don't actually wait for anything in the real world.
///
/// This is the executor.
/// It implements [`Spawn`] and [`ToplevelBlockOn`]
///
/// It will usually be used as part of a `MockRuntime`.
///
/// To run futures, call [`ToplevelBlockOn::block_on`]
///
/// Cloning is cheap: all clones share the same underlying [`Shared`]
/// state via an `Arc`.
///
/// # Restricted environment
///
/// Tests run with this executor must not attempt to block
/// on anything "outside":
/// every future that anything awaits must (eventually) be woken directly
/// *by some other task* in the same test case.
///
/// (By directly we mean that the [`Waker::wake`] call is made
/// by that waking future, before that future itself awaits anything.)
///
/// # Panics
///
/// The executor will panic
/// if the toplevel future (passed to `block_on`)
/// doesn't complete (without externally blocking),
/// but instead waits for something.
///
/// The executor will malfunction or panic if reentered.
/// (Eg, if `block_on` is reentered.)
#[derive(Clone, Default, Educe)]
#[educe(Debug)]
pub struct MockExecutor {
    /// Mutable state
    #[educe(Debug(ignore))]
    shared: Arc<Shared>,
}
87

            
88
/// Shared state and ancillary information
///
/// This is always within an `Arc`.
#[derive(Default)]
struct Shared {
    /// Shared state
    ///
    /// All mutable executor state lives behind this one mutex.
    data: Mutex<Data>,
    /// Condition variable for thread scheduling
    ///
    /// Signaled when [`Data.thread_to_run`](struct.Data.html#structfield.thread_to_run)
    /// is modified.
    ///
    /// (Presumably waited on together with the `data` mutex by the
    /// thread-context-switch machinery — not visible in this chunk; confirm.)
    thread_condvar: std::sync::Condvar,
}
101

            
102
/// Task id, module to hide `Ti` alias
mod task_id {
    slotmap_careful::new_key_type! {
        /// Task ID, usually called `TaskId`
        ///
        /// Short name in special `task_id` module so that [`Debug`] is nice
        pub(super) struct Ti;
    }
}
// Re-export under the descriptive name the rest of this file uses.
use task_id::Ti as TaskId;
112

            
113
/// Executor's state
///
/// ### Task state machine
///
/// A task is created in `tasks`, `Awake`, so also in `awake`.
///
/// When we poll it, we take it out of `awake` and set it to `Asleep`,
/// and then call `poll()`.
/// Any time after that, it can be made `Awake` again (and put back onto `awake`)
/// by the waker ([`ActualWaker`], wrapped in [`Waker`]).
///
/// The task's future is of course also present here in this data structure.
/// However, during poll we must release the lock,
/// so we cannot borrow the future from `Data`.
/// Instead, we move it out.  So `Task.fut` is an `Option`.
///
/// ### "Main" task - the argument to `block_on`
///
/// The signature of `BlockOn::block_on` accepts a non-`'static` future
/// (and a non-`Send`/`Sync` one).
///
/// So we cannot store that future in `Data` because `Data` is `'static`.
/// Instead, this main task future is passed as an argument down the call stack.
/// In the data structure we simply store a placeholder, `TaskFutureInfo::Main`.
#[derive(Educe, derive_more::Debug)]
#[educe(Default)]
struct Data {
    /// Tasks
    ///
    /// Includes tasks spawned with `spawn`,
    /// and also the future passed to `block_on`.
    #[debug("{:?}", DebugTasks(self, || tasks.keys()))]
    tasks: DenseSlotMap<TaskId, Task>,

    /// `awake` lists precisely: tasks that are `Awake`, plus maybe stale `TaskId`s
    ///
    /// Tasks are pushed onto the *back* when woken,
    /// so back is the most recently woken.
    #[debug("{:?}", DebugTasks(self, || awake.iter().cloned()))]
    awake: VecDeque<TaskId>,

    /// If a future from `progress_until_stalled` exists
    ///
    /// (See [`ProgressingUntilStalled`] for how this is driven.)
    progressing_until_stalled: Option<ProgressingUntilStalled>,

    /// Scheduling policy
    ///
    /// Consulted by `Data::schedule` each time a task is picked to run.
    scheduling: SchedulingPolicy,

    /// (Sub)thread we want to run now
    ///
    /// At any one time only one thread is meant to be running.
    /// Other threads are blocked in condvar wait, waiting for this to change.
    ///
    /// **Modified only** within
    /// [`thread_context_switch_send_instruction_to_run`](Shared::thread_context_switch_send_instruction_to_run),
    /// which takes responsibility for preserving the following **invariants**:
    ///
    ///  1. no-one but the named thread is allowed to modify this field.
    ///  2. after modifying this field, signal `thread_condvar`
    #[educe(Default(expression = "ThreadDescriptor::Executor"))]
    thread_to_run: ThreadDescriptor,
}
174

            
175
/// How we should schedule?
///
/// (Selects which end of [`Data.awake`](struct.Data.html#structfield.awake)
/// the next task is popped from.)
#[derive(Debug, Clone, Default, EnumIter)]
#[non_exhaustive]
pub enum SchedulingPolicy {
    /// Task *most* recently woken is run
    ///
    /// This is the default.
    ///
    /// It will expose starvation bugs if a task never sleeps.
    /// (Which is a good thing in tests.)
    #[default]
    Stack,
    /// Task *least* recently woken is run.
    Queue,
}
190

            
191
/// Record of a single task
///
/// Tracks a spawned task, or the main task (the argument to `block_on`).
///
/// Stored in [`Data`]`.tasks`.
struct Task {
    /// For debugging output
    ///
    /// (Comes from the `desc` argument to `spawn_identified`, or is a
    /// fixed string for other spawn paths.)
    desc: String,
    /// Has this been woken via a waker?  (And is it in `Data.awake`?)
    ///
    /// **Set to `Awake` only by [`Task::set_awake`]**,
    /// preserving the invariant that
    /// every `Awake` task is in [`Data.awake`](struct.Data.html#structfield.awake).
    state: TaskState,
    /// The actual future (or a placeholder for it)
    ///
    /// May be `None` briefly in the executor main loop, because we've
    /// temporarily moved it out so we can poll it,
    /// or if this is a Subthread task which is currently running sync code
    /// (in which case we're blocked in the executor waiting to be
    /// woken up by [`thread_context_switch`](Shared::thread_context_switch).
    ///
    /// Note that the `None` can be observed outside the main loop, because
    /// the main loop unlocks while it polls, so other (non-main-loop) code
    /// might see it.
    fut: Option<TaskFutureInfo>,
}
218

            
219
/// A future as stored in our record of a [`Task`]
#[derive(Educe)]
#[educe(Debug)]
enum TaskFutureInfo {
    /// The [`Future`].  All is normal.
    Normal(#[educe(Debug(ignore))] TaskFuture),
    /// The future isn't here because this task is the main future for `block_on`
    ///
    /// (The real future is passed down the call stack as [`MainFuture`].)
    Main,
    /// This task is actually a [`Subthread`](MockExecutor::subthread_spawn)
    ///
    /// Instead of polling it, we'll switch to it with
    /// [`thread_context_switch`](Shared::thread_context_switch).
    Subthread,
}
233

            
234
/// State of a task - do we think it needs to be polled?
///
/// Stored in [`Task`]`.state`.
#[derive(Debug)]
enum TaskState {
    /// Awake - needs to be polled
    ///
    /// Established by [`waker.wake()`](Waker::wake)
    Awake,
    /// Asleep - does *not* need to be polled
    ///
    /// Established each time just before we call the future's [`poll`](Future::poll)
    ///
    /// (The payload records where the task went to sleep — presumably
    /// captured backtraces, for debug dumps; confirm against `SleepLocation`.)
    Asleep(Vec<SleepLocation>),
}
248

            
249
/// Actual implementor of `Wake` for use in a `Waker`
///
/// Futures (eg, channels from [`futures`]) will use this to wake a task
/// when it should be polled.
///
/// This type must not be `Cloned` with the `Data` lock held.
/// Consequently, a `Waker` mustn't either.
struct ActualWaker {
    /// Executor state
    ///
    /// The Waker mustn't to hold a strong reference to the executor,
    /// since typically a task holds a future that holds a Waker,
    /// and the executor holds the task - so that would be a cycle.
    ///
    /// (Hence `Weak`: if the executor is gone, `wake` becomes a no-op.)
    data: Weak<Shared>,

    /// Which task this is
    id: TaskId,
}
267

            
268
/// State used for an in-progress call to
/// [`progress_until_stalled`][`MockExecutor::progress_until_stalled`]
///
/// If present in [`Data`], an (async) call to `progress_until_stalled`
/// is in progress.
///
/// The future from `progress_until_stalled`, [`ProgressUntilStalledFuture`]
/// is a normal-ish future.
/// It can be polled in the normal way.
/// When it is polled, it looks here, in `finished`, to see if it's `Ready`.
///
/// The future is made ready, and woken (via `waker`),
/// by bespoke code in the task executor loop.
///
/// When `ProgressUntilStalledFuture` (maybe completes and) is dropped,
/// its `Drop` impl is used to remove this from `Data.progressing_until_stalled`.
#[derive(Debug)]
struct ProgressingUntilStalled {
    /// Have we, in fact, stalled?
    ///
    /// Made `Ready` by special code in the executor loop
    /// (see `execute_to_completion`).
    finished: Poll<()>,

    /// Waker
    ///
    /// Signalled by special code in the executor loop
    ///
    /// `None` until the future has been polled at least once;
    /// `execute_to_completion` panics via `expect` if it is still unset.
    waker: Option<Waker>,
}
296

            
297
/// Future from
/// [`progress_until_stalled`][`MockExecutor::progress_until_stalled`]
///
/// See [`ProgressingUntilStalled`] for an overview of this aspect of the contraption.
///
/// Existence of this struct implies `Data.progressing_until_stalled` is `Some`.
/// There can only be one at a time.
#[derive(Educe)]
#[educe(Debug)]
struct ProgressUntilStalledFuture {
    /// Executor's state; this future's state is in `.progressing_until_stalled`
    #[educe(Debug(ignore))]
    shared: Arc<Shared>,
}
311

            
312
/// Identifies a thread we know about - the executor thread, or a Subthread
///
/// Not related to `std::thread::ThreadId`.
///
/// See [`spawn_subthread`](MockExecutor::subthread_spawn) for definition of a Subthread.
///
/// This being a thread-local and not scoped by which `MockExecutor` we're talking about
/// means that we can't cope if there are multiple `MockExecutor`s involved in the same thread.
/// That's OK (and documented).
#[derive(Copy, Clone, Eq, PartialEq, derive_more::Debug)]
enum ThreadDescriptor {
    /// Foreign - neither the (running) executor, nor a Subthread
    ///
    /// (This is the value between executor runs; `execute_until_first_stall`
    /// asserts it on entry and restores it on exit.)
    #[debug("FOREIGN")]
    Foreign,
    /// The executor.
    #[debug("Exe")]
    Executor,
    /// This task, which is a Subthread.
    #[debug("{_0:?}")]
    Subthread(TaskId),
}
333

            
334
/// Marker indicating that this task is a Subthread, not an async task.
///
/// See [`spawn_subthread`](MockExecutor::subthread_spawn) for definition of a Subthread.
///
/// (Returned, instead of a `Poll`, by the poll dispatch in `executor_main_loop`
/// when the scheduled task is a [`TaskFutureInfo::Subthread`].)
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct IsSubthread;
339

            
340
/// [`Shared::subthread_yield`] should set our task awake before switching to the executor
///
/// (Used as an option-like marker argument; `subthread_yield` itself is not
/// visible in this chunk.)
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct SetAwake;
343

            
344
thread_local! {
    /// Identifies this thread.
    ///
    /// Starts as `Foreign`; `execute_until_first_stall` sets it to `Executor`
    /// for the duration of the executor main loop, restoring `Foreign` after.
    pub static THREAD_DESCRIPTOR: Cell<ThreadDescriptor> = const {
        Cell::new(ThreadDescriptor::Foreign)
    };
}
350

            
351
//---------- creation ----------
352

            
353
impl MockExecutor {
354
    /// Make a `MockExecutor` with default parameters
355
4
    pub fn new() -> Self {
356
4
        Self::default()
357
4
    }
358

            
359
    /// Make a `MockExecutor` with a specific `SchedulingPolicy`
360
11737
    pub fn with_scheduling(scheduling: SchedulingPolicy) -> Self {
361
11737
        Data {
362
11737
            scheduling,
363
11737
            ..Default::default()
364
11737
        }
365
11737
        .into()
366
11737
    }
367
}
368

            
369
impl From<Data> for MockExecutor {
370
11737
    fn from(data: Data) -> MockExecutor {
371
11737
        let shared = Shared {
372
11737
            data: Mutex::new(data),
373
11737
            thread_condvar: std::sync::Condvar::new(),
374
11737
        };
375
11737
        MockExecutor {
376
11737
            shared: Arc::new(shared),
377
11737
        }
378
11737
    }
379
}
380

            
381
//---------- spawning ----------
382

            
383
impl MockExecutor {
384
    /// Spawn a task and return something to identify it
385
    ///
386
    /// `desc` should `Display` as some kind of short string (ideally without spaces)
387
    /// and will be used in the `Debug` impl and trace log messages from `MockExecutor`.
388
    ///
389
    /// The returned value is an opaque task identifier which is very cheap to clone
390
    /// and which can be used by the caller in debug logging,
391
    /// if it's desired to correlate with the debug output from `MockExecutor`.
392
    /// Most callers will want to ignore it.
393
    ///
394
    /// This method is infallible.  (The `MockExecutor` cannot be shut down.)
395
254
    pub fn spawn_identified(
396
254
        &self,
397
254
        desc: impl Display,
398
254
        fut: impl Future<Output = ()> + Send + 'static,
399
254
    ) -> impl Debug + Clone + Send + 'static {
400
254
        self.spawn_internal(desc.to_string(), FutureObj::from(Box::new(fut)))
401
254
    }
402

            
403
    /// Spawn a task and return its output for further usage
404
    ///
405
    /// `desc` should `Display` as some kind of short string (ideally without spaces)
406
    /// and will be used in the `Debug` impl and trace log messages from `MockExecutor`.
407
80
    pub fn spawn_join<T: Debug + Send + 'static>(
408
80
        &self,
409
80
        desc: impl Display,
410
80
        fut: impl Future<Output = T> + Send + 'static,
411
80
    ) -> impl Future<Output = T> {
412
80
        let (tx, rx) = oneshot::channel();
413
80
        self.spawn_identified(desc, async move {
414
80
            let res = fut.await;
415
80
            tx.send(res)
416
80
                .expect("Failed to send future's output, did future panic?");
417
80
        });
418
80
        rx.map(|m| m.expect("Failed to receive future's output"))
419
80
    }
420

            
421
    /// Spawn a task and return its `TaskId`
422
    ///
423
    /// Convenience method for use by `spawn_identified` and `spawn_obj`.
424
    /// The future passed to `block_on` is not handled here.
425
87408
    fn spawn_internal(&self, desc: String, fut: TaskFuture) -> TaskId {
426
87408
        let mut data = self.shared.lock();
427
87408
        data.insert_task(desc, TaskFutureInfo::Normal(fut))
428
87408
    }
429
}
430

            
431
impl Data {
432
    /// Insert a task given its `TaskFutureInfo` and return its `TaskId`.
433
101234
    fn insert_task(&mut self, desc: String, fut: TaskFutureInfo) -> TaskId {
434
101234
        let state = Awake;
435
101234
        let id = self.tasks.insert(Task {
436
101234
            state,
437
101234
            desc,
438
101234
            fut: Some(fut),
439
101234
        });
440
101234
        self.awake.push_back(id);
441
101234
        trace!("MockExecutor spawned {:?}={:?}", id, self.tasks[id]);
442
101234
        id
443
101234
    }
444
}
445

            
446
impl Spawn for MockExecutor {
447
82309
    fn spawn_obj(&self, future: TaskFuture) -> Result<(), SpawnError> {
448
82309
        self.spawn_internal("spawn_obj".into(), future);
449
82309
        Ok(())
450
82309
    }
451
}
452

            
453
impl Blocking for MockExecutor {
454
    type ThreadHandle<T: Send + 'static> = Pin<Box<dyn Future<Output = T>>>;
455

            
456
60
    fn spawn_blocking<F, T>(&self, f: F) -> Self::ThreadHandle<T>
457
60
    where
458
60
        F: FnOnce() -> T + Send + 'static,
459
60
        T: Send + 'static,
460
    {
461
        assert_matches!(
462
60
            THREAD_DESCRIPTOR.get(),
463
            ThreadDescriptor::Executor | ThreadDescriptor::Subthread(_),
464
            "MockExecutor::spawn_blocking_io only allowed from future or subthread, being run by this executor"
465
        );
466
60
        Box::pin(
467
60
            self.subthread_spawn("spawn_blocking", f)
468
60
                .map(|x| x.expect("Error in spawn_blocking subthread.")),
469
        )
470
60
    }
471

            
472
1804
    fn reenter_block_on<F>(&self, future: F) -> F::Output
473
1804
    where
474
1804
        F: Future,
475
1804
        F::Output: Send + 'static,
476
    {
477
1804
        self.subthread_block_on_future(future)
478
1804
    }
479
}
480

            
481
//---------- block_on ----------
482

            
483
impl ToplevelBlockOn for MockExecutor {
    fn block_on<F>(&self, input_fut: F) -> F::Output
    where
        F: Future,
    {
        // Slot where `run_store_fut` (below) stashes the completed output.
        // Still `None` after execution means nothing could make progress.
        let mut value: Option<F::Output> = None;

        // Box this just so that we can conveniently control precisely when it's dropped.
        // (We could do this with Option and Pin::set but that seems clumsier.)
        let mut input_fut = Box::pin(input_fut);

        let run_store_fut = {
            let value = &mut value;
            let input_fut = &mut input_fut;
            // Wrapper future: awaits the caller's future and stores its output,
            // so the executor only ever deals in `Output = ()` futures.
            async {
                trace!("MockExecutor block_on future...");
                let t = input_fut.await;
                trace!("MockExecutor block_on future returned...");
                *value = Some(t);
                trace!("MockExecutor block_on future exiting.");
            }
        };

        {
            pin_mut!(run_store_fut);

            // Register a placeholder task for the main future; the executor
            // recognises `TaskFutureInfo::Main` and polls `run_store_fut` for it.
            let main_id = self
                .shared
                .lock()
                .insert_task("main".into(), TaskFutureInfo::Main);
            trace!("MockExecutor {main_id:?} is task for block_on");
            self.execute_to_completion(run_store_fut);
        }

        #[allow(clippy::let_and_return)] // clarity
        let value = value.take().unwrap_or_else(|| {
            // eprintln can be captured by libtest, but the debug_dump goes to io::stderr.
            // use the latter, so that the debug dump is prefixed by this message.
            let _: io::Result<()> = writeln!(io::stderr(), "all futures blocked, crashing...");
            // write to tracing too, so the tracing log is clear about when we crashed
            error!("all futures blocked, crashing...");

            // Sequencing here is subtle.
            //
            // We should do the dump before dropping the input future, because the input
            // future is likely to own things that, when dropped, wake up other tasks,
            // rendering the dump inaccurate.
            //
            // But also, dropping the input future may well drop a ProgressUntilStalledFuture
            // which then reenters us.  More generally, we mustn't call user code
            // with the lock held.
            //
            // And, we mustn't panic with the data lock held.
            //
            // If value was Some, then this closure is dropped without being called,
            // which drops the future after it has yielded the value, which is correct.
            {
                let mut data = self.shared.lock();
                data.debug_dump();
            }
            drop(input_fut);

            panic!(
                r"
all futures blocked. waiting for the real world? or deadlocked (waiting for each other) ?
"
            );
        });

        value
    }
}
555

            
556
//---------- execution - core implementation ----------
557

            
558
impl MockExecutor {
    /// Keep polling tasks until nothing more can be done
    ///
    /// Ie, stop when `awake` is empty and `progressing_until_stalled` is `None`.
    fn execute_to_completion(&self, mut main_fut: MainFuture) {
        trace!("MockExecutor execute_to_completion...");
        loop {
            self.execute_until_first_stall(main_fut.as_mut());

            // Handle `progressing_until_stalled`
            let pus_waker = {
                let mut data = self.shared.lock();
                let pus = &mut data.progressing_until_stalled;
                trace!("MockExecutor execute_to_completion PUS={:?}", &pus);
                let Some(pus) = pus else {
                    // No progressing_until_stalled, we're actually done.
                    break;
                };
                assert_eq!(
                    pus.finished, Pending,
                    "ProgressingUntilStalled finished twice?!"
                );
                pus.finished = Ready(());

                // Release the lock temporarily so that ActualWaker::clone doesn't deadlock
                let waker = pus
                    .waker
                    .take()
                    .expect("ProgressUntilStalledFuture not ever polled!");
                drop(data);
                let waker_copy = waker.clone();
                let mut data = self.shared.lock();

                // Re-acquire: put the original waker back, checking no-one
                // raced us and installed another one meanwhile.
                let pus = &mut data.progressing_until_stalled;
                if let Some(double) = pus
                    .as_mut()
                    .expect("progressing_until_stalled updated under our feet!")
                    .waker
                    .replace(waker)
                {
                    panic!("double progressing_until_stalled.waker! {double:?}");
                }

                waker_copy
            };
            // Wake outside the lock scope (the waker may reenter us).
            pus_waker.wake();
        }
        trace!("MockExecutor execute_to_completion done");
    }

    /// Keep polling tasks until `awake` is empty
    ///
    /// (Ignores `progressing_until_stalled` - so if one is active,
    /// will return when all other tasks have blocked.)
    ///
    /// # Panics
    ///
    /// Might malfunction or panic if called reentrantly
    fn execute_until_first_stall(&self, main_fut: MainFuture) {
        trace!("MockExecutor execute_until_first_stall ...");

        assert_eq!(
            THREAD_DESCRIPTOR.get(),
            ThreadDescriptor::Foreign,
            "MockExecutor executor re-entered"
        );
        THREAD_DESCRIPTOR.set(ThreadDescriptor::Executor);

        // catch_unwind so that we can restore THREAD_DESCRIPTOR to Foreign
        // before propagating any panic from the main loop or a task.
        let r = catch_unwind(AssertUnwindSafe(|| self.executor_main_loop(main_fut)));

        THREAD_DESCRIPTOR.set(ThreadDescriptor::Foreign);

        match r {
            Ok(()) => trace!("MockExecutor execute_until_first_stall done."),
            Err(e) => {
                trace!("MockExecutor executor, or async task, panicked!");
                panic_any(e)
            }
        }
    }

    /// Keep polling tasks until `awake` is empty (inner, executor main loop)
    ///
    /// This is only called from [`MockExecutor::execute_until_first_stall`],
    /// so it could also be called `execute_until_first_stall_inner`.
    #[allow(clippy::cognitive_complexity)]
    fn executor_main_loop(&self, mut main_fut: MainFuture) {
        'outer: loop {
            // Take a `Awake` task off `awake` and make it `Asleep`
            let (id, mut fut) = 'inner: loop {
                let mut data = self.shared.lock();
                let Some(id) = data.schedule() else {
                    // `awake` is empty: everything has stalled.
                    break 'outer;
                };
                let Some(task) = data.tasks.get_mut(id) else {
                    // Stale entry in `awake`: task already completed and was removed.
                    trace!("MockExecutor {id:?} vanished");
                    continue;
                };
                task.state = Asleep(vec![]);
                let fut = task.fut.take().expect("future missing from task!");
                break 'inner (id, fut);
            };

            // Poll the selected task (with the data lock released)
            trace!("MockExecutor {id:?} polling...");
            let waker = ActualWaker::make_waker(&self.shared, id);
            let mut cx = Context::from_waker(&waker);
            let r: Either<Poll<()>, IsSubthread> = match &mut fut {
                TaskFutureInfo::Normal(fut) => Left(fut.poll_unpin(&mut cx)),
                TaskFutureInfo::Main => Left(main_fut.as_mut().poll(&mut cx)),
                TaskFutureInfo::Subthread => Right(IsSubthread),
            };

            // Deal with the returned `Poll`
            let _fut_drop_late;
            {
                let mut data = self.shared.lock();
                let task = data
                    .tasks
                    .get_mut(id)
                    .expect("task vanished while we were polling it");

                match r {
                    Left(Pending) => {
                        trace!("MockExecutor {id:?} -> Pending");
                        if task.fut.is_some() {
                            panic!("task reinserted while we polled it?!");
                        }
                        // The task might have been woken *by its own poll method*.
                        // That's why we set it to `Asleep` *earlier* rather than here.
                        // All we need to do is put the future back.
                        task.fut = Some(fut);
                    }
                    Left(Ready(())) => {
                        trace!("MockExecutor {id:?} -> Ready");
                        // Oh, it finished!
                        // It might be in `awake`, but that's allowed to contain stale tasks,
                        // so we *don't* need to scan that list and remove it.
                        data.tasks.remove(id);
                        // It is important that we don't drop `fut` until we have released
                        // the data lock, since it is an external type and might try to reenter
                        // us (eg by calling spawn).  If we do that here, we risk deadlock.
                        // So, move `fut` to a variable with scope outside the block with `data`.
                        _fut_drop_late = fut;
                    }
                    Right(IsSubthread) => {
                        trace!("MockExecutor {id:?} -> Ready, waking Subthread");
                        // Task is a subthread, which has called thread_context_switch
                        // to switch to us.  We "poll" it by switching back.

                        // Put back `TFI::Subthread`, which was moved out temporarily, above.
                        task.fut = Some(fut);

                        self.shared.thread_context_switch(
                            data,
                            ThreadDescriptor::Executor,
                            ThreadDescriptor::Subthread(id),
                        );

                        // Now, if the Subthread still exists, that's because it's switched
                        // back to us, and is waiting in subthread_block_on_future again.
                        // Or it might have ended, in which case it's not in `tasks` any more.
                        // In any case we can go back to scheduling futures.
                    }
                }
            }
        }
    }
}
727

            
728
impl Data {
729
    /// Return the next task to run
730
    ///
731
    /// The task is removed from `awake`, but **`state` is not set to `Asleep`**.
732
    /// The caller must restore the invariant!
733
9507220
    fn schedule(&mut self) -> Option<TaskId> {
734
        use SchedulingPolicy as SP;
735
9507220
        match self.scheduling {
736
4771729
            SP::Stack => self.awake.pop_back(),
737
4735491
            SP::Queue => self.awake.pop_front(),
738
        }
739
9507220
    }
740
}
741

            
742
impl ActualWaker {
743
    /// Obtain a strong reference to the executor's data
744
22624057
    fn upgrade_data(&self) -> Option<Arc<Shared>> {
745
22624057
        self.data.upgrade()
746
22624057
    }
747

            
748
    /// Wake the task corresponding to this `ActualWaker`
749
    ///
750
    /// This is like `<Self as std::task::Wake>::wake()` but takes `&self`, not `Arc`
751
6307316
    fn wake(&self) {
752
6307316
        let Some(data) = self.upgrade_data() else {
753
            // The executor is gone!  Don't try to wake.
754
4
            return;
755
        };
756
6307312
        let mut data = data.lock();
757
6307312
        let data = &mut *data;
758
6307312
        trace!("MockExecutor {:?} wake", &self.id);
759
6307312
        let Some(task) = data.tasks.get_mut(self.id) else {
760
12561
            return;
761
        };
762
6294751
        task.set_awake(self.id, &mut data.awake);
763
6307316
    }
764

            
765
    /// Create and return a `Waker` for task `id`
766
5673669
    fn make_waker(shared: &Arc<Shared>, id: TaskId) -> Waker {
767
5673669
        ActualWaker {
768
5673669
            data: Arc::downgrade(shared),
769
5673669
            id,
770
5673669
        }
771
5673669
        .new_waker()
772
5673669
    }
773
}
774

            
775
//---------- "progress until stalled" functionality ----------
776

            
777
impl MockExecutor {
778
    /// Run tasks in the current executor until every other task is waiting
779
    ///
780
    /// # Panics
781
    ///
782
    /// Might malfunction or panic if more than one such call is running at once.
783
    ///
784
    /// (Ie, you must `.await` or drop the returned `Future`
785
    /// before calling this method again.)
786
    ///
787
    /// Must be called and awaited within a future being run by `self`.
788
3874777
    pub fn progress_until_stalled(&self) -> impl Future<Output = ()> + use<> {
789
3874777
        let mut data = self.shared.lock();
790
3874777
        assert!(
791
3874777
            data.progressing_until_stalled.is_none(),
792
            "progress_until_stalled called more than once"
793
        );
794
3874777
        trace!("MockExecutor progress_until_stalled...");
795
3874777
        data.progressing_until_stalled = Some(ProgressingUntilStalled {
796
3874777
            finished: Pending,
797
3874777
            waker: None,
798
3874777
        });
799
3874777
        ProgressUntilStalledFuture {
800
3874777
            shared: self.shared.clone(),
801
3874777
        }
802
3874777
    }
803
}
804

            
805
impl Future for ProgressUntilStalledFuture {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
        // Clone the waker *before* taking the data lock: cloning a `Waker`
        // for this executor goes through `<ActualWaker as Clone>::clone`,
        // which itself locks `data` (to record the sleep location), and the
        // `data` mutex is not reentrant.  Do not reorder these two lines.
        let waker = cx.waker().clone();
        let mut data = self.shared.lock();
        let pus = data.progressing_until_stalled.as_mut();
        trace!("MockExecutor progress_until_stalled polling... {:?}", &pus);
        let pus = pus.expect("ProgressingUntilStalled missing");
        // Store the latest waker so the executor can wake us once it stalls.
        pus.waker = Some(waker);
        // `finished` is flipped to `Ready(())` by the executor when stalled.
        pus.finished
    }
}
818

            
819
impl Drop for ProgressUntilStalledFuture {
820
3874777
    fn drop(&mut self) {
821
3874777
        self.shared.lock().progressing_until_stalled = None;
822
3874777
    }
823
}
824

            
825
//---------- (sub)threads ----------
826

            
827
impl MockExecutor {
    /// Spawn a "Subthread", for processing in a sync context
    ///
    /// `call` will be run on a separate thread, called a "Subthread".
    ///
    /// But it will **not run simultaneously** with the executor,
    /// nor with other Subthreads.
    /// So Subthreads are somewhat like coroutines.
    ///
    /// `call` must be capable of making progress without waiting for any other Subthreads.
    /// `call` may wait for async futures, using
    /// [`subthread_block_on_future`](MockExecutor::subthread_block_on_future).
    ///
    /// Subthreads may be used for cpubound activity,
    /// or synchronous IO (such as large volumes of disk activity),
    /// provided that the synchronous code will reliably make progress,
    /// without waiting (directly or indirectly) for any async task or Subthread -
    /// except via `subthread_block_on_future`.
    ///
    /// # Subthreads vs raw `std::thread` threads
    ///
    /// Programs using `MockExecutor` may use `std::thread` threads directly.
    /// However, this is not recommended.  There are severe limitations:
    ///
    ///  * Only a Subthread can re-enter the async context from sync code:
    ///    this must be done
    ///    using [`subthread_block_on_future`](MockExecutor::subthread_block_on_future).
    ///    (Re-entering the executor with
    ///    [`block_on`](tor_rtcompat::ToplevelBlockOn::block_on)
    ///    is not allowed.)
    ///  * If async tasks want to suspend waiting for synchronous code,
    ///    the synchronous code must run on a Subthread.
    ///    This allows the `MockExecutor` to know when
    ///    that synchronous code is still making progress.
    ///    (This is needed for
    ///    [`progress_until_stalled`](MockExecutor::progress_until_stalled)
    ///    and the facilities which use it, such as
    ///    [`MockRuntime::advance_until_stalled`](crate::MockRuntime::advance_until_stalled).)
    ///  * Subthreads never run in parallel -
    ///    they only run as scheduled deterministically by the `MockExecutor`.
    ///    So using Subthreads eliminates a source of test nondeterminism.
    ///    (Execution order is still varied due to explicitly varying the scheduling policy.)
    ///
    /// # Panics, abuse, and malfunctions
    ///
    /// If `call` panics and unwinds, `spawn_subthread` yields `Err`.
    /// The application code should do something about it if this happens,
    /// typically, logging errors, tearing things down, or failing a test case.
    ///
    /// If the executor doesn't run, the subthread will not run either, and will remain stuck.
    /// (So, typically, if the thread supposed to run the executor panics,
    /// for example because a future or the executor itself panics,
    /// all the subthreads will become stuck - effectively, they'll be leaked.)
    ///
    /// `spawn_subthread` panics if OS thread spawning fails.
    /// (Like `std::thread::spawn()` does.)
    ///
    /// `MockExecutor`s will malfunction or panic if
    /// any executor invocation method (eg `block_on`) is called on a Subthread.
    pub fn subthread_spawn<T: Send + 'static>(
        &self,
        desc: impl Display,
        call: impl FnOnce() -> T + Send + 'static,
    ) -> impl Future<Output = Result<T, Box<dyn Any + Send>>> + Unpin + Send + Sync + 'static {
        let desc = desc.to_string();
        let (output_tx, output_rx) = oneshot::channel();

        // NB: we don't know which thread we're on!
        // In principle we might be on another Subthread.
        // So we can't context switch here.  That would be very confusing.
        //
        // Instead, we prepare the new Subthread as follows:
        //   - There is a task in the executor
        //   - The task is ready to be polled, whenever the executor decides to
        //   - The thread starts running right away, but immediately waits until it is scheduled
        // See `subthread_entrypoint`.

        {
            let mut data = self.shared.lock();
            // Insert the task *before* spawning the thread, while holding the
            // lock, so the thread can never observe a missing task entry.
            let id = data.insert_task(desc.clone(), TaskFutureInfo::Subthread);

            let _: std::thread::JoinHandle<()> = std::thread::Builder::new()
                .name(desc)
                .spawn({
                    let shared = self.shared.clone();
                    move || shared.subthread_entrypoint(id, call, output_tx)
                })
                .expect("spawn failed");
        }

        // The sender is kept alive by the spawned thread until `call` finishes,
        // so cancellation should be impossible in normal operation.
        output_rx.map(|r| {
            r.unwrap_or_else(|_: Canceled| panic!("Subthread cancelled but should be impossible!"))
        })
    }

    /// Call an async `Future` from a Subthread
    ///
    /// Blocks the Subthread, and arranges to run async tasks,
    /// including `fut`, until `fut` completes.
    ///
    /// `fut` is polled on the executor thread, not on the Subthread.
    /// (We may change that in the future, allowing passing a non-`Send` future.)
    ///
    /// # Panics, abuse, and malfunctions
    ///
    /// `subthread_block_on_future` will malfunction or panic
    /// if called on a thread that isn't a Subthread from the same `MockExecutor`
    /// (ie a thread made with [`spawn_subthread`](MockExecutor::subthread_spawn)).
    ///
    /// If `fut` itself panics, the executor will panic.
    ///
    /// If the executor isn't running, `subthread_block_on_future` will hang indefinitely.
    /// See `spawn_subthread`.
    #[allow(clippy::cognitive_complexity)] // Splitting this up would be worse
    pub fn subthread_block_on_future<T: Send + 'static>(&self, fut: impl Future<Output = T>) -> T {
        // Identify which Subthread we are, via the thread-local descriptor.
        let id = match THREAD_DESCRIPTOR.get() {
            ThreadDescriptor::Subthread(id) => id,
            ThreadDescriptor::Executor => {
                panic!("subthread_block_on_future called from MockExecutor thread (async task?)")
            }
            ThreadDescriptor::Foreign => panic!(
                "subthread_block_on_future called on foreign thread (not spawned with spawn_subthread)"
            ),
        };
        trace!("MockExecutor thread {id:?}, subthread_block_on_future...");
        let mut fut = pin!(fut);

        // We yield once before the first poll, and once after Ready, to shake up the
        // execution order a bit, depending on the scheduling policy.
        let yield_ = |set_awake| self.shared.subthread_yield(id, set_awake);
        yield_(Some(SetAwake));

        let ret = loop {
            // Poll the provided future
            trace!("MockExecutor thread {id:?}, s.t._block_on_future polling...");
            let waker = ActualWaker::make_waker(&self.shared, id);
            let mut cx = Context::from_waker(&waker);
            let r: Poll<T> = fut.as_mut().poll(&mut cx);

            if let Ready(r) = r {
                trace!("MockExecutor thread {id:?}, s.t._block_on_future poll -> Ready");
                break r;
            }

            // Pending.  Switch back to the executor thread.
            // When the future becomes ready, the Waker will be woken, waking the task,
            // so that the executor will "poll" us again.
            trace!("MockExecutor thread {id:?}, s.t._block_on_future poll -> Pending");

            yield_(None);
        };

        yield_(Some(SetAwake));

        trace!("MockExecutor thread {id:?}, subthread_block_on_future complete.");

        ret
    }
}
986

            
987
impl Shared {
    /// Main entrypoint function for a Subthread
    ///
    /// Entered on a new `std::thread` thread created by
    /// [`subthread_spawn`](MockExecutor::subthread_spawn).
    ///
    /// When `call` completes, sends its returned value `T` to `output_tx`.
    fn subthread_entrypoint<T: Send + 'static>(
        self: Arc<Self>,
        id: TaskId,
        call: impl FnOnce() -> T + Send + 'static,
        output_tx: oneshot::Sender<Result<T, Box<dyn Any + Send>>>,
    ) {
        // Mark this OS thread as Subthread `id`, so that
        // `subthread_block_on_future` can tell it apart from foreign threads.
        THREAD_DESCRIPTOR.set(ThreadDescriptor::Subthread(id));
        trace!("MockExecutor thread {id:?}, entrypoint");
        // We start out Awake, but we wait for the executor to tell us to run.
        // This will be done the first time the task is "polled".
        {
            let data = self.lock();
            self.thread_context_switch_waitfor_instruction_to_run(
                data,
                ThreadDescriptor::Subthread(id),
            );
        }
        trace!("MockExecutor thread {id:?}, entering user code");
        // Run the user's actual thread function.
        // This will typically reenter us via subthread_block_on_future.
        // catch_unwind so a panicking `call` is reported via the Err variant
        // rather than aborting or poisoning anything.
        let ret = catch_unwind(AssertUnwindSafe(call));
        trace!("MockExecutor thread {id:?}, completed user code");
        // This makes the return value from subthread_spawn ready.
        // It will be polled by the executor in due course, presumably.
        output_tx.send(ret).unwrap_or_else(
            #[allow(clippy::unnecessary_lazy_evaluations)]
            |_| {}, // receiver dropped, maybe executor dropped or something?
        );
        {
            let mut data = self.lock();

            // Never poll this task again (so never schedule this thread)
            let _: Task = data.tasks.remove(id).expect("Subthread task vanished!");

            // Tell the executor it is scheduled now.
            // We carry on exiting, in parallel (holding the data lock).
            self.thread_context_switch_send_instruction_to_run(
                &mut data,
                ThreadDescriptor::Subthread(id),
                ThreadDescriptor::Executor,
            );
        }
    }

    /// Yield back to the executor from a subthread
    ///
    /// Checks that things are in order
    /// (in particular, that this task is in the data structure as a subthread)
    /// and switches to the executor thread.
    ///
    /// The caller must arrange that the task gets woken.
    ///
    /// With [`SetAwake`], sets our task awake, so that we'll be polled
    /// again as soon as we get to the top of the executor's queue.
    /// Otherwise, we'll be reentered after someone wakes a [`Waker`] for the task.
    fn subthread_yield(&self, us: TaskId, set_awake: Option<SetAwake>) {
        let mut data = self.lock();
        {
            let data = &mut *data;
            let task = data.tasks.get_mut(us).expect("Subthread task vanished!");
            // Sanity check: our task entry must still be marked as a Subthread.
            match &task.fut {
                Some(TaskFutureInfo::Subthread) => {}
                other => panic!("subthread_block_on_future but TFI {other:?}"),
            };
            if let Some(SetAwake) = set_awake {
                task.set_awake(us, &mut data.awake);
            }
        }
        // Hand the lock (and control) over to the executor thread.
        self.thread_context_switch(
            data,
            ThreadDescriptor::Subthread(us),
            ThreadDescriptor::Executor,
        );
    }

    /// Switch from (sub)thread `us` to (sub)thread `them`
    ///
    /// Returns when someone calls `thread_context_switch(.., us)`.
    fn thread_context_switch(
        &self,
        mut data: MutexGuard<Data>,
        us: ThreadDescriptor,
        them: ThreadDescriptor,
    ) {
        trace!("MockExecutor thread {us:?}, switching to {them:?}");
        self.thread_context_switch_send_instruction_to_run(&mut data, us, them);
        // Note: the guard is passed by value into the wait, so the lock is
        // released atomically with the condvar wait (no lost-wakeup window).
        self.thread_context_switch_waitfor_instruction_to_run(data, us);
    }

    /// Instruct the (sub)thread `them` to run
    ///
    /// Update `thread_to_run`, which will wake up `them`'s
    /// call to `thread_context_switch_waitfor_instruction_to_run`.
    ///
    /// Must be called from (sub)thread `us`.
    /// Part of `thread_context_switch`, not normally called directly.
    fn thread_context_switch_send_instruction_to_run(
        &self,
        data: &mut MutexGuard<Data>,
        us: ThreadDescriptor,
        them: ThreadDescriptor,
    ) {
        // Invariant: only the thread named by `thread_to_run` may mutate it.
        assert_eq!(data.thread_to_run, us);
        data.thread_to_run = them;
        self.thread_condvar.notify_all();
    }

    /// Await an instruction for this thread, `us`, to run
    ///
    /// Waits for `thread_to_run` to be `us`,
    /// waiting for `thread_condvar` as necessary.
    ///
    /// Part of `thread_context_switch`, not normally called directly.
    fn thread_context_switch_waitfor_instruction_to_run(
        &self,
        data: MutexGuard<Data>,
        us: ThreadDescriptor,
    ) {
        #[allow(let_underscore_lock)]
        let _: MutexGuard<_> = self
            .thread_condvar
            .wait_while(data, |data| {
                let live = data.thread_to_run;
                let resume = live == us;
                if resume {
                    trace!("MockExecutor thread {us:?}, resuming");
                } else {
                    trace!("MockExecutor thread {us:?}, waiting for {live:?}");
                }
                // We're in `.wait_while`, not `.wait_until`.  Confusing.
                !resume
            })
            .expect("data lock poisoned");
    }
}
//---------- ancillary and convenience functions ----------
/// Trait to let us assert at compile time that something is nicely `Sync` etc.
#[allow(dead_code)] // yes, we don't *use* anything from this trait
trait EnsureSyncSend: Sync + Send + 'static {}
// These impls are compile-time assertions: they fail to build if the
// named type ever stops being `Sync + Send + 'static`.
impl EnsureSyncSend for ActualWaker {}
impl EnsureSyncSend for MockExecutor {}
impl MockExecutor {
    /// Return the number of tasks running in this executor
    ///
    /// One possible use is for a test case to check that task(s)
    /// that ought to have exited, have indeed done so.
    ///
    /// In the usual case, the answer will be at least 1,
    /// because it counts the future passed to
    /// [`block_on`](MockExecutor::block_on)
    /// (perhaps via [`MockRuntime::test_with_various`](crate::MockRuntime::test_with_various)).
212
    pub fn n_tasks(&self) -> usize {
212
        self.shared.lock().tasks.len()
212
    }
}
impl Shared {
    /// Lock and obtain the guard
    ///
    /// Convenience method which panics on poison.
    /// (A poisoned lock means some other thread panicked while holding it;
    /// in a test executor there is nothing useful to do but propagate.)
    fn lock(&self) -> MutexGuard<Data> {
        self.data.lock().expect("data lock poisoned")
    }
}
impl Task {
    /// Set task `id` to `Awake` and arrange that it will be polled.
6389669
    fn set_awake(&mut self, id: TaskId, data_awake: &mut VecDeque<TaskId>) {
6389669
        match self.state {
838460
            Awake => {}
5551209
            Asleep(_) => {
5551209
                self.state = Awake;
5551209
                data_awake.push_back(id);
5551209
            }
        }
6389669
    }
}
//---------- ActualWaker as RawWaker ----------
/// Using [`ActualWaker`] in a [`RawWaker`]
///
/// We need to make a
/// [`Waker`] (the safe, type-erased, waker, used by actual futures)
/// which contains an
/// [`ActualWaker`] (our actual waker implementation, also safe).
///
/// `std` offers `Waker::from<Arc<impl Wake>>`.
/// But we want a bespoke `Clone` implementation, so we don't want to use `Arc`.
///
/// So instead, we implement the `RawWaker` API in terms of `ActualWaker`.
/// We keep the `ActualWaker` in a `Box`, and actually `clone` it (and the `Box`).
///
/// SAFETY
///
///  * The data pointer is `Box::<ActualWaker>::into_raw()`
///  * We share these when we clone
///  * No-one is allowed `&mut ActualWaker` unless there are no other clones
///  * So we may make references `&ActualWaker`
impl ActualWaker {
    /// Wrap up an [`ActualWaker`] as a type-erased [`Waker`] for passing to futures etc.
    fn new_waker(self) -> Waker {
        // SAFETY: `raw_new` produces a RawWaker honouring the contract
        // described in the comment block above this impl.
        unsafe { Waker::from_raw(self.raw_new()) }
    }

    /// Helper: wrap up an [`ActualWaker`] as a [`RawWaker`].
    fn raw_new(self) -> RawWaker {
        // The RawWaker data pointer is a leaked `Box<ActualWaker>`;
        // `raw_drop` reconstitutes and frees it.
        let self_: Box<ActualWaker> = self.into();
        let self_: *mut ActualWaker = Box::into_raw(self_);
        let self_: *const () = self_ as _;
        RawWaker::new(self_, &RAW_WAKER_VTABLE)
    }

    /// Implementation of [`RawWakerVTable`]'s `clone`
    unsafe fn raw_clone(self_: *const ()) -> RawWaker {
        unsafe {
            let self_: *const ActualWaker = self_ as _;
            // SAFETY: the pointer came from `Box::into_raw` in `raw_new`,
            // so it is valid and non-null; we only take a shared reference.
            let self_: &ActualWaker = self_.as_ref().unwrap_unchecked();
            // This also records a sleep location - see `impl Clone for ActualWaker`.
            let copy: ActualWaker = self_.clone();
            copy.raw_new()
        }
    }

    /// Implementation of [`RawWakerVTable`]'s `wake`
    unsafe fn raw_wake(self_: *const ()) {
        unsafe {
            // `wake` consumes the waker: wake by reference, then free it.
            Self::raw_wake_by_ref(self_);
            Self::raw_drop(self_);
        }
    }

    /// Implementation of [`RawWakerVTable`]'s `wake_by_ref`
    unsafe fn raw_wake_by_ref(self_: *const ()) {
        unsafe {
            let self_: *const ActualWaker = self_ as _;
            // SAFETY: as in `raw_clone`, valid `Box`'d `ActualWaker`, shared ref only.
            let self_: &ActualWaker = self_.as_ref().unwrap_unchecked();
            self_.wake();
        }
    }

    /// Implementation of [`RawWakerVTable`]'s `drop`
    unsafe fn raw_drop(self_: *const ()) {
        unsafe {
            let self_: *mut ActualWaker = self_ as _;
            // SAFETY: reconstitute the `Box` leaked in `raw_new`, then drop it.
            // After this the data pointer must never be used again.
            let self_: Box<ActualWaker> = Box::from_raw(self_);
            drop(self_);
        }
    }
}
/// vtable for `Box<ActualWaker>` as `RawWaker`
///
/// (Entries are in the order `RawWakerVTable::new` requires:
/// clone, wake, wake_by_ref, drop.)
//
// This ought to be in the impl block above, but
//   "associated `static` items are not allowed"
static RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
    ActualWaker::raw_clone,
    ActualWaker::raw_wake,
    ActualWaker::raw_wake_by_ref,
    ActualWaker::raw_drop,
);
//---------- Sleep location tracking and dumping ----------

/// We record "where a future went to sleep" as (just) a backtrace
///
/// This type alias allows us to mock `Backtrace` for miri.
/// (It also insulates from future choices about sleep location representation.)
#[cfg(not(miri))]
type SleepLocation = Backtrace;
impl Data {
    /// Dump tasks and their sleep location backtraces
2
    fn dump_backtraces(&self, f: &mut fmt::Formatter) -> fmt::Result {
8
        for (id, task) in self.tasks.iter() {
12
            let prefix = |f: &mut fmt::Formatter| write!(f, "{id:?}={task:?}: ");
8
            match &task.state {
                Awake => {
2
                    prefix(f)?;
2
                    writeln!(f, "awake")?;
                }
6
                Asleep(locs) => {
6
                    let n = locs.len();
6
                    for (i, loc) in locs.iter().enumerate() {
6
                        prefix(f)?;
6
                        writeln!(f, "asleep, backtrace {i}/{n}:\n{loc}",)?;
                    }
6
                    if n == 0 {
                        prefix(f)?;
                        writeln!(f, "asleep, no backtraces, Waker never cloned, stuck!",)?;
6
                    }
                }
            }
        }
2
        writeln!(
2
            f,
2
            "\nNote: there might be spurious traces, see docs for MockExecutor::debug_dump\n"
        )?;
2
        Ok(())
2
    }
}
/// Track sleep locations via `<Waker as Clone>`.
///
/// See [`MockExecutor::debug_dump`] for the explanation.
impl Clone for ActualWaker {
    fn clone(&self) -> Self {
        let id = self.id;
        // NB: cloning a waker takes the executor's data lock (to record the
        // backtrace), so a Waker for this executor must never be cloned while
        // that lock is already held on the same thread.
        if let Some(data) = self.upgrade_data() {
            // If the executor is gone, there is nothing to adjust
            let mut data = data.lock();
            if let Some(task) = data.tasks.get_mut(self.id) {
                match &mut task.state {
                    Awake => trace!("MockExecutor cloned waker for awake task {id:?}"),
                    // Sleeping task: record where it (presumably) went to sleep.
                    Asleep(locs) => locs.push(SleepLocation::force_capture()),
                }
            } else {
                trace!("MockExecutor cloned waker for dead task {id:?}");
            }
        }
        ActualWaker {
            data: self.data.clone(),
            id,
        }
    }
}
//---------- API for full debug dump ----------

/// Debugging dump of a `MockExecutor`'s state
///
/// Returned by [`MockExecutor::as_debug_dump`].
/// Format it with `Debug` (`{:?}`).
//
// Existence implies backtraces have been resolved
//
// We use `Either` so that we can also use this internally when we have &mut Data.
pub struct DebugDump<'a>(Either<&'a Data, MutexGuard<'a, Data>>);
impl MockExecutor {
    /// Dump the executor's state including backtraces of waiting tasks, to stderr
    ///
    /// This is considerably more extensive than simply
    /// `MockExecutor as Debug`.
    ///
    /// (This is a convenience method, which wraps
    /// [`MockExecutor::as_debug_dump()`].)
    ///
    /// ### Backtrace salience (possible spurious traces)
    ///
    /// **Summary**
    ///
    /// The technique used to capture backtraces when futures sleep is not 100% exact.
    /// It will usually show all the actual sleeping sites,
    /// but it might also show other backtraces which were part of
    /// the implementation of some complex relevant future.
    ///
    /// **Details**
    ///
    /// When a future's implementation wants to sleep,
    /// it needs to record the [`Waker`] (from the [`Context`])
    /// so that the "other end" can call `.wake()` on it later,
    /// when the future should be woken.
    ///
    /// Since `Context.waker()` gives `&Waker`, borrowed from the `Context`,
    /// the future must clone the `Waker`,
    /// and it must do so within the `poll()` call.
    ///
    /// A future which is waiting in a `select!` will typically
    /// show multiple traces, one for each branch.
    /// But,
    /// if a future sleeps on one thing, and then when polled again later,
    /// sleeps on something different, without waking up in between,
    /// both backtrace locations will be shown.
    /// And,
    /// a complicated future contraption *might* clone the `Waker` more times.
    /// So not every backtrace will necessarily be informative.
    ///
    /// ### Panics
    ///
    /// Panics on write errors.
    pub fn debug_dump(&self) {
        self.as_debug_dump().to_stderr();
    }

    /// Dump the executor's state including backtraces of waiting tasks
    ///
    /// This is considerably more extensive than simply
    /// `MockExecutor as Debug`.
    ///
    /// Returns an object for formatting with [`Debug`].
    /// To simply print the dump to stderr (eg in a test),
    /// use [`.debug_dump()`](MockExecutor::debug_dump).
    ///
    /// **Backtrace salience (possible spurious traces)** -
    /// see [`.debug_dump()`](MockExecutor::debug_dump).
    pub fn as_debug_dump(&self) -> DebugDump {
        // The returned DebugDump holds the data lock until it is dropped.
        let data = self.shared.lock();
        DebugDump(Right(data))
    }
}
impl Data {
    /// Convenience function: dump including backtraces, to stderr
    fn debug_dump(&mut self) {
        let dump = DebugDump(Left(self));
        dump.to_stderr();
    }
}
impl DebugDump<'_> {
    /// Convenience function: dump tasks and backtraces to stderr
    #[allow(clippy::wrong_self_convention)] // "to_stderr" doesn't mean "convert to stderr"
2
    fn to_stderr(self) {
2
        write!(io::stderr().lock(), "{:?}", self)
2
            .unwrap_or_else(|e| error_report!(e, "failed to write debug dump to stderr"));
2
    }
}
//---------- bespoke Debug impls ----------
impl Debug for DebugDump<'_> {
2
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2
        let self_: &Data = &self.0;
2
        writeln!(f, "MockExecutor state:\n{self_:#?}")?;
2
        writeln!(f, "MockExecutor task dump:")?;
2
        self_.dump_backtraces(f)?;
2
        Ok(())
2
    }
}
// See `impl Debug for Data` for notes on the output
impl Debug for Task {
122
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
122
        let Task { desc, state, fut } = self;
122
        write!(f, "{:?}", desc)?;
122
        write!(f, "=")?;
118
        match fut {
4
            None => write!(f, "P")?,
76
            Some(TaskFutureInfo::Normal(_)) => write!(f, "f")?,
30
            Some(TaskFutureInfo::Main) => write!(f, "m")?,
12
            Some(TaskFutureInfo::Subthread) => write!(f, "T")?,
        }
122
        match state {
110
            Awake => write!(f, "W")?,
12
            Asleep(locs) => write!(f, "s{}", locs.len())?,
        };
122
        Ok(())
122
    }
}
/// Helper: `Debug`s as a list of tasks, given the `Data` for lookups and a list of the ids
///
/// `Task`s in `Data` are printed as `Ti(ID)"SPEC"=FLAGS`.
///
/// `FLAGS` are:
///
///  * `T`: this task is for a Subthread (from subthread_spawn).
///  * `P`: this task is being polled (its `TaskFutureInfo` is absent)
///  * `f`: this is a normal task with a future and its future is present in `Data`
///  * `m`: this is the main task from `block_on`
///
///  * `W`: the task is awake
///  * `s<n>`: the task is asleep, and `<n>` is the number of recorded sleeping locations
//
// We do it this way because the naive dump from derive is very expansive
// and makes it impossible to see the wood for the trees.
// This very compact representation makes it easier to find a task of interest in the output.
//
// This is implemented in `impl Debug for Task`.
//
//
// rustc doesn't think automatically-derived Debug impls count for whether a thing is used.
// This has caused quite some fallout.  https://github.com/rust-lang/rust/pull/85200
// I think derive_more emits #[automatically_derived], so that even though we use this
// in our Debug impl, that construction is unused.
#[allow(dead_code)]
struct DebugTasks<'d, F>(&'d Data, F);
// See `impl Debug for Data` for notes on the output
impl<F, I> Debug for DebugTasks<'_, F>
where
    F: Fn() -> I,
    I: Iterator<Item = TaskId>,
{
4
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
4
        let DebugTasks(data, ids) = self;
10
        for (id, delim) in izip!(ids(), chain!(iter::once(""), iter::repeat(" ")),) {
10
            write!(f, "{delim}{id:?}")?;
10
            match data.tasks.get(id) {
                None => write!(f, "-")?,
10
                Some(task) => write!(f, "={task:?}")?,
            }
        }
4
        Ok(())
4
    }
}
/// Mock `Backtrace` for miri
///
/// See also the not-miri `type SleepLocation`, alias above.
#[cfg(miri)]
mod miri_sleep_location {
    /// Stand-in for `std::backtrace::Backtrace`, which is not available under miri.
    #[derive(Debug, derive_more::Display)]
    #[display("<SleepLocation>")]
    pub(super) struct SleepLocation {}

    impl SleepLocation {
        /// Mirror of `Backtrace::force_capture`; captures nothing.
        pub(super) fn force_capture() -> Self {
            SleepLocation {}
        }
    }
}
#[cfg(miri)]
use miri_sleep_location::SleepLocation;
#[cfg(test)]
mod test {
    // @@ begin test lint list maintained by maint/add_warning @@
    #![allow(clippy::bool_assert_comparison)]
    #![allow(clippy::clone_on_copy)]
    #![allow(clippy::dbg_macro)]
    #![allow(clippy::mixed_attributes_style)]
    #![allow(clippy::print_stderr)]
    #![allow(clippy::print_stdout)]
    #![allow(clippy::single_char_pattern)]
    #![allow(clippy::unwrap_used)]
    #![allow(clippy::unchecked_time_subtraction)]
    #![allow(clippy::useless_vec)]
    #![allow(clippy::needless_pass_by_value)]
    //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
    use super::*;
    use futures::channel::mpsc;
    use futures::{SinkExt as _, StreamExt as _};
    use strum::IntoEnumIterator;
    use tracing::info;
    #[cfg(not(miri))] // trace! asks for the time, which miri doesn't support
    use tracing_test::traced_test;

    /// Yields one `MockExecutor` per `SchedulingPolicy` variant,
    /// announcing each policy on stderr so test failures identify the policy.
    fn various_mock_executors() -> impl Iterator<Item = MockExecutor> {
        // This duplicates the part of the logic in MockRuntime::test_with_various which
        // relates to MockExecutor, because we don't have a MockRuntime::builder.
        // The only parameter to MockExecutor is its scheduling policy, so this seems fine.
        SchedulingPolicy::iter().map(|scheduling| {
            eprintln!("===== MockExecutor::with_scheduling({scheduling:?}) =====");
            MockExecutor::with_scheduling(scheduling)
        })
    }

    /// `block_on` returns the value of a trivially-ready future.
    #[cfg_attr(not(miri), traced_test)]
    #[test]
    fn simple() {
        let runtime = MockExecutor::default();
        let val = runtime.block_on(async { 42 });
        assert_eq!(val, 42);
    }

    /// Exercises `progress_until_stalled` with a chain of relay tasks.
    ///
    /// Sets up N mpsc channels; task `i` (for i in 0..N-1) reads from channel `i`,
    /// increments the value, and forwards it to the channel whose index equals the
    /// new value.  The main task keeps the receiver for the last channel (`rx_n`),
    /// so a `0` injected into channel 0 should emerge from `rx_n` as `N-1`,
    /// but only after the executor has been run to quiescence.
    #[cfg_attr(not(miri), traced_test)]
    #[test]
    fn stall() {
        let runtime = MockExecutor::default();
        runtime.block_on({
            let runtime = runtime.clone();
            async move {
                const N: usize = 3;
                let (mut txs, mut rxs): (Vec<_>, Vec<_>) =
                    (0..N).map(|_| mpsc::channel::<usize>(5)).unzip();
                // The main task holds the receiving end of the *last* channel;
                // the other receivers each go to one spawned relay task.
                let mut rx_n = rxs.pop().unwrap();
                for (i, mut rx) in rxs.into_iter().enumerate() {
                    runtime.spawn_identified(i, {
                        let mut txs = txs.clone();
                        async move {
                            loop {
                                eprintln!("task {i} rx...");
                                let v = rx.next().await.unwrap();
                                let nv = v + 1;
                                eprintln!("task {i} rx {v}, tx {nv}");
                                let v = nv;
                                // Forward the incremented value to the channel
                                // with the matching index.
                                txs[v].send(v).await.unwrap();
                            }
                        }
                    });
                }
                // Nothing has been injected yet, so rx_n must be empty,
                // both before and after running the executor to quiescence.
                dbg!();
                #[allow(deprecated)] // TODO(#2386)
                let _: mpsc::TryRecvError = rx_n.try_next().unwrap_err();
                dbg!();
                runtime.progress_until_stalled().await;
                dbg!();
                #[allow(deprecated)] // TODO(#2386)
                let _: mpsc::TryRecvError = rx_n.try_next().unwrap_err();
                dbg!();
                // Inject 0 into the start of the chain; after quiescence it
                // should have been relayed all the way to the last channel.
                txs[0].send(0).await.unwrap();
                dbg!();
                runtime.progress_until_stalled().await;
                dbg!();
                let r = rx_n.next().await;
                assert_eq!(r, Some(N - 1));
                dbg!();
                #[allow(deprecated)] // TODO(#2386)
                let _: mpsc::TryRecvError = rx_n.try_next().unwrap_err();
                // Now spawn a task that sends 0 into *every* channel, and check
                // that the main task can interleave stall-waits with receives.
                runtime.spawn_identified("tx", {
                    let txs = txs.clone();
                    async {
                        eprintln!("sending task...");
                        for (i, mut tx) in txs.into_iter().enumerate() {
                            eprintln!("sending 0 to {i}...");
                            tx.send(0).await.unwrap();
                        }
                        eprintln!("sending task done");
                    }
                });
                runtime.debug_dump();
                for i in 0..txs.len() {
                    eprintln!("main {i} wait stall...");
                    runtime.progress_until_stalled().await;
                    eprintln!("main {i} rx wait...");
                    let r = rx_n.next().await;
                    eprintln!("main {i} rx = {r:?}");
                    // Each receipt is either the directly-sent 0 or a value
                    // relayed through the whole chain (N-1).
                    assert!(r == Some(0) || r == Some(N - 1));
                }
                eprintln!("finishing...");
                runtime.progress_until_stalled().await;
                eprintln!("finished.");
            }
        });
    }

    /// Two `spawn_blocking` closures can complete and be awaited out of order.
    #[cfg_attr(not(miri), traced_test)]
    #[test]
    fn spawn_blocking() {
        let runtime = MockExecutor::default();
        runtime.block_on({
            let runtime = runtime.clone();
            async move {
                let thr_1 = runtime.spawn_blocking(|| 42);
                let thr_2 = runtime.spawn_blocking(|| 99);
                // Await in the reverse of spawn order.
                assert_eq!(thr_2.await, 99);
                assert_eq!(thr_1.await, 42);
            }
        });
    }

    #[cfg_attr(not(miri), traced_test)]
    #[test]
    fn drop_reentrancy() {
        // Check that dropping a completed task future is done *outside* the data lock.
        // Involves a contrived future whose Drop impl reenters the executor.
        //
        // If `_fut_drop_late = fut` in execute_until_first_stall (the main loop)
        // is replaced with `drop(fut)` (dropping the future at the wrong moment),
        // we do indeed get deadlock, so this test case is working.

        /// Future that is immediately ready, but whose `Drop` impl
        /// spawns a new task on the executor (reentering it).
        struct ReentersOnDrop {
            runtime: MockExecutor,
        }
        impl Future for ReentersOnDrop {
            type Output = ();
            fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<()> {
                Poll::Ready(())
            }
        }
        impl Drop for ReentersOnDrop {
            fn drop(&mut self) {
                self.runtime
                    .spawn_identified("dummy", futures::future::ready(()));
            }
        }
        for runtime in various_mock_executors() {
            runtime.block_on(async {
                runtime.spawn_identified("trapper", {
                    let runtime = runtime.clone();
                    ReentersOnDrop { runtime }
                });
            });
        }
    }

    /// A Subthread can block on a oneshot sent to by the main task.
    #[cfg_attr(not(miri), traced_test)]
    #[test]
    fn subthread_oneshot() {
        for runtime in various_mock_executors() {
            runtime.block_on(async {
                let (tx, rx) = oneshot::channel();
                info!("spawning subthread");
                let thr = runtime.subthread_spawn("thr1", {
                    let runtime = runtime.clone();
                    move || {
                        info!("subthread_block_on_future...");
                        let i = runtime.subthread_block_on_future(rx).unwrap();
                        info!("subthread_block_on_future => {i}");
                        i + 1
                    }
                });
                info!("main task sending");
                tx.send(12).unwrap();
                info!("main task sent");
                let r = thr.await.unwrap();
                info!("main task thr => {r}");
                // 12 sent, incremented by the subthread.
                assert_eq!(r, 13);
            });
        }
    }

    /// Repeated round-trips between the main task and a Subthread.
    ///
    /// The subthread loops: receive `i`, reply `i + 12`; it exits (returning 42)
    /// when the main task drops the sending end of its input channel.
    #[cfg_attr(not(miri), traced_test)]
    #[test]
    #[allow(clippy::cognitive_complexity)] // It's is not that complicated, really.
    fn subthread_pingpong() {
        for runtime in various_mock_executors() {
            runtime.block_on(async {
                let (mut i_tx, mut i_rx) = mpsc::channel(1);
                let (mut o_tx, mut o_rx) = mpsc::channel(1);
                info!("spawning subthread");
                let thr = runtime.subthread_spawn("thr", {
                    let runtime = runtime.clone();
                    move || {
                        while let Some(i) = {
                            info!("thread receiving ...");
                            runtime.subthread_block_on_future(i_rx.next())
                        } {
                            let o = i + 12;
                            info!("thread received {i}, sending {o}");
                            runtime.subthread_block_on_future(o_tx.send(o)).unwrap();
                            info!("thread sent {o}");
                        }
                        info!("thread exiting");
                        42
                    }
                });
                for i in 0..2 {
                    info!("main task sending {i}");
                    i_tx.send(i).await.unwrap();
                    info!("main task sent {i}");
                    let o = o_rx.next().await.unwrap();
                    info!("main task recv => {o}");
                    assert_eq!(o, i + 12);
                }
                // Dropping the sender makes the subthread's receive yield None,
                // ending its loop.
                info!("main task dropping sender");
                drop(i_tx);
                info!("main task awaiting thread");
                let r = thr.await.unwrap();
                info!("main task complete");
                assert_eq!(r, 42);
            });
        }
    }
}