//! Proof-of-concept for applied operation use.
//!
//! These functions should obviously be moved to [`super`] at one point.

use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rand::{seq::SliceRandom, Rng};
use tor_basic_utils::retry::RetryDelay;
use tor_dircommon::{authority::AuthorityContacts, config::DirTolerance};
use tor_netdoc::doc::netstatus::ConsensusFlavor;
use tor_rtcompat::PreferredRuntime;

use crate::{
    database::Timestamp,
    err::IsFatal,
    mirror::operation::{ConsensusBoundData, StaticEngine},
};

/// Proof-of-concept main execution function for this module
20
///
21
/// Right now, this is a proof-of-concept that just panics in the case of a
22
/// fatal error, but does proper retry handling for non-fatal errors.
23
// TODO DIRMIRROR: Make this not a poc.
24
// TODO DIRMIRROR: Add logging.
25
// TODO DIRMIRROR: Diziet thinks the endpoint selection/retry logic is broken
26
//   eg that it could reach `expect("attempted all authorities")`.
27
//   At the very least it is confusing.  See
28
//   https://gitlab.torproject.org/tpo/core/arti/-/merge_requests/3664#note_3352738
29
async fn serve<R: Rng, F: Fn() -> Timestamp>(
30
    pool: &Pool<SqliteConnectionManager>,
31
    flavor: ConsensusFlavor,
32
    authorities: AuthorityContacts,
33
    tolerance: DirTolerance,
34
    rng: &mut R,
35
    now_fn: F,
36
) {
37
    let mut data = ConsensusBoundData::None;
38
    let engine = StaticEngine {
39
        flavor,
40
        authorities,
41
        tolerance,
42
        rt: PreferredRuntime::current().expect("unable to get runtime"),
43
    };
44

            
45
    // Shuffle the list of download endpoints.
46
    let mut downloads = engine.authorities.downloads().clone();
47
    downloads.shuffle(rng);
48
    // Keeps track of the authority we currently use, i.e. preferred authority.
49
    let mut current = 0;
50

            
51
    let mut retry = RetryDelay::default();
52
    loop {
53
        let endpoint = downloads.get(current).expect("attempted all authorities");
54

            
55
        // Perform the FSM execution.
56
        let res = engine
57
            .execute(pool, &mut data, endpoint, now_fn(), rng)
58
            .await;
59

            
60
        match res {
61
            Ok(()) => {
62
                retry.reset();
63

            
64
                // Swap the currently used authority with the front and reset
65
                // current to zero.
66
                //
67
                // With this design, we will loose track on which authorities
68
                // were successful and which were not on every successful
69
                // return.  At one point, we have to do this.  Probably after
70
                // every consensus, but not after every Ok.  However, for this
71
                // we would need a way to learn when we got a new consensus.
72
                // It would probably make most sense to modify the return type
73
                // of execute() to return something like the next state plus
74
                // previous state or maybe an even simpler bool that returns
75
                // true when the consensus got replaced.
76
                downloads.swap(0, current);
77
            }
78
            Err(e) => {
79
                // Check whether the error is fatal.
80
                if e.is_fatal() {
81
                    panic!("fatal error: {e}");
82
                }
83

            
84
                // Non-fatal error means we should wait and try again.
85
                current += 1;
86
                let delay = retry.next_delay(rng);
87
                tokio::time::sleep(delay).await;
88
            }
89
        }
90
    }
91
}