diff --git a/components/salsa-macros/src/query_group.rs b/components/salsa-macros/src/query_group.rs index d6f2ad46..160104d8 100644 --- a/components/salsa-macros/src/query_group.rs +++ b/components/salsa-macros/src/query_group.rs @@ -282,9 +282,9 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream See `{fn_name}` for details. - *Note:* Setting values will trigger cancelation + *Note:* Setting values will trigger cancellation of any ongoing queries; this method blocks until - those queries have been canceled. + those queries have been cancelled. ", fn_name = fn_name ); @@ -296,9 +296,9 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream See `{fn_name}` for details. - *Note:* Setting values will trigger cancelation + *Note:* Setting values will trigger cancellation of any ongoing queries; this method blocks until - those queries have been canceled. + those queries have been cancelled. ", fn_name = fn_name ); @@ -431,7 +431,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream /// Like `in_db`, but gives access to methods for setting the /// value of an input. Not applicable to derived queries. /// - /// # Threads, cancelation, and blocking + /// # Threads, cancellation, and blocking /// /// Mutating the value of a query cannot be done while there are /// still other queries executing. If you are using your database @@ -447,15 +447,15 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream /// deadlock. /// /// Before blocking, the thread that is attempting to `set` will - /// also set a cancelation flag. This will cause any query - /// invocations in other threads to unwind with a `Canceled` + /// also set a cancellation flag. This will cause any query + /// invocations in other threads to unwind with a `Cancelled` /// sentinel value and eventually let the `set` succeed once all /// threads have unwound past the salsa invocation. 
/// /// If your query implementations are performing expensive /// operations without invoking another query, you can also use - /// the `Runtime::unwind_if_canceled` method to check for an - /// ongoing cancelation and bring those operations to a close, + /// the `Runtime::unwind_if_cancelled` method to check for an + /// ongoing cancellation and bring those operations to a close, /// thus allowing the `set` to succeed. Otherwise, long-running /// computations may lead to "starvation", meaning that the /// thread attempting to `set` has to wait a long, long time. =) diff --git a/src/derived.rs b/src/derived.rs index afd09b80..14b7aa69 100644 --- a/src/derived.rs +++ b/src/derived.rs @@ -162,7 +162,7 @@ where db: &>::DynDb, key: &Q::Key, ) -> Result> { - db.salsa_runtime().unwind_if_canceled(); + db.salsa_runtime().unwind_if_cancelled(); let slot = self.slot(key); let StampedValue { diff --git a/src/derived/slot.rs b/src/derived/slot.rs index e0c608a6..b080e927 100644 --- a/src/derived/slot.rs +++ b/src/derived/slot.rs @@ -10,7 +10,7 @@ use crate::revision::Revision; use crate::runtime::Runtime; use crate::runtime::RuntimeId; use crate::runtime::StampedValue; -use crate::Canceled; +use crate::Cancelled; use crate::{ CycleError, Database, DatabaseKeyIndex, DiscardIf, DiscardWhat, Event, EventKind, QueryDb, SweepStrategy, @@ -355,10 +355,10 @@ where }); let result = future.wait().unwrap_or_else(|| { - // If the other thread panics, we treat this as cancelation: there is no + // If the other thread panics, we treat this as cancellation: there is no // need to panic ourselves, since the original panic will already invoke // the panic hook and bubble up to the thread boundary (or be caught). 
- Canceled::throw() + Cancelled::throw() }); ProbeState::UpToDate(if result.cycle.is_empty() { Ok(result.value) @@ -547,7 +547,7 @@ where let runtime = db.salsa_runtime(); let revision_now = runtime.current_revision(); - runtime.unwind_if_canceled(); + runtime.unwind_if_cancelled(); debug!( "maybe_changed_since({:?}) called with revision={:?}, revision_now={:?}", @@ -582,7 +582,7 @@ where // Release our lock on `self.state`, so other thread can complete. std::mem::drop(state); - let result = future.wait().unwrap_or_else(|| Canceled::throw()); + let result = future.wait().unwrap_or_else(|| Cancelled::throw()); return !result.cycle.is_empty() || result.value.changed_at > revision; } diff --git a/src/input.rs b/src/input.rs index eda891e4..51995709 100644 --- a/src/input.rs +++ b/src/input.rs @@ -99,7 +99,7 @@ where db: &>::DynDb, key: &Q::Key, ) -> Result> { - db.salsa_runtime().unwind_if_canceled(); + db.salsa_runtime().unwind_if_cancelled(); let slot = self .slot(key) diff --git a/src/interned.rs b/src/interned.rs index 7a8e14d4..073ec72f 100644 --- a/src/interned.rs +++ b/src/interned.rs @@ -322,7 +322,7 @@ where db: &>::DynDb, key: &Q::Key, ) -> Result> { - db.salsa_runtime().unwind_if_canceled(); + db.salsa_runtime().unwind_if_cancelled(); let slot = self.intern_index(db, key); let changed_at = slot.interned_at; diff --git a/src/lib.rs b/src/lib.rs index 910ee3ab..bd5cde47 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -272,8 +272,8 @@ pub trait ParallelDatabase: Database + Send { /// series of queries in parallel and arranging the results. Using /// this method for that purpose ensures that those queries will /// see a consistent view of the database (it is also advisable - /// for those queries to use the [`Runtime::unwind_if_canceled`] - /// method to check for cancelation). + /// for those queries to use the [`Runtime::unwind_if_cancelled`] + /// method to check for cancellation). /// /// # Panics /// @@ -537,7 +537,7 @@ where /// an active query computation. 
/// /// If you are using `snapshot`, see the notes on blocking - /// and cancelation on [the `query_mut` method]. + /// and cancellation on [the `query_mut` method]. /// /// [the `query_mut` method]: trait.Database.html#method.query_mut pub fn set(&mut self, key: Q::Key, value: Q::Value) @@ -552,7 +552,7 @@ where /// outside of an active query computation. /// /// If you are using `snapshot`, see the notes on blocking - /// and cancelation on [the `query_mut` method]. + /// and cancellation on [the `query_mut` method]. /// /// [the `query_mut` method]: trait.Database.html#method.query_mut pub fn set_with_durability(&mut self, key: Q::Key, value: Q::Value, durability: Durability) @@ -637,40 +637,40 @@ where } } -/// A panic payload indicating that a salsa revision was canceled. +/// A panic payload indicating that a salsa revision was cancelled. #[derive(Debug)] #[non_exhaustive] -pub struct Canceled; +pub struct Cancelled; -impl Canceled { +impl Cancelled { fn throw() -> ! { // We use resume and not panic here to avoid running the panic // hook (that is, to avoid collecting and printing backtrace). std::panic::resume_unwind(Box::new(Self)); } - /// Runs `f`, and catches any salsa cancelation. - pub fn catch(f: F) -> Result + /// Runs `f`, and catches any salsa cancellation. + pub fn catch(f: F) -> Result where F: FnOnce() -> T + UnwindSafe, { match panic::catch_unwind(f) { Ok(t) => Ok(t), Err(payload) => match payload.downcast() { - Ok(canceled) => Err(*canceled), + Ok(cancelled) => Err(*cancelled), Err(payload) => panic::resume_unwind(payload), }, } } } -impl std::fmt::Display for Canceled { +impl std::fmt::Display for Cancelled { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("canceled") + f.write_str("cancelled") } } -impl std::error::Error for Canceled {} +impl std::error::Error for Cancelled {} // Re-export the procedural macros. 
#[allow(unused_imports)] diff --git a/src/runtime.rs b/src/runtime.rs index 0384ec7f..23b14622 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -1,6 +1,6 @@ use crate::plumbing::CycleDetected; use crate::revision::{AtomicRevision, Revision}; -use crate::{durability::Durability, Canceled}; +use crate::{durability::Durability, Cancelled}; use crate::{CycleError, Database, DatabaseKeyIndex, Event, EventKind}; use log::debug; use parking_lot::lock_api::{RawRwLock, RawRwLockRecursive}; @@ -40,7 +40,7 @@ pub struct Runtime { /// Shared state that is accessible via all runtimes. shared_state: Arc, - on_cancelation_check: Option>, + on_cancellation_check: Option>, } impl Default for Runtime { @@ -50,7 +50,7 @@ impl Default for Runtime { revision_guard: None, shared_state: Default::default(), local_state: Default::default(), - on_cancelation_check: None, + on_cancellation_check: None, } } } @@ -89,7 +89,7 @@ impl Runtime { revision_guard: Some(revision_guard), shared_state: self.shared_state.clone(), local_state: Default::default(), - on_cancelation_check: None, + on_cancellation_check: None, } } @@ -117,7 +117,7 @@ impl Runtime { /// Q and then (c) doing a sweep. /// /// **WARNING:** Just like an ordinary write, this method triggers - /// cancelation. If you invoke it while a snapshot exists, it + /// cancellation. If you invoke it while a snapshot exists, it /// will block until that snapshot is dropped -- if that snapshot /// is owned by the current thread, this could trigger deadlock. pub fn synthetic_write(&mut self, durability: Durability) { @@ -161,44 +161,44 @@ impl Runtime { self.shared_state.pending_revision.load() } - /// Starts unwinding the stack if the current revision is canceled. + /// Starts unwinding the stack if the current revision is cancelled. /// /// This method can be called by query implementations that perform /// potentially expensive computations, in order to speed up propagation of - /// cancelation. + /// cancellation. 
/// - /// Cancelation will automatically be triggered by salsa on any query + /// Cancellation will automatically be triggered by salsa on any query /// invocation. #[inline] - pub fn unwind_if_canceled(&self) { - if let Some(callback) = &self.on_cancelation_check { + pub fn unwind_if_cancelled(&self) { + if let Some(callback) = &self.on_cancellation_check { callback(); } let current_revision = self.current_revision(); let pending_revision = self.pending_revision(); debug!( - "unwind_if_canceled: current_revision={:?}, pending_revision={:?}", + "unwind_if_cancelled: current_revision={:?}, pending_revision={:?}", current_revision, pending_revision ); if pending_revision > current_revision { - self.unwind_canceled(); + self.unwind_cancelled(); } } #[cold] - fn unwind_canceled(&self) { + fn unwind_cancelled(&self) { self.report_untracked_read(); - Canceled::throw(); + Cancelled::throw(); } - /// Registers a callback to be invoked every time [`Runtime::unwind_if_canceled`] is called + /// Registers a callback to be invoked every time [`Runtime::unwind_if_cancelled`] is called /// (either automatically by salsa, or manually by user code). - pub fn set_cancelation_check_callback(&mut self, callback: F) + pub fn set_cancellation_check_callback(&mut self, callback: F) where F: Fn() + Send + RefUnwindSafe + 'static, { - self.on_cancelation_check = Some(Box::new(callback)); + self.on_cancellation_check = Some(Box::new(callback)); } /// Acquires the **global query write lock** (ensuring that no queries are @@ -207,7 +207,7 @@ impl Runtime { /// /// While we wait to acquire the global query write lock, this method will /// also increment `pending_revision_increments`, thus signalling to queries - /// that their results are "canceled" and they should abort as expeditiously + /// that their results are "cancelled" and they should abort as expeditiously /// as possible. /// /// The `op` closure should actually perform the writes needed. 
It is given @@ -234,7 +234,7 @@ impl Runtime { } // Set the `pending_revision` field so that people - // know current revision is canceled. + // know current revision is cancelled. let current_revision = self.shared_state.pending_revision.fetch_then_increment(); // To modify the revision, we need the lock. @@ -464,7 +464,7 @@ struct SharedState { /// This is typically equal to `revision` -- set to `revision+1` /// when a new revision is pending (which implies that the current - /// revision is canceled). + /// revision is cancelled). pending_revision: AtomicRevision, /// Stores the "last change" revision for values of each duration. @@ -738,7 +738,7 @@ impl RevisionGuard { // // This has the side-effect that we are responsible to ensure // that people contending for the write lock do not starve, - // but this is what we achieve via the cancelation mechanism. + // but this is what we achieve via the cancellation mechanism. // // (In particular, since we only ever have one "mutating // handle" to the database, the only contention for the global diff --git a/tests/parallel/cancelation.rs b/tests/parallel/cancellation.rs similarity index 66% rename from tests/parallel/cancelation.rs rename to tests/parallel/cancellation.rs index 664c80fc..ef70100e 100644 --- a/tests/parallel/cancelation.rs +++ b/tests/parallel/cancellation.rs @@ -1,11 +1,11 @@ -use crate::setup::{CancelationFlag, Knobs, ParDatabase, ParDatabaseImpl, WithValue}; -use salsa::{Canceled, ParallelDatabase}; +use crate::setup::{CancellationFlag, Knobs, ParDatabase, ParDatabaseImpl, WithValue}; +use salsa::{Cancelled, ParallelDatabase}; -macro_rules! assert_canceled { +macro_rules! 
assert_cancelled { ($thread:expr) => { match $thread.join() { - Ok(value) => panic!("expected cancelation, got {:?}", value), - Err(payload) => match payload.downcast::() { + Ok(value) => panic!("expected cancellation, got {:?}", value), + Err(payload) => match payload.downcast::() { Ok(_) => {} Err(payload) => ::std::panic::resume_unwind(payload), }, @@ -13,11 +13,11 @@ macro_rules! assert_canceled { }; } -/// Add test where a call to `sum` is canceled by a simultaneous +/// Add test where a call to `sum` is cancelled by a simultaneous /// write. Check that we recompute the result in next revision, even /// though none of the inputs have changed. #[test] -fn in_par_get_set_cancelation_immediate() { +fn in_par_get_set_cancellation_immediate() { let mut db = ParDatabaseImpl::default(); db.set_input('a', 100); @@ -28,12 +28,12 @@ fn in_par_get_set_cancelation_immediate() { let thread1 = std::thread::spawn({ let db = db.snapshot(); move || { - // This will not return until it sees cancelation is + // This will not return until it sees cancellation is // signaled. db.knobs().sum_signal_on_entry.with_value(1, || { db.knobs() - .sum_wait_for_cancelation - .with_value(CancelationFlag::Panic, || db.sum("abc")) + .sum_wait_for_cancellation + .with_value(CancellationFlag::Panic, || db.sum("abc")) }) } }); @@ -41,7 +41,7 @@ fn in_par_get_set_cancelation_immediate() { // Wait until we have entered `sum` in the other thread. db.wait_for(1); - // Try to set the input. This will signal cancelation. + // Try to set the input. This will signal cancellation. db.set_input('d', 1000); // This should re-compute the value (even though no input has changed). 
@@ -51,14 +51,14 @@ fn in_par_get_set_cancelation_immediate() { }); assert_eq!(db.sum("d"), 1000); - assert_canceled!(thread1); + assert_cancelled!(thread1); assert_eq!(thread2.join().unwrap(), 111); } -/// Here, we check that `sum`'s cancelation is propagated +/// Here, we check that `sum`'s cancellation is propagated /// to `sum2` properly. #[test] -fn in_par_get_set_cancelation_transitive() { +fn in_par_get_set_cancellation_transitive() { let mut db = ParDatabaseImpl::default(); db.set_input('a', 100); @@ -69,12 +69,12 @@ fn in_par_get_set_cancelation_transitive() { let thread1 = std::thread::spawn({ let db = db.snapshot(); move || { - // This will not return until it sees cancelation is + // This will not return until it sees cancellation is // signaled. db.knobs().sum_signal_on_entry.with_value(1, || { db.knobs() - .sum_wait_for_cancelation - .with_value(CancelationFlag::Panic, || db.sum2("abc")) + .sum_wait_for_cancellation + .with_value(CancellationFlag::Panic, || db.sum2("abc")) }) } }); @@ -82,7 +82,7 @@ fn in_par_get_set_cancelation_transitive() { // Wait until we have entered `sum` in the other thread. db.wait_for(1); - // Try to set the input. This will signal cancelation. + // Try to set the input. This will signal cancellation. db.set_input('d', 1000); // This should re-compute the value (even though no input has changed). @@ -92,13 +92,13 @@ fn in_par_get_set_cancelation_transitive() { }); assert_eq!(db.sum2("d"), 1000); - assert_canceled!(thread1); + assert_cancelled!(thread1); assert_eq!(thread2.join().unwrap(), 111); } /// https://github.com/salsa-rs/salsa/issues/66 #[test] -fn no_back_dating_in_cancelation() { +fn no_back_dating_in_cancellation() { let mut db = ParDatabaseImpl::default(); db.set_input('a', 1); @@ -106,11 +106,11 @@ fn no_back_dating_in_cancelation() { let db = db.snapshot(); move || { // Here we compute a long-chain of queries, - // but the last one gets canceled. + // but the last one gets cancelled. 
db.knobs().sum_signal_on_entry.with_value(1, || { db.knobs() - .sum_wait_for_cancelation - .with_value(CancelationFlag::Panic, || db.sum3("a")) + .sum_wait_for_cancellation + .with_value(CancellationFlag::Panic, || db.sum3("a")) }) } }); @@ -120,11 +120,11 @@ fn no_back_dating_in_cancelation() { // Set unrelated input to bump revision db.set_input('b', 2); - // Here we should recompuet the whole chain again, clearing the cancelation + // Here we should recompute the whole chain again, clearing the cancellation // state. If we get `usize::max()` here, it is a bug! assert_eq!(db.sum3("a"), 1); - assert_canceled!(thread1); + assert_cancelled!(thread1); db.set_input('a', 3); db.set_input('a', 4); diff --git a/tests/parallel/frozen.rs b/tests/parallel/frozen.rs index 6859e4c1..677ca835 100644 --- a/tests/parallel/frozen.rs +++ b/tests/parallel/frozen.rs @@ -6,11 +6,11 @@ use std::{ sync::Arc, }; -/// Add test where a call to `sum` is canceled by a simultaneous +/// Add test where a call to `sum` is cancelled by a simultaneous /// write. Check that we recompute the result in next revision, even /// though none of the inputs have changed. #[test] -fn in_par_get_set_cancelation() { +fn in_par_get_set_cancellation() { let mut db = ParDatabaseImpl::default(); db.set_input('a', 1); @@ -21,16 +21,19 @@ fn in_par_get_set_cancelation() { let db = db.snapshot(); let signal = signal.clone(); move || { - // Check that cancelation flag is not yet set, because + // Check that cancellation flag is not yet set, because // `set` cannot have been called yet. - catch_unwind(AssertUnwindSafe(|| db.salsa_runtime().unwind_if_canceled())).unwrap(); + catch_unwind(AssertUnwindSafe(|| { + db.salsa_runtime().unwind_if_cancelled() + })) + .unwrap(); // Signal other thread to proceed. 
signal.signal(1); - // Wait for other thread to signal cancelation + // Wait for other thread to signal cancellation catch_unwind(AssertUnwindSafe(|| loop { - db.salsa_runtime().unwind_if_canceled(); + db.salsa_runtime().unwind_if_cancelled(); std::thread::yield_now(); })) .unwrap_err(); @@ -40,7 +43,7 @@ fn in_par_get_set_cancelation() { let thread2 = std::thread::spawn({ let signal = signal.clone(); move || { - // Wait until thread 1 has asserted that they are not canceled + // Wait until thread 1 has asserted that they are not cancelled // before we invoke `set.` signal.wait_for(1); diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs index 7d9a718f..9c6e5360 100644 --- a/tests/parallel/main.rs +++ b/tests/parallel/main.rs @@ -1,6 +1,6 @@ mod setup; -mod cancelation; +mod cancellation; mod frozen; mod independent; mod race; diff --git a/tests/parallel/race.rs b/tests/parallel/race.rs index 36899faf..914d6d81 100644 --- a/tests/parallel/race.rs +++ b/tests/parallel/race.rs @@ -1,7 +1,7 @@ use std::panic::AssertUnwindSafe; use crate::setup::{ParDatabase, ParDatabaseImpl}; -use salsa::{Canceled, ParallelDatabase}; +use salsa::{Cancelled, ParallelDatabase}; /// Test where a read and a set are racing with one another. /// Should be atomic. @@ -16,7 +16,7 @@ fn in_par_get_set_race() { let thread1 = std::thread::spawn({ let db = db.snapshot(); move || { - Canceled::catch(AssertUnwindSafe(|| { + Cancelled::catch(AssertUnwindSafe(|| { let v = db.sum("abc"); v })) @@ -30,13 +30,13 @@ fn in_par_get_set_race() { // If the 1st thread runs first, you get 111, otherwise you get // 1011; if they run concurrently and the 1st thread observes the - // cancelation, it'll unwind. + // cancellation, it'll unwind. 
let result1 = thread1.join().unwrap(); if let Ok(value1) = result1 { assert!(value1 == 111 || value1 == 1011, "illegal result {}", value1); } - // thread2 can not observe a cancelation because it performs a + // thread2 can not observe a cancellation because it performs a // database write before running any other queries. assert_eq!(thread2.join().unwrap(), 1000); } diff --git a/tests/parallel/setup.rs b/tests/parallel/setup.rs index c70a4807..78ef1ff0 100644 --- a/tests/parallel/setup.rs +++ b/tests/parallel/setup.rs @@ -58,14 +58,14 @@ impl WithValue for Cell { } #[derive(Clone, Copy, PartialEq, Eq)] -pub(crate) enum CancelationFlag { +pub(crate) enum CancellationFlag { Down, Panic, } -impl Default for CancelationFlag { - fn default() -> CancelationFlag { - CancelationFlag::Down +impl Default for CancellationFlag { + fn default() -> CancellationFlag { + CancellationFlag::Down } } @@ -90,9 +90,9 @@ pub(crate) struct KnobsStruct { /// If true, invocations of `sum` will panic before they exit. pub(crate) sum_should_panic: Cell, - /// If true, invocations of `sum` will wait for cancelation before + /// If true, invocations of `sum` will wait for cancellation before /// they exit. - pub(crate) sum_wait_for_cancelation: Cell, + pub(crate) sum_wait_for_cancellation: Cell, /// Invocations of `sum` will wait for this stage prior to exiting. 
pub(crate) sum_wait_for_on_exit: Cell, @@ -119,12 +119,12 @@ fn sum(db: &dyn ParDatabase, key: &'static str) -> usize { sum += db.input(ch); } - match db.knobs().sum_wait_for_cancelation.get() { - CancelationFlag::Down => (), - CancelationFlag::Panic => { - log::debug!("waiting for cancelation"); + match db.knobs().sum_wait_for_cancellation.get() { + CancellationFlag::Down => (), + CancellationFlag::Panic => { + log::debug!("waiting for cancellation"); loop { - db.salsa_runtime().unwind_if_canceled(); + db.salsa_runtime().unwind_if_cancelled(); std::thread::yield_now(); } } diff --git a/tests/parallel/stress.rs b/tests/parallel/stress.rs index 1c61b9de..fea9e3a6 100644 --- a/tests/parallel/stress.rs +++ b/tests/parallel/stress.rs @@ -4,7 +4,7 @@ use rand::Rng; use salsa::ParallelDatabase; use salsa::Snapshot; use salsa::SweepStrategy; -use salsa::{Canceled, Database}; +use salsa::{Cancelled, Database}; // Number of operations a reader performs const N_MUTATOR_OPS: usize = 100; @@ -21,7 +21,7 @@ trait StressDatabase: salsa::Database { } fn b(db: &dyn StressDatabase, key: usize) -> usize { - db.salsa_runtime().unwind_if_canceled(); + db.salsa_runtime().unwind_if_cancelled(); db.a(key) } @@ -56,7 +56,7 @@ enum MutatorOp { WriteOp(WriteOp), LaunchReader { ops: Vec, - check_cancelation: bool, + check_cancellation: bool, }, } @@ -85,7 +85,7 @@ impl rand::distributions::Distribution for rand::distributions::Stand } else { MutatorOp::LaunchReader { ops: (0..N_READER_OPS).map(|_| rng.gen()).collect(), - check_cancelation: rng.gen(), + check_cancellation: rng.gen(), } } } @@ -118,10 +118,10 @@ impl rand::distributions::Distribution for rand::distributions::Standard } } -fn db_reader_thread(db: &StressDatabaseImpl, ops: Vec, check_cancelation: bool) { +fn db_reader_thread(db: &StressDatabaseImpl, ops: Vec, check_cancellation: bool) { for op in ops { - if check_cancelation { - db.salsa_runtime().unwind_if_canceled(); + if check_cancellation { + 
db.salsa_runtime().unwind_if_cancelled(); } op.execute(db); } @@ -188,10 +188,10 @@ fn stress_test() { MutatorOp::WriteOp(w) => w.execute(&mut db), MutatorOp::LaunchReader { ops, - check_cancelation, + check_cancellation, } => all_threads.push(std::thread::spawn({ let db = db.snapshot(); - move || Canceled::catch(|| db_reader_thread(&db, ops, check_cancelation)) + move || Cancelled::catch(|| db_reader_thread(&db, ops, check_cancellation)) })), } }