mirror of
https://github.com/salsa-rs/salsa.git
synced 2024-11-25 04:27:52 +00:00
Merge pull request #527 from nikomatsakis/spindle
Some checks failed
Book / Book (push) Has been cancelled
Test / Test (false, beta) (push) Has been cancelled
Test / Test (false, stable) (push) Has been cancelled
Test / Test (true, nightly) (push) Has been cancelled
Test / Miri (push) Has been cancelled
Book / Deploy (push) Has been cancelled
Some checks failed
Book / Book (push) Has been cancelled
Test / Test (false, beta) (push) Has been cancelled
Test / Test (false, stable) (push) Has been cancelled
Test / Test (true, nightly) (push) Has been cancelled
Test / Miri (push) Has been cancelled
Book / Deploy (push) Has been cancelled
re-enable parallel tests
This commit is contained in:
commit
e4ce917f6e
49 changed files with 1126 additions and 1478 deletions
|
@ -35,7 +35,8 @@ macro_rules! setup_accumulator_impl {
|
|||
where
|
||||
Db: ?Sized + $zalsa::Database,
|
||||
{
|
||||
$ingredient(db.as_salsa_database()).push(db.runtime(), self);
|
||||
let db = db.as_salsa_database();
|
||||
$ingredient(db).push(db, self);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
|
@ -137,8 +137,11 @@ macro_rules! setup_input_struct {
|
|||
// FIXME(rust-lang/rust#65991): The `db` argument *should* have the type `dyn Database`
|
||||
$Db: ?Sized + $zalsa::Database,
|
||||
{
|
||||
let runtime = db.runtime();
|
||||
let fields = $Configuration::ingredient(db.as_salsa_database()).field(runtime, self, $field_index);
|
||||
let fields = $Configuration::ingredient(db.as_salsa_database()).field(
|
||||
db.as_salsa_database(),
|
||||
self,
|
||||
$field_index,
|
||||
);
|
||||
$zalsa::maybe_clone!(
|
||||
$field_option,
|
||||
$field_ty,
|
||||
|
|
|
@ -134,9 +134,8 @@ macro_rules! setup_interned_struct {
|
|||
// FIXME(rust-lang/rust#65991): The `db` argument *should* have the type `dyn Database`
|
||||
$Db: ?Sized + salsa::Database,
|
||||
{
|
||||
let runtime = db.runtime();
|
||||
let current_revision = $zalsa::current_revision(db);
|
||||
$Configuration::ingredient(db).intern(runtime, ($($field_id,)*))
|
||||
$Configuration::ingredient(db).intern(db.as_salsa_database(), ($($field_id,)*))
|
||||
}
|
||||
|
||||
$(
|
||||
|
|
|
@ -233,7 +233,7 @@ macro_rules! setup_tracked_fn {
|
|||
use salsa::plumbing as $zalsa;
|
||||
let key = $zalsa::macro_if! {
|
||||
if $needs_interner {
|
||||
$Configuration::intern_ingredient($db).intern_id($db.runtime(), ($($input_id),*))
|
||||
$Configuration::intern_ingredient($db).intern_id($db.as_salsa_database(), ($($input_id),*))
|
||||
} else {
|
||||
$zalsa::AsId::as_id(&($($input_id),*))
|
||||
}
|
||||
|
@ -265,11 +265,10 @@ macro_rules! setup_tracked_fn {
|
|||
} }
|
||||
}
|
||||
|
||||
$zalsa::attach_database($db, || {
|
||||
let result = $zalsa::macro_if! {
|
||||
if $needs_interner {
|
||||
{
|
||||
let key = $Configuration::intern_ingredient($db).intern_id($db.runtime(), ($($input_id),*));
|
||||
let key = $Configuration::intern_ingredient($db).intern_id($db.as_salsa_database(), ($($input_id),*));
|
||||
$Configuration::fn_ingredient($db).fetch($db, key)
|
||||
}
|
||||
} else {
|
||||
|
@ -284,7 +283,6 @@ macro_rules! setup_tracked_fn {
|
|||
<$output_ty as std::clone::Clone>::clone(result)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
@ -193,7 +193,7 @@ macro_rules! setup_tracked_struct {
|
|||
$Db: ?Sized + $zalsa::Database,
|
||||
{
|
||||
$Configuration::ingredient(db.as_salsa_database()).new_struct(
|
||||
db.runtime(),
|
||||
db.as_salsa_database(),
|
||||
($($field_id,)*)
|
||||
)
|
||||
}
|
||||
|
@ -204,8 +204,7 @@ macro_rules! setup_tracked_struct {
|
|||
// FIXME(rust-lang/rust#65991): The `db` argument *should* have the type `dyn Database`
|
||||
$Db: ?Sized + $zalsa::Database,
|
||||
{
|
||||
let runtime = db.runtime();
|
||||
let fields = unsafe { self.0.as_ref() }.field(runtime, $field_index);
|
||||
let fields = unsafe { self.0.as_ref() }.field(db.as_salsa_database(), $field_index);
|
||||
$crate::maybe_clone!(
|
||||
$field_option,
|
||||
$field_ty,
|
||||
|
|
|
@ -268,8 +268,8 @@ fn fix_bad_variable_in_function() {
|
|||
"#]],
|
||||
expect![[r#"
|
||||
[
|
||||
"Event: Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: parse_statements(0) } }",
|
||||
"Event: Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: type_check_function(0) } }",
|
||||
"Event: Event { thread_id: ThreadId(11), kind: WillExecute { database_key: parse_statements(0) } }",
|
||||
"Event: Event { thread_id: ThreadId(11), kind: WillExecute { database_key: type_check_function(0) } }",
|
||||
]
|
||||
"#]],
|
||||
)],
|
||||
|
|
|
@ -10,9 +10,9 @@ use crate::{
|
|||
hash::FxDashMap,
|
||||
ingredient::{fmt_index, Ingredient, Jar},
|
||||
key::DependencyIndex,
|
||||
runtime::local_state::QueryOrigin,
|
||||
local_state::{self, LocalState, QueryOrigin},
|
||||
storage::IngredientIndex,
|
||||
Database, DatabaseKeyIndex, Event, EventKind, Id, Revision, Runtime,
|
||||
Database, DatabaseKeyIndex, Event, EventKind, Id, Revision,
|
||||
};
|
||||
|
||||
pub trait Accumulator: Clone + Debug + Send + Sync + 'static + Sized {
|
||||
|
@ -78,16 +78,19 @@ impl<A: Accumulator> IngredientImpl<A> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn push(&self, runtime: &Runtime, value: A) {
|
||||
pub fn push(&self, db: &dyn crate::Database, value: A) {
|
||||
local_state::attach(db, |state| {
|
||||
let runtime = db.runtime();
|
||||
let current_revision = runtime.current_revision();
|
||||
let (active_query, _) = match runtime.active_query() {
|
||||
let (active_query, _) = match state.active_query() {
|
||||
Some(pair) => pair,
|
||||
None => {
|
||||
panic!("cannot accumulate values outside of an active query")
|
||||
}
|
||||
};
|
||||
|
||||
let mut accumulated_values = self.map.entry(active_query).or_insert(AccumulatedValues {
|
||||
let mut accumulated_values =
|
||||
self.map.entry(active_query).or_insert(AccumulatedValues {
|
||||
values: vec![],
|
||||
produced_at: current_revision,
|
||||
});
|
||||
|
@ -95,27 +98,28 @@ impl<A: Accumulator> IngredientImpl<A> {
|
|||
// When we call `push' in a query, we will add the accumulator to the output of the query.
|
||||
// If we find here that this accumulator is not the output of the query,
|
||||
// we can say that the accumulated values we stored for this query is out of date.
|
||||
if !runtime.is_output_of_active_query(self.dependency_index()) {
|
||||
if !state.is_output_of_active_query(self.dependency_index()) {
|
||||
accumulated_values.values.truncate(0);
|
||||
accumulated_values.produced_at = current_revision;
|
||||
}
|
||||
|
||||
runtime.add_output(self.dependency_index());
|
||||
state.add_output(self.dependency_index());
|
||||
accumulated_values.values.push(value);
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn produced_by(
|
||||
&self,
|
||||
runtime: &Runtime,
|
||||
current_revision: Revision,
|
||||
local_state: &LocalState,
|
||||
query: DatabaseKeyIndex,
|
||||
output: &mut Vec<A>,
|
||||
) {
|
||||
let current_revision = runtime.current_revision();
|
||||
if let Some(v) = self.map.get(&query) {
|
||||
// FIXME: We don't currently have a good way to identify the value that was read.
|
||||
// You can't report is as a tracked read of `query`, because the return value of query is not being read here --
|
||||
// instead it is the set of values accumuated by `query`.
|
||||
runtime.report_untracked_read();
|
||||
local_state.report_untracked_read(current_revision);
|
||||
|
||||
let AccumulatedValues {
|
||||
values,
|
||||
|
@ -174,7 +178,7 @@ impl<A: Accumulator> Ingredient for IngredientImpl<A> {
|
|||
assert!(stale_output_key.is_none());
|
||||
if self.map.remove(&executor).is_some() {
|
||||
db.salsa_event(Event {
|
||||
runtime_id: db.runtime().id(),
|
||||
thread_id: std::thread::current().id(),
|
||||
kind: EventKind::DidDiscardAccumulated {
|
||||
executor_key: executor,
|
||||
accumulator: self.dependency_index(),
|
||||
|
@ -198,6 +202,10 @@ impl<A: Accumulator> Ingredient for IngredientImpl<A> {
|
|||
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt_index(A::DEBUG_NAME, index, fmt)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
A::DEBUG_NAME
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> std::fmt::Debug for IngredientImpl<A>
|
||||
|
|
|
@ -9,16 +9,16 @@ use crate::{
|
|||
use super::local_state::{EdgeKind, QueryEdges, QueryOrigin, QueryRevisions};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) struct ActiveQuery {
|
||||
pub(crate) struct ActiveQuery {
|
||||
/// What query is executing
|
||||
pub(super) database_key_index: DatabaseKeyIndex,
|
||||
pub(crate) database_key_index: DatabaseKeyIndex,
|
||||
|
||||
/// Minimum durability of inputs observed so far.
|
||||
pub(super) durability: Durability,
|
||||
pub(crate) durability: Durability,
|
||||
|
||||
/// Maximum revision of all inputs observed. If we observe an
|
||||
/// untracked read, this will be set to the most recent revision.
|
||||
pub(super) changed_at: Revision,
|
||||
pub(crate) changed_at: Revision,
|
||||
|
||||
/// Inputs: Set of subqueries that were accessed thus far.
|
||||
/// Outputs: Tracks values written by this query. Could be...
|
||||
|
@ -26,38 +26,18 @@ pub(super) struct ActiveQuery {
|
|||
/// * tracked structs created
|
||||
/// * invocations of `specify`
|
||||
/// * accumulators pushed to
|
||||
pub(super) input_outputs: FxIndexSet<(EdgeKind, DependencyIndex)>,
|
||||
input_outputs: FxIndexSet<(EdgeKind, DependencyIndex)>,
|
||||
|
||||
/// True if there was an untracked read.
|
||||
pub(super) untracked_read: bool,
|
||||
untracked_read: bool,
|
||||
|
||||
/// Stores the entire cycle, if one is found and this query is part of it.
|
||||
pub(super) cycle: Option<Cycle>,
|
||||
pub(crate) cycle: Option<Cycle>,
|
||||
|
||||
/// When new entities are created, their data is hashed, and the resulting
|
||||
/// hash is added to this map. If it is not present, then the disambiguator is 0.
|
||||
/// Otherwise it is 1 more than the current value (which is incremented).
|
||||
pub(super) disambiguator_map: FxIndexMap<u64, Disambiguator>,
|
||||
}
|
||||
|
||||
pub(super) struct SavedQueryState {
|
||||
database_key_index: DatabaseKeyIndex,
|
||||
durability: Durability,
|
||||
changed_at: Revision,
|
||||
input_outputs_len: usize,
|
||||
untracked_read: bool,
|
||||
}
|
||||
|
||||
impl SavedQueryState {
|
||||
fn new(query: &ActiveQuery) -> Self {
|
||||
Self {
|
||||
database_key_index: query.database_key_index,
|
||||
durability: query.durability,
|
||||
changed_at: query.changed_at,
|
||||
input_outputs_len: query.input_outputs.len(),
|
||||
untracked_read: query.untracked_read,
|
||||
}
|
||||
}
|
||||
disambiguator_map: FxIndexMap<u64, Disambiguator>,
|
||||
}
|
||||
|
||||
impl ActiveQuery {
|
||||
|
@ -73,26 +53,6 @@ impl ActiveQuery {
|
|||
}
|
||||
}
|
||||
|
||||
pub(super) fn save_query_state(&self) -> SavedQueryState {
|
||||
SavedQueryState::new(self)
|
||||
}
|
||||
|
||||
pub(super) fn restore_query_state(&mut self, state: SavedQueryState) {
|
||||
assert_eq!(self.database_key_index, state.database_key_index);
|
||||
|
||||
assert!(self.durability <= state.durability);
|
||||
self.durability = state.durability;
|
||||
|
||||
assert!(self.changed_at >= state.changed_at);
|
||||
self.changed_at = state.changed_at;
|
||||
|
||||
assert!(self.input_outputs.len() >= state.input_outputs_len);
|
||||
self.input_outputs.truncate(state.input_outputs_len);
|
||||
|
||||
assert!(self.untracked_read >= state.untracked_read);
|
||||
self.untracked_read = state.untracked_read;
|
||||
}
|
||||
|
||||
pub(super) fn add_read(
|
||||
&mut self,
|
||||
input: DependencyIndex,
|
|
@ -1,4 +1,4 @@
|
|||
use crate::{database, key::DatabaseKeyIndex, Database};
|
||||
use crate::{key::DatabaseKeyIndex, local_state, Database};
|
||||
use std::{panic::AssertUnwindSafe, sync::Arc};
|
||||
|
||||
/// Captures the participants of a cycle that occurred when executing a query.
|
||||
|
@ -74,7 +74,7 @@ impl Cycle {
|
|||
|
||||
impl std::fmt::Debug for Cycle {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
database::with_attached_database(|db| {
|
||||
local_state::with_attached_database(|db| {
|
||||
f.debug_struct("UnexpectedCycle")
|
||||
.field("all_participants", &self.all_participants(db))
|
||||
.field("unexpected_participants", &self.unexpected_participants(db))
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
use std::{cell::Cell, ptr::NonNull};
|
||||
|
||||
use crate::{storage::DatabaseGen, Durability, Event, Revision};
|
||||
use crate::{local_state, storage::DatabaseGen, Durability, Event, Revision};
|
||||
|
||||
#[salsa_macros::db]
|
||||
pub trait Database: DatabaseGen {
|
||||
|
@ -31,7 +29,10 @@ pub trait Database: DatabaseGen {
|
|||
/// Queries which report untracked reads will be re-executed in the next
|
||||
/// revision.
|
||||
fn report_untracked_read(&self) {
|
||||
self.runtime().report_untracked_read();
|
||||
let db = self.as_salsa_database();
|
||||
local_state::attach(db, |state| {
|
||||
state.report_untracked_read(db.runtime().current_revision())
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute `op` with the database in thread-local storage for debug print-outs.
|
||||
|
@ -39,73 +40,7 @@ pub trait Database: DatabaseGen {
|
|||
where
|
||||
Self: Sized,
|
||||
{
|
||||
attach_database(self, || op(self))
|
||||
}
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static DATABASE: Cell<AttachedDatabase> = const { Cell::new(AttachedDatabase::null()) };
|
||||
}
|
||||
|
||||
/// Access the "attached" database. Returns `None` if no database is attached.
|
||||
/// Databases are attached with `attach_database`.
|
||||
pub fn with_attached_database<R>(op: impl FnOnce(&dyn Database) -> R) -> Option<R> {
|
||||
// SAFETY: We always attach the database in for the entire duration of a function,
|
||||
// so it cannot become "unattached" while this function is running.
|
||||
let db = DATABASE.get();
|
||||
Some(op(unsafe { db.ptr?.as_ref() }))
|
||||
}
|
||||
|
||||
/// Attach database and returns a guard that will un-attach the database when dropped.
|
||||
/// Has no effect if a database is already attached.
|
||||
pub fn attach_database<Db: ?Sized + Database, R>(db: &Db, op: impl FnOnce() -> R) -> R {
|
||||
let _guard = AttachedDb::new(db);
|
||||
op()
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
struct AttachedDatabase {
|
||||
ptr: Option<NonNull<dyn Database>>,
|
||||
}
|
||||
|
||||
impl AttachedDatabase {
|
||||
pub const fn null() -> Self {
|
||||
Self { ptr: None }
|
||||
}
|
||||
|
||||
pub fn from<Db: ?Sized + Database>(db: &Db) -> Self {
|
||||
unsafe {
|
||||
let db: *const dyn Database = db.as_salsa_database();
|
||||
Self {
|
||||
ptr: Some(NonNull::new_unchecked(db as *mut dyn Database)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct AttachedDb<'db, Db: ?Sized + Database> {
|
||||
db: &'db Db,
|
||||
previous: AttachedDatabase,
|
||||
}
|
||||
|
||||
impl<'db, Db: ?Sized + Database> AttachedDb<'db, Db> {
|
||||
pub fn new(db: &'db Db) -> Self {
|
||||
let previous = DATABASE.replace(AttachedDatabase::from(db));
|
||||
AttachedDb { db, previous }
|
||||
}
|
||||
}
|
||||
|
||||
impl<Db: ?Sized + Database> Drop for AttachedDb<'_, Db> {
|
||||
fn drop(&mut self) {
|
||||
DATABASE.set(self.previous);
|
||||
}
|
||||
}
|
||||
|
||||
impl<Db: ?Sized + Database> std::ops::Deref for AttachedDb<'_, Db> {
|
||||
type Target = Db;
|
||||
|
||||
fn deref(&self) -> &Db {
|
||||
self.db
|
||||
local_state::attach(self, |_state| op(self))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
23
src/event.rs
23
src/event.rs
|
@ -1,13 +1,14 @@
|
|||
use crate::{key::DatabaseKeyIndex, key::DependencyIndex, runtime::RuntimeId};
|
||||
use std::thread::ThreadId;
|
||||
|
||||
use crate::{key::DatabaseKeyIndex, key::DependencyIndex};
|
||||
|
||||
/// The `Event` struct identifies various notable things that can
|
||||
/// occur during salsa execution. Instances of this struct are given
|
||||
/// to `salsa_event`.
|
||||
#[derive(Debug)]
|
||||
pub struct Event {
|
||||
/// The id of the snapshot that triggered the event. Usually
|
||||
/// 1-to-1 with a thread, as well.
|
||||
pub runtime_id: RuntimeId,
|
||||
/// The id of the thread that triggered the event.
|
||||
pub thread_id: ThreadId,
|
||||
|
||||
/// What sort of event was it.
|
||||
pub kind: EventKind,
|
||||
|
@ -26,18 +27,15 @@ pub enum EventKind {
|
|||
database_key: DatabaseKeyIndex,
|
||||
},
|
||||
|
||||
/// Indicates that another thread (with id `other_runtime_id`) is processing the
|
||||
/// Indicates that another thread (with id `other_thread_id`) is processing the
|
||||
/// given query (`database_key`), so we will block until they
|
||||
/// finish.
|
||||
///
|
||||
/// Executes after we have registered with the other thread but
|
||||
/// before they have answered us.
|
||||
///
|
||||
/// (NB: you can find the `id` of the current thread via the
|
||||
/// `runtime`)
|
||||
WillBlockOn {
|
||||
/// The id of the runtime we will block on.
|
||||
other_runtime_id: RuntimeId,
|
||||
/// The id of the thread we will block on.
|
||||
other_thread_id: ThreadId,
|
||||
|
||||
/// The database-key for the affected value. Implements `Debug`.
|
||||
database_key: DatabaseKeyIndex,
|
||||
|
@ -55,6 +53,11 @@ pub enum EventKind {
|
|||
/// the current revision has been cancelled.
|
||||
WillCheckCancellation,
|
||||
|
||||
/// Indicates that one [`Handle`](`crate::Handle`) has set the cancellation flag.
|
||||
/// When other active handles execute salsa methods, they will observe this flag
|
||||
/// and panic with a sentinel value of type [`Cancelled`](`crate::Cancelled`).
|
||||
DidSetCancellationFlag,
|
||||
|
||||
/// Discovered that a query used to output a given output but no longer does.
|
||||
WillDiscardStaleOutput {
|
||||
/// Key for the query that is executing and which no longer outputs the given value.
|
||||
|
|
|
@ -6,7 +6,7 @@ use crate::{
|
|||
cycle::CycleRecoveryStrategy,
|
||||
ingredient::fmt_index,
|
||||
key::DatabaseKeyIndex,
|
||||
runtime::local_state::QueryOrigin,
|
||||
local_state::QueryOrigin,
|
||||
salsa_struct::SalsaStructInDb,
|
||||
storage::{DatabaseGen, IngredientIndex},
|
||||
Cycle, Database, Event, EventKind, Id, Revision,
|
||||
|
@ -269,7 +269,7 @@ where
|
|||
if let Some(origin) = self.delete_memo(id) {
|
||||
let key = self.database_key_index(id);
|
||||
db.salsa_event(Event {
|
||||
runtime_id: db.runtime().id(),
|
||||
thread_id: std::thread::current().id(),
|
||||
kind: EventKind::DidDiscard { key },
|
||||
});
|
||||
|
||||
|
@ -285,6 +285,10 @@ where
|
|||
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt_index(C::DEBUG_NAME, index, fmt)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
C::DEBUG_NAME
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> std::fmt::Debug for IngredientImpl<C>
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
use crate::{accumulator, hash::FxHashSet, storage::DatabaseGen, DatabaseKeyIndex, Id};
|
||||
use crate::{
|
||||
accumulator, hash::FxHashSet, local_state, storage::DatabaseGen, DatabaseKeyIndex, Id,
|
||||
};
|
||||
|
||||
use super::{Configuration, IngredientImpl};
|
||||
|
||||
|
@ -12,10 +14,12 @@ where
|
|||
where
|
||||
A: accumulator::Accumulator,
|
||||
{
|
||||
local_state::attach(db, |local_state| {
|
||||
let current_revision = db.runtime().current_revision();
|
||||
|
||||
let Some(accumulator) = <accumulator::IngredientImpl<A>>::from_db(db) else {
|
||||
return vec![];
|
||||
};
|
||||
let runtime = db.runtime();
|
||||
let mut output = vec![];
|
||||
|
||||
// First ensure the result is up to date
|
||||
|
@ -27,7 +31,7 @@ where
|
|||
|
||||
while let Some(k) = stack.pop() {
|
||||
if visited.insert(k) {
|
||||
accumulator.produced_by(runtime, k, &mut output);
|
||||
accumulator.produced_by(current_revision, local_state, k, &mut output);
|
||||
|
||||
let origin = db.lookup_ingredient(k.ingredient_index).origin(k.key_index);
|
||||
let inputs = origin.iter().flat_map(|origin| origin.inputs());
|
||||
|
@ -36,12 +40,15 @@ where
|
|||
// from the stack.
|
||||
stack.extend(
|
||||
inputs
|
||||
.flat_map(|input| TryInto::<DatabaseKeyIndex>::try_into(input).into_iter())
|
||||
.flat_map(|input| {
|
||||
TryInto::<DatabaseKeyIndex>::try_into(input).into_iter()
|
||||
})
|
||||
.rev(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
output
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use crate::runtime::local_state::QueryRevisions;
|
||||
use crate::local_state::QueryRevisions;
|
||||
|
||||
use super::{memo::Memo, Configuration, IngredientImpl};
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use arc_swap::ArcSwap;
|
||||
use crossbeam::queue::SegQueue;
|
||||
|
||||
use crate::{runtime::local_state::QueryOrigin, Id};
|
||||
use crate::{local_state::QueryOrigin, Id};
|
||||
|
||||
use super::{memo, Configuration, IngredientImpl};
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use crate::{
|
||||
hash::FxHashSet, key::DependencyIndex, runtime::local_state::QueryRevisions,
|
||||
storage::DatabaseGen, Database, DatabaseKeyIndex, Event, EventKind,
|
||||
hash::FxHashSet, key::DependencyIndex, local_state::QueryRevisions, storage::DatabaseGen,
|
||||
Database, DatabaseKeyIndex, Event, EventKind,
|
||||
};
|
||||
|
||||
use super::{memo::Memo, Configuration, IngredientImpl};
|
||||
|
@ -38,9 +38,8 @@ where
|
|||
}
|
||||
|
||||
fn report_stale_output(db: &C::DbView, key: DatabaseKeyIndex, output: DependencyIndex) {
|
||||
let runtime_id = db.runtime().id();
|
||||
db.salsa_event(Event {
|
||||
runtime_id,
|
||||
thread_id: std::thread::current().id(),
|
||||
kind: EventKind::WillDiscardStaleOutput {
|
||||
execute_key: key,
|
||||
output_key: output,
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use crate::{
|
||||
runtime::{local_state::ActiveQueryGuard, StampedValue},
|
||||
storage::DatabaseGen,
|
||||
Cycle, Database, Event, EventKind,
|
||||
local_state::ActiveQueryGuard, runtime::StampedValue, storage::DatabaseGen, Cycle, Database,
|
||||
Event, EventKind,
|
||||
};
|
||||
|
||||
use super::{memo::Memo, Configuration, IngredientImpl};
|
||||
|
@ -34,7 +33,7 @@ where
|
|||
tracing::info!("{:?}: executing query", database_key_index);
|
||||
|
||||
db.salsa_event(Event {
|
||||
runtime_id: runtime.id(),
|
||||
thread_id: std::thread::current().id(),
|
||||
kind: EventKind::WillExecute {
|
||||
database_key: database_key_index,
|
||||
},
|
||||
|
|
|
@ -1,6 +1,11 @@
|
|||
use arc_swap::Guard;
|
||||
|
||||
use crate::{runtime::StampedValue, storage::DatabaseGen, Id};
|
||||
use crate::{
|
||||
local_state::{self, LocalState},
|
||||
runtime::StampedValue,
|
||||
storage::DatabaseGen,
|
||||
Id,
|
||||
};
|
||||
|
||||
use super::{Configuration, IngredientImpl};
|
||||
|
||||
|
@ -9,37 +14,41 @@ where
|
|||
C: Configuration,
|
||||
{
|
||||
pub fn fetch<'db>(&'db self, db: &'db C::DbView, key: Id) -> &C::Output<'db> {
|
||||
let runtime = db.runtime();
|
||||
|
||||
runtime.unwind_if_revision_cancelled(db);
|
||||
local_state::attach(db.as_salsa_database(), |local_state| {
|
||||
local_state.unwind_if_revision_cancelled(db.as_salsa_database());
|
||||
|
||||
let StampedValue {
|
||||
value,
|
||||
durability,
|
||||
changed_at,
|
||||
} = self.compute_value(db, key);
|
||||
} = self.compute_value(db, local_state, key);
|
||||
|
||||
if let Some(evicted) = self.lru.record_use(key) {
|
||||
self.evict(evicted);
|
||||
}
|
||||
|
||||
db.runtime().report_tracked_read(
|
||||
local_state.report_tracked_read(
|
||||
self.database_key_index(key).into(),
|
||||
durability,
|
||||
changed_at,
|
||||
);
|
||||
|
||||
value
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn compute_value<'db>(
|
||||
&'db self,
|
||||
db: &'db C::DbView,
|
||||
local_state: &LocalState,
|
||||
key: Id,
|
||||
) -> StampedValue<&'db C::Output<'db>> {
|
||||
loop {
|
||||
if let Some(value) = self.fetch_hot(db, key).or_else(|| self.fetch_cold(db, key)) {
|
||||
if let Some(value) = self
|
||||
.fetch_hot(db, key)
|
||||
.or_else(|| self.fetch_cold(db, local_state, key))
|
||||
{
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
@ -70,18 +79,18 @@ where
|
|||
fn fetch_cold<'db>(
|
||||
&'db self,
|
||||
db: &'db C::DbView,
|
||||
local_state: &LocalState,
|
||||
key: Id,
|
||||
) -> Option<StampedValue<&'db C::Output<'db>>> {
|
||||
let runtime = db.runtime();
|
||||
let database_key_index = self.database_key_index(key);
|
||||
|
||||
// Try to claim this query: if someone else has claimed it already, go back and start again.
|
||||
let _claim_guard = self
|
||||
.sync_map
|
||||
.claim(db.as_salsa_database(), database_key_index)?;
|
||||
let _claim_guard =
|
||||
self.sync_map
|
||||
.claim(db.as_salsa_database(), local_state, database_key_index)?;
|
||||
|
||||
// Push the query on the stack.
|
||||
let active_query = runtime.push_query(database_key_index);
|
||||
let active_query = local_state.push_query(database_key_index);
|
||||
|
||||
// Now that we've claimed the item, check again to see if there's a "hot" value.
|
||||
// This time we can do a *deep* verify. Because this can recurse, don't hold the arcswap guard.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use crate::{runtime::local_state::QueryOrigin, Id};
|
||||
use crate::{local_state::QueryOrigin, Id};
|
||||
|
||||
use super::{Configuration, IngredientImpl};
|
||||
|
||||
|
|
|
@ -2,10 +2,8 @@ use arc_swap::Guard;
|
|||
|
||||
use crate::{
|
||||
key::DatabaseKeyIndex,
|
||||
runtime::{
|
||||
local_state::{ActiveQueryGuard, EdgeKind, QueryOrigin},
|
||||
StampedValue,
|
||||
},
|
||||
local_state::{self, ActiveQueryGuard, EdgeKind, LocalState, QueryOrigin},
|
||||
runtime::StampedValue,
|
||||
storage::DatabaseGen,
|
||||
Id, Revision, Runtime,
|
||||
};
|
||||
|
@ -22,13 +20,16 @@ where
|
|||
key: Id,
|
||||
revision: Revision,
|
||||
) -> bool {
|
||||
local_state::attach(db.as_salsa_database(), |local_state| {
|
||||
let runtime = db.runtime();
|
||||
runtime.unwind_if_revision_cancelled(db);
|
||||
local_state.unwind_if_revision_cancelled(db.as_salsa_database());
|
||||
|
||||
loop {
|
||||
let database_key_index = self.database_key_index(key);
|
||||
|
||||
tracing::debug!("{database_key_index:?}: maybe_changed_after(revision = {revision:?})");
|
||||
tracing::debug!(
|
||||
"{database_key_index:?}: maybe_changed_after(revision = {revision:?})"
|
||||
);
|
||||
|
||||
// Check if we have a verified version: this is the hot path.
|
||||
let memo_guard = self.memo_map.get(key);
|
||||
|
@ -37,7 +38,8 @@ where
|
|||
return memo.revisions.changed_at > revision;
|
||||
}
|
||||
drop(memo_guard); // release the arc-swap guard before cold path
|
||||
if let Some(mcs) = self.maybe_changed_after_cold(db, key, revision) {
|
||||
if let Some(mcs) = self.maybe_changed_after_cold(db, local_state, key, revision)
|
||||
{
|
||||
return mcs;
|
||||
} else {
|
||||
// We failed to claim, have to retry.
|
||||
|
@ -47,21 +49,22 @@ where
|
|||
return true;
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn maybe_changed_after_cold<'db>(
|
||||
&'db self,
|
||||
db: &'db C::DbView,
|
||||
local_state: &LocalState,
|
||||
key_index: Id,
|
||||
revision: Revision,
|
||||
) -> Option<bool> {
|
||||
let runtime = db.runtime();
|
||||
let database_key_index = self.database_key_index(key_index);
|
||||
|
||||
let _claim_guard = self
|
||||
.sync_map
|
||||
.claim(db.as_salsa_database(), database_key_index)?;
|
||||
let active_query = runtime.push_query(database_key_index);
|
||||
let _claim_guard =
|
||||
self.sync_map
|
||||
.claim(db.as_salsa_database(), local_state, database_key_index)?;
|
||||
let active_query = local_state.push_query(database_key_index);
|
||||
|
||||
// Load the current memo, if any. Use a real arc, not an arc-swap guard,
|
||||
// since we may recurse.
|
||||
|
|
|
@ -4,8 +4,8 @@ use arc_swap::{ArcSwap, Guard};
|
|||
use crossbeam::atomic::AtomicCell;
|
||||
|
||||
use crate::{
|
||||
hash::FxDashMap, key::DatabaseKeyIndex, runtime::local_state::QueryRevisions, Event, EventKind,
|
||||
Id, Revision, Runtime,
|
||||
hash::FxDashMap, key::DatabaseKeyIndex, local_state::QueryRevisions, Event, EventKind, Id,
|
||||
Revision, Runtime,
|
||||
};
|
||||
|
||||
use super::Configuration;
|
||||
|
@ -78,7 +78,7 @@ impl<C: Configuration> MemoMap<C> {
|
|||
/// with an equivalent memo that has no value. If the memo is untracked, BaseInput,
|
||||
/// or has values assigned as output of another query, this has no effect.
|
||||
pub(super) fn evict(&self, key: Id) {
|
||||
use crate::runtime::local_state::QueryOrigin;
|
||||
use crate::local_state::QueryOrigin;
|
||||
use dashmap::mapref::entry::Entry::*;
|
||||
|
||||
if let Occupied(entry) = self.map.entry(key) {
|
||||
|
@ -150,7 +150,7 @@ impl<V> Memo<V> {
|
|||
database_key_index: DatabaseKeyIndex,
|
||||
) {
|
||||
db.salsa_event(Event {
|
||||
runtime_id: runtime.id(),
|
||||
thread_id: std::thread::current().id(),
|
||||
kind: EventKind::DidValidateMemoizedValue {
|
||||
database_key: database_key_index,
|
||||
},
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use crossbeam::atomic::AtomicCell;
|
||||
|
||||
use crate::{
|
||||
runtime::local_state::{QueryOrigin, QueryRevisions},
|
||||
local_state::{self, QueryOrigin, QueryRevisions},
|
||||
storage::DatabaseGen,
|
||||
tracked_struct::TrackedStructInDb,
|
||||
Database, DatabaseKeyIndex, Id,
|
||||
|
@ -13,21 +13,14 @@ impl<C> IngredientImpl<C>
|
|||
where
|
||||
C: Configuration,
|
||||
{
|
||||
/// Specifies the value of the function for the given key.
|
||||
/// This is a way to imperatively set the value of a function.
|
||||
/// It only works if the key is a tracked struct created in the current query.
|
||||
fn specify<'db>(
|
||||
&'db self,
|
||||
db: &'db C::DbView,
|
||||
key: Id,
|
||||
value: C::Output<'db>,
|
||||
origin: impl Fn(DatabaseKeyIndex) -> QueryOrigin,
|
||||
) where
|
||||
/// Specify the value for `key` *and* record that we did so.
|
||||
/// Used for explicit calls to `specify`, but not needed for pre-declared tracked struct fields.
|
||||
pub fn specify_and_record<'db>(&'db self, db: &'db C::DbView, key: Id, value: C::Output<'db>)
|
||||
where
|
||||
C::Input<'db>: TrackedStructInDb,
|
||||
{
|
||||
let runtime = db.runtime();
|
||||
|
||||
let (active_query_key, current_deps) = match runtime.active_query() {
|
||||
local_state::attach(db.as_salsa_database(), |state| {
|
||||
let (active_query_key, current_deps) = match state.active_query() {
|
||||
Some(v) => v,
|
||||
None => panic!("can only use `specify` inside a tracked function"),
|
||||
};
|
||||
|
@ -44,10 +37,13 @@ where
|
|||
// * Q4 invokes Q2 and then Q1
|
||||
//
|
||||
// Now, if We invoke Q3 first, We get one result for Q2, but if We invoke Q4 first, We get a different value. That's no good.
|
||||
let database_key_index = <C::Input<'db>>::database_key_index(db.as_salsa_database(), key);
|
||||
let database_key_index =
|
||||
<C::Input<'db>>::database_key_index(db.as_salsa_database(), key);
|
||||
let dependency_index = database_key_index.into();
|
||||
if !runtime.is_output_of_active_query(dependency_index) {
|
||||
panic!("can only use `specify` on salsa structs created during the current tracked fn");
|
||||
if !state.is_output_of_active_query(dependency_index) {
|
||||
panic!(
|
||||
"can only use `specify` on salsa structs created during the current tracked fn"
|
||||
);
|
||||
}
|
||||
|
||||
// Subtle: we treat the "input" to a set query as if it were
|
||||
|
@ -69,11 +65,11 @@ where
|
|||
// - a result that is verified in the current revision, because it was set, which will use the set value
|
||||
// - a result that is NOT verified and has untracked inputs, which will re-execute (and likely panic)
|
||||
|
||||
let revision = runtime.current_revision();
|
||||
let revision = db.runtime().current_revision();
|
||||
let mut revisions = QueryRevisions {
|
||||
changed_at: current_deps.changed_at,
|
||||
durability: current_deps.durability,
|
||||
origin: origin(active_query_key),
|
||||
origin: QueryOrigin::Assigned(active_query_key),
|
||||
};
|
||||
|
||||
if let Some(old_memo) = self.memo_map.get(key) {
|
||||
|
@ -89,21 +85,11 @@ where
|
|||
|
||||
tracing::debug!("specify: about to add memo {:#?} for key {:?}", memo, key);
|
||||
self.insert_memo(db, key, memo);
|
||||
}
|
||||
|
||||
/// Specify the value for `key` *and* record that we did so.
|
||||
/// Used for explicit calls to `specify`, but not needed for pre-declared tracked struct fields.
|
||||
pub fn specify_and_record<'db>(&'db self, db: &'db C::DbView, key: Id, value: C::Output<'db>)
|
||||
where
|
||||
C::Input<'db>: TrackedStructInDb,
|
||||
{
|
||||
self.specify(db, key, value, |database_key_index| {
|
||||
QueryOrigin::Assigned(database_key_index)
|
||||
});
|
||||
|
||||
// Record that the current query *specified* a value for this cell.
|
||||
let database_key_index = self.database_key_index(key);
|
||||
db.runtime().add_output(database_key_index.into());
|
||||
state.add_output(database_key_index.into());
|
||||
})
|
||||
}
|
||||
|
||||
/// Invoked when the query `executor` has been validated as having green inputs
|
||||
|
|
|
@ -4,7 +4,7 @@ use crossbeam::atomic::AtomicCell;
|
|||
|
||||
use crate::{
|
||||
durability::Durability,
|
||||
runtime::local_state::{QueryOrigin, QueryRevisions},
|
||||
local_state::{QueryOrigin, QueryRevisions},
|
||||
Id, Runtime,
|
||||
};
|
||||
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::{
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
thread::ThreadId,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
hash::FxDashMap,
|
||||
key::DatabaseKeyIndex,
|
||||
runtime::{RuntimeId, WaitResult},
|
||||
Database, Id, Runtime,
|
||||
hash::FxDashMap, key::DatabaseKeyIndex, local_state::LocalState, runtime::WaitResult, Database,
|
||||
Id, Runtime,
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
|
@ -13,7 +14,7 @@ pub(super) struct SyncMap {
|
|||
}
|
||||
|
||||
struct SyncState {
|
||||
id: RuntimeId,
|
||||
id: ThreadId,
|
||||
|
||||
/// Set to true if any other queries are blocked,
|
||||
/// waiting for this query to complete.
|
||||
|
@ -24,13 +25,15 @@ impl SyncMap {
|
|||
pub(super) fn claim<'me>(
|
||||
&'me self,
|
||||
db: &'me dyn Database,
|
||||
local_state: &LocalState,
|
||||
database_key_index: DatabaseKeyIndex,
|
||||
) -> Option<ClaimGuard<'me>> {
|
||||
let runtime = db.runtime();
|
||||
let thread_id = std::thread::current().id();
|
||||
match self.sync_map.entry(database_key_index.key_index) {
|
||||
dashmap::mapref::entry::Entry::Vacant(entry) => {
|
||||
entry.insert(SyncState {
|
||||
id: runtime.id(),
|
||||
id: thread_id,
|
||||
anyone_waiting: AtomicBool::new(false),
|
||||
});
|
||||
Some(ClaimGuard {
|
||||
|
@ -48,7 +51,7 @@ impl SyncMap {
|
|||
// not to gate future atomic reads.
|
||||
entry.get().anyone_waiting.store(true, Ordering::Relaxed);
|
||||
let other_id = entry.get().id;
|
||||
runtime.block_on_or_unwind(db, database_key_index, other_id, entry);
|
||||
runtime.block_on_or_unwind(db, local_state, database_key_index, other_id, entry);
|
||||
None
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,14 +2,17 @@ use std::sync::Arc;
|
|||
|
||||
use parking_lot::{Condvar, Mutex};
|
||||
|
||||
use crate::storage::HasStorage;
|
||||
use crate::{storage::HasStorage, Event, EventKind};
|
||||
|
||||
/// A database "handle" allows coordination of multiple async tasks accessing the same database.
|
||||
/// So long as you are just doing reads, you can freely clone.
|
||||
/// When you attempt to modify the database, you call `get_mut`, which will set the cancellation flag,
|
||||
/// causing other handles to get panics. Once all other handles are dropped, you can proceed.
|
||||
pub struct Handle<Db: HasStorage> {
|
||||
db: Arc<Db>,
|
||||
/// Reference to the database. This is always `Some` except during destruction.
|
||||
db: Option<Arc<Db>>,
|
||||
|
||||
/// Coordination data.
|
||||
coordinate: Arc<Coordinate>,
|
||||
}
|
||||
|
||||
|
@ -21,9 +24,10 @@ struct Coordinate {
|
|||
}
|
||||
|
||||
impl<Db: HasStorage> Handle<Db> {
|
||||
/// Create a new handle wrapping `db`.
|
||||
pub fn new(db: Db) -> Self {
|
||||
Self {
|
||||
db: Arc::new(db),
|
||||
db: Some(Arc::new(db)),
|
||||
coordinate: Arc::new(Coordinate {
|
||||
clones: Mutex::new(1),
|
||||
cvar: Default::default(),
|
||||
|
@ -31,12 +35,29 @@ impl<Db: HasStorage> Handle<Db> {
|
|||
}
|
||||
}
|
||||
|
||||
fn db(&self) -> &Arc<Db> {
|
||||
self.db.as_ref().unwrap()
|
||||
}
|
||||
|
||||
fn db_mut(&mut self) -> &mut Arc<Db> {
|
||||
self.db.as_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the inner database.
|
||||
/// If other handles are active, this method sets the cancellation flag
|
||||
/// and blocks until they are dropped.
|
||||
pub fn get_mut(&mut self) -> &mut Db {
|
||||
self.cancel_others();
|
||||
Arc::get_mut(&mut self.db).expect("no other handles")
|
||||
|
||||
// Once cancellation above completes, the other handles are being dropped.
|
||||
// However, because the signal is sent before the destructor completes, it's
|
||||
// possible that they have not *yet* dropped.
|
||||
//
|
||||
// Therefore, we may have to do a (short) bit of
|
||||
// spinning before we observe the thread-count reducing to 0.
|
||||
//
|
||||
// An alternative would be to
|
||||
Arc::get_mut(self.db_mut()).expect("other threads remain active despite cancellation")
|
||||
}
|
||||
|
||||
// ANCHOR: cancel_other_workers
|
||||
|
@ -46,9 +67,15 @@ impl<Db: HasStorage> Handle<Db> {
|
|||
/// This could deadlock if there is a single worker with two handles to the
|
||||
/// same database!
|
||||
fn cancel_others(&mut self) {
|
||||
let storage = self.db.storage();
|
||||
let storage = self.db().storage();
|
||||
storage.runtime().set_cancellation_flag();
|
||||
|
||||
self.db().salsa_event(Event {
|
||||
thread_id: std::thread::current().id(),
|
||||
|
||||
kind: EventKind::DidSetCancellationFlag,
|
||||
});
|
||||
|
||||
let mut clones = self.coordinate.clones.lock();
|
||||
while *clones != 1 {
|
||||
self.coordinate.cvar.wait(&mut clones);
|
||||
|
@ -59,6 +86,10 @@ impl<Db: HasStorage> Handle<Db> {
|
|||
|
||||
impl<Db: HasStorage> Drop for Handle<Db> {
|
||||
fn drop(&mut self) {
|
||||
// Drop the database handle *first*
|
||||
self.db.take();
|
||||
|
||||
// *Now* decrement the number of clones and notify once we have completed
|
||||
*self.coordinate.clones.lock() -= 1;
|
||||
self.coordinate.cvar.notify_all();
|
||||
}
|
||||
|
@ -68,7 +99,7 @@ impl<Db: HasStorage> std::ops::Deref for Handle<Db> {
|
|||
type Target = Db;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.db
|
||||
self.db()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -77,7 +108,7 @@ impl<Db: HasStorage> Clone for Handle<Db> {
|
|||
*self.coordinate.clones.lock() += 1;
|
||||
|
||||
Self {
|
||||
db: Arc::clone(&self.db),
|
||||
db: Some(Arc::clone(self.db())),
|
||||
coordinate: Arc::clone(&self.coordinate),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@ use std::{
|
|||
};
|
||||
|
||||
use crate::{
|
||||
cycle::CycleRecoveryStrategy, runtime::local_state::QueryOrigin, storage::IngredientIndex,
|
||||
Database, DatabaseKeyIndex, Id,
|
||||
cycle::CycleRecoveryStrategy, local_state::QueryOrigin, storage::IngredientIndex, Database,
|
||||
DatabaseKeyIndex, Id,
|
||||
};
|
||||
|
||||
use super::Revision;
|
||||
|
@ -19,6 +19,8 @@ pub trait Jar: Any {
|
|||
}
|
||||
|
||||
pub trait Ingredient: Any + std::fmt::Debug + Send + Sync {
|
||||
fn debug_name(&self) -> &'static str;
|
||||
|
||||
/// Has the value for `input` in this ingredient changed after `revision`?
|
||||
fn maybe_changed_after<'db>(
|
||||
&'db self,
|
||||
|
|
13
src/input.rs
13
src/input.rs
|
@ -17,8 +17,9 @@ use crate::{
|
|||
id::{AsId, FromId},
|
||||
ingredient::{fmt_index, Ingredient},
|
||||
key::{DatabaseKeyIndex, DependencyIndex},
|
||||
local_state::{self, QueryOrigin},
|
||||
plumbing::{Jar, Stamp},
|
||||
runtime::{local_state::QueryOrigin, Runtime},
|
||||
runtime::Runtime,
|
||||
storage::IngredientIndex,
|
||||
Database, Durability, Id, Revision,
|
||||
};
|
||||
|
@ -149,15 +150,16 @@ impl<C: Configuration> IngredientImpl<C> {
|
|||
/// The caller is responible for selecting the appropriate element.
|
||||
pub fn field<'db>(
|
||||
&'db self,
|
||||
runtime: &'db Runtime,
|
||||
db: &'db dyn crate::Database,
|
||||
id: C::Struct,
|
||||
field_index: usize,
|
||||
) -> &'db C::Fields {
|
||||
local_state::attach(db, |state| {
|
||||
let field_ingredient_index = self.ingredient_index.successor(field_index);
|
||||
let id = id.as_id();
|
||||
let value = self.struct_map.get(id);
|
||||
let stamp = &value.stamps[field_index];
|
||||
runtime.report_tracked_read(
|
||||
state.report_tracked_read(
|
||||
DependencyIndex {
|
||||
ingredient_index: field_ingredient_index,
|
||||
key_index: Some(id),
|
||||
|
@ -166,6 +168,7 @@ impl<C: Configuration> IngredientImpl<C> {
|
|||
stamp.changed_at,
|
||||
);
|
||||
&value.fields
|
||||
})
|
||||
}
|
||||
|
||||
/// Peek at the field values without recording any read dependency.
|
||||
|
@ -242,6 +245,10 @@ impl<C: Configuration> Ingredient for IngredientImpl<C> {
|
|||
fn fmt_index(&self, index: Option<Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt_index(C::DEBUG_NAME, index, fmt)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
C::DEBUG_NAME
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: Configuration> std::fmt::Debug for IngredientImpl<C> {
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use crate::cycle::CycleRecoveryStrategy;
|
||||
use crate::ingredient::{fmt_index, Ingredient};
|
||||
use crate::input::Configuration;
|
||||
use crate::runtime::local_state::QueryOrigin;
|
||||
use crate::local_state::QueryOrigin;
|
||||
use crate::storage::IngredientIndex;
|
||||
use crate::{Database, DatabaseKeyIndex, Id, Revision};
|
||||
use std::fmt;
|
||||
|
@ -97,6 +97,10 @@ where
|
|||
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt_index(C::FIELD_DEBUG_NAMES[self.field_index], index, fmt)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
C::FIELD_DEBUG_NAMES[self.field_index]
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> std::fmt::Debug for FieldIngredientImpl<C>
|
||||
|
|
|
@ -9,9 +9,8 @@ use crate::durability::Durability;
|
|||
use crate::id::AsId;
|
||||
use crate::ingredient::fmt_index;
|
||||
use crate::key::DependencyIndex;
|
||||
use crate::local_state::{self, QueryOrigin};
|
||||
use crate::plumbing::Jar;
|
||||
use crate::runtime::local_state::QueryOrigin;
|
||||
use crate::runtime::Runtime;
|
||||
use crate::storage::IngredientIndex;
|
||||
use crate::{Database, DatabaseKeyIndex, Id};
|
||||
|
||||
|
@ -123,13 +122,22 @@ where
|
|||
unsafe { std::mem::transmute(data) }
|
||||
}
|
||||
|
||||
pub fn intern_id<'db>(&'db self, runtime: &'db Runtime, data: C::Data<'db>) -> crate::Id {
|
||||
C::deref_struct(self.intern(runtime, data)).as_id()
|
||||
pub fn intern_id<'db>(
|
||||
&'db self,
|
||||
db: &'db dyn crate::Database,
|
||||
data: C::Data<'db>,
|
||||
) -> crate::Id {
|
||||
C::deref_struct(self.intern(db, data)).as_id()
|
||||
}
|
||||
|
||||
/// Intern data to a unique reference.
|
||||
pub fn intern<'db>(&'db self, runtime: &'db Runtime, data: C::Data<'db>) -> C::Struct<'db> {
|
||||
runtime.report_tracked_read(
|
||||
pub fn intern<'db>(
|
||||
&'db self,
|
||||
db: &'db dyn crate::Database,
|
||||
data: C::Data<'db>,
|
||||
) -> C::Struct<'db> {
|
||||
local_state::attach(db, |state| {
|
||||
state.report_tracked_read(
|
||||
DependencyIndex::for_table(self.ingredient_index),
|
||||
Durability::MAX,
|
||||
self.reset_at,
|
||||
|
@ -167,6 +175,7 @@ where
|
|||
unsafe { C::struct_from_raw(value_raw) }
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn interned_value(&self, id: Id) -> C::Struct<'_> {
|
||||
|
@ -271,6 +280,10 @@ where
|
|||
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt_index(C::DEBUG_NAME, index, fmt)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
C::DEBUG_NAME
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> std::fmt::Debug for IngredientImpl<C>
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use crate::{cycle::CycleRecoveryStrategy, database, storage::IngredientIndex, Database, Id};
|
||||
use crate::{cycle::CycleRecoveryStrategy, local_state, storage::IngredientIndex, Database, Id};
|
||||
|
||||
/// An integer that uniquely identifies a particular query instance within the
|
||||
/// database. Used to track dependencies between queries. Fully ordered and
|
||||
|
@ -57,7 +57,7 @@ impl DependencyIndex {
|
|||
|
||||
impl std::fmt::Debug for DependencyIndex {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
database::with_attached_database(|db| {
|
||||
local_state::with_attached_database(|db| {
|
||||
let ingredient = db.lookup_ingredient(self.ingredient_index);
|
||||
ingredient.fmt_index(self.key_index, f)
|
||||
})
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
mod accumulator;
|
||||
mod active_query;
|
||||
mod alloc;
|
||||
mod array;
|
||||
mod cancelled;
|
||||
|
@ -15,6 +16,7 @@ mod ingredient_list;
|
|||
mod input;
|
||||
mod interned;
|
||||
mod key;
|
||||
mod local_state;
|
||||
mod nonce;
|
||||
mod revision;
|
||||
mod runtime;
|
||||
|
@ -39,7 +41,7 @@ pub use self::revision::Revision;
|
|||
pub use self::runtime::Runtime;
|
||||
pub use self::storage::Storage;
|
||||
pub use self::update::Update;
|
||||
pub use crate::database::with_attached_database;
|
||||
pub use crate::local_state::with_attached_database;
|
||||
pub use salsa_macros::accumulator;
|
||||
pub use salsa_macros::db;
|
||||
pub use salsa_macros::input;
|
||||
|
@ -77,9 +79,7 @@ pub mod plumbing {
|
|||
pub use crate::array::Array;
|
||||
pub use crate::cycle::Cycle;
|
||||
pub use crate::cycle::CycleRecoveryStrategy;
|
||||
pub use crate::database::attach_database;
|
||||
pub use crate::database::current_revision;
|
||||
pub use crate::database::with_attached_database;
|
||||
pub use crate::database::Database;
|
||||
pub use crate::function::should_backdate_value;
|
||||
pub use crate::id::AsId;
|
||||
|
@ -89,6 +89,7 @@ pub mod plumbing {
|
|||
pub use crate::ingredient::Ingredient;
|
||||
pub use crate::ingredient::Jar;
|
||||
pub use crate::key::DatabaseKeyIndex;
|
||||
pub use crate::local_state::with_attached_database;
|
||||
pub use crate::revision::Revision;
|
||||
pub use crate::runtime::stamp;
|
||||
pub use crate::runtime::Runtime;
|
||||
|
|
|
@ -1,17 +1,51 @@
|
|||
use tracing::debug;
|
||||
|
||||
use crate::active_query::ActiveQuery;
|
||||
use crate::durability::Durability;
|
||||
use crate::key::DatabaseKeyIndex;
|
||||
use crate::key::DependencyIndex;
|
||||
use crate::runtime::Revision;
|
||||
use crate::runtime::StampedValue;
|
||||
use crate::storage::IngredientIndex;
|
||||
use crate::tracked_struct::Disambiguator;
|
||||
use crate::Cancelled;
|
||||
use crate::Cycle;
|
||||
use crate::Database;
|
||||
use crate::Event;
|
||||
use crate::EventKind;
|
||||
use crate::Revision;
|
||||
use crate::Runtime;
|
||||
use std::cell::Cell;
|
||||
use std::cell::RefCell;
|
||||
use std::ptr::NonNull;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::active_query::ActiveQuery;
|
||||
use super::StampedValue;
|
||||
thread_local! {
|
||||
/// The thread-local state salsa requires for a given thread
|
||||
static LOCAL_STATE: LocalState = const { LocalState::new() }
|
||||
}
|
||||
|
||||
/// Attach the database to the current thread and execute `op`.
|
||||
/// Panics if a different database has already been attached.
|
||||
pub(crate) fn attach<R, DB>(db: &DB, op: impl FnOnce(&LocalState) -> R) -> R
|
||||
where
|
||||
DB: ?Sized + Database,
|
||||
{
|
||||
LOCAL_STATE.with(|state| state.attach(db.as_salsa_database(), || op(state)))
|
||||
}
|
||||
|
||||
/// Access the "attached" database. Returns `None` if no database is attached.
|
||||
/// Databases are attached with `attach_database`.
|
||||
pub fn with_attached_database<R>(op: impl FnOnce(&dyn Database) -> R) -> Option<R> {
|
||||
LOCAL_STATE.with(|state| {
|
||||
if let Some(db) = state.database.get() {
|
||||
// SAFETY: We always attach the database in for the entire duration of a function,
|
||||
// so it cannot become "unattached" while this function is running.
|
||||
Some(op(unsafe { db.as_ref() }))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// State that is specific to a single execution thread.
|
||||
///
|
||||
|
@ -19,7 +53,10 @@ use super::StampedValue;
|
|||
///
|
||||
/// **Note also that all mutations to the database handle (and hence
|
||||
/// to the local-state) must be undone during unwinding.**
|
||||
pub(super) struct LocalState {
|
||||
pub(crate) struct LocalState {
|
||||
/// Pointer to the currently attached database.
|
||||
database: Cell<Option<NonNull<dyn Database>>>,
|
||||
|
||||
/// Vector of active queries.
|
||||
///
|
||||
/// This is normally `Some`, but it is set to `None`
|
||||
|
@ -30,6 +67,278 @@ pub(super) struct LocalState {
|
|||
query_stack: RefCell<Option<Vec<ActiveQuery>>>,
|
||||
}
|
||||
|
||||
impl LocalState {
|
||||
const fn new() -> Self {
|
||||
LocalState {
|
||||
database: Cell::new(None),
|
||||
query_stack: RefCell::new(Some(vec![])),
|
||||
}
|
||||
}
|
||||
|
||||
fn attach<R>(&self, db: &dyn Database, op: impl FnOnce() -> R) -> R {
|
||||
struct DbGuard<'s> {
|
||||
state: Option<&'s LocalState>,
|
||||
}
|
||||
|
||||
impl<'s> DbGuard<'s> {
|
||||
fn new(state: &'s LocalState, db: &dyn Database) -> Self {
|
||||
if let Some(current_db) = state.database.get() {
|
||||
// Already attached? Assert that the database has not changed.
|
||||
assert_eq!(
|
||||
current_db,
|
||||
NonNull::from(db),
|
||||
"cannot change database mid-query",
|
||||
);
|
||||
Self { state: None }
|
||||
} else {
|
||||
// Otherwise, set the database.
|
||||
state.database.set(Some(NonNull::from(db)));
|
||||
Self { state: Some(state) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DbGuard<'_> {
|
||||
fn drop(&mut self) {
|
||||
// Reset database to null if we did anything in `DbGuard::new`.
|
||||
if let Some(state) = self.state {
|
||||
state.database.set(None);
|
||||
|
||||
// All stack frames should have been popped from the local stack.
|
||||
assert!(state.query_stack.borrow().as_ref().unwrap().is_empty());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let _guard = DbGuard::new(self, db);
|
||||
op()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
|
||||
let mut query_stack = self.query_stack.borrow_mut();
|
||||
let query_stack = query_stack.as_mut().expect("local stack taken");
|
||||
query_stack.push(ActiveQuery::new(database_key_index));
|
||||
ActiveQueryGuard {
|
||||
local_state: self,
|
||||
database_key_index,
|
||||
push_len: query_stack.len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn with_query_stack<R>(&self, c: impl FnOnce(&mut Vec<ActiveQuery>) -> R) -> R {
|
||||
c(self
|
||||
.query_stack
|
||||
.borrow_mut()
|
||||
.as_mut()
|
||||
.expect("query stack taken"))
|
||||
}
|
||||
|
||||
fn query_in_progress(&self) -> bool {
|
||||
self.with_query_stack(|stack| !stack.is_empty())
|
||||
}
|
||||
|
||||
/// Returns the index of the active query along with its *current* durability/changed-at
|
||||
/// information. As the query continues to execute, naturally, that information may change.
|
||||
pub(crate) fn active_query(&self) -> Option<(DatabaseKeyIndex, StampedValue<()>)> {
|
||||
self.with_query_stack(|stack| {
|
||||
stack.last().map(|active_query| {
|
||||
(
|
||||
active_query.database_key_index,
|
||||
StampedValue {
|
||||
value: (),
|
||||
durability: active_query.durability,
|
||||
changed_at: active_query.changed_at,
|
||||
},
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Add an output to the current query's list of dependencies
|
||||
pub(crate) fn add_output(&self, entity: DependencyIndex) {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_output(entity)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Check whether `entity` is an output of the currently active query (if any)
|
||||
pub(crate) fn is_output_of_active_query(&self, entity: DependencyIndex) -> bool {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.is_output(entity)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Register that currently active query reads the given input
|
||||
pub(crate) fn report_tracked_read(
|
||||
&self,
|
||||
input: DependencyIndex,
|
||||
durability: Durability,
|
||||
changed_at: Revision,
|
||||
) {
|
||||
debug!(
|
||||
"report_query_read_and_unwind_if_cycle_resulted(input={:?}, durability={:?}, changed_at={:?})",
|
||||
input, durability, changed_at
|
||||
);
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_read(input, durability, changed_at);
|
||||
|
||||
// We are a cycle participant:
|
||||
//
|
||||
// C0 --> ... --> Ci --> Ci+1 -> ... -> Cn --> C0
|
||||
// ^ ^
|
||||
// : |
|
||||
// This edge -----+ |
|
||||
// |
|
||||
// |
|
||||
// N0
|
||||
//
|
||||
// In this case, the value we have just read from `Ci+1`
|
||||
// is actually the cycle fallback value and not especially
|
||||
// interesting. We unwind now with `CycleParticipant` to avoid
|
||||
// executing the rest of our query function. This unwinding
|
||||
// will be caught and our own fallback value will be used.
|
||||
//
|
||||
// Note that `Ci+1` may` have *other* callers who are not
|
||||
// participants in the cycle (e.g., N0 in the graph above).
|
||||
// They will not have the `cycle` marker set in their
|
||||
// stack frames, so they will just read the fallback value
|
||||
// from `Ci+1` and continue on their merry way.
|
||||
if let Some(cycle) = &top_query.cycle {
|
||||
cycle.clone().throw()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Register that the current query read an untracked value
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// * `current_revision`, the current revision
|
||||
pub(crate) fn report_untracked_read(&self, current_revision: Revision) {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_untracked_read(current_revision);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Update the top query on the stack to act as though it read a value
|
||||
/// of durability `durability` which changed in `revision`.
|
||||
// FIXME: Use or remove this.
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn report_synthetic_read(&self, durability: Durability, revision: Revision) {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_synthetic_read(durability, revision);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Takes the query stack and returns it. This is used when
|
||||
/// the current thread is blocking. The stack must be restored
|
||||
/// with [`Self::restore_query_stack`] when the thread unblocks.
|
||||
pub(crate) fn take_query_stack(&self) -> Vec<ActiveQuery> {
|
||||
assert!(
|
||||
self.query_stack.borrow().is_some(),
|
||||
"query stack already taken"
|
||||
);
|
||||
self.query_stack.take().unwrap()
|
||||
}
|
||||
|
||||
/// Restores a query stack taken with [`Self::take_query_stack`] once
|
||||
/// the thread unblocks.
|
||||
pub(crate) fn restore_query_stack(&self, stack: Vec<ActiveQuery>) {
|
||||
assert!(self.query_stack.borrow().is_none(), "query stack not taken");
|
||||
self.query_stack.replace(Some(stack));
|
||||
}
|
||||
|
||||
/// Called when the active queries creates an index from the
|
||||
/// entity table with the index `entity_index`. Has the following effects:
|
||||
///
|
||||
/// * Add a query read on `DatabaseKeyIndex::for_table(entity_index)`
|
||||
/// * Identify a unique disambiguator for the hash within the current query,
|
||||
/// adding the hash to the current query's disambiguator table.
|
||||
/// * Returns a tuple of:
|
||||
/// * the id of the current query
|
||||
/// * the current dependencies (durability, changed_at) of current query
|
||||
/// * the disambiguator index
|
||||
#[track_caller]
|
||||
pub(crate) fn disambiguate(
|
||||
&self,
|
||||
entity_index: IngredientIndex,
|
||||
reset_at: Revision,
|
||||
data_hash: u64,
|
||||
) -> (DatabaseKeyIndex, StampedValue<()>, Disambiguator) {
|
||||
assert!(
|
||||
self.query_in_progress(),
|
||||
"cannot create a tracked struct disambiguator outside of a tracked function"
|
||||
);
|
||||
|
||||
self.report_tracked_read(
|
||||
DependencyIndex::for_table(entity_index),
|
||||
Durability::MAX,
|
||||
reset_at,
|
||||
);
|
||||
|
||||
self.with_query_stack(|stack| {
|
||||
let top_query = stack.last_mut().unwrap();
|
||||
let disambiguator = top_query.disambiguate(data_hash);
|
||||
(
|
||||
top_query.database_key_index,
|
||||
StampedValue {
|
||||
value: (),
|
||||
durability: top_query.durability,
|
||||
changed_at: top_query.changed_at,
|
||||
},
|
||||
disambiguator,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Starts unwinding the stack if the current revision is cancelled.
|
||||
///
|
||||
/// This method can be called by query implementations that perform
|
||||
/// potentially expensive computations, in order to speed up propagation of
|
||||
/// cancellation.
|
||||
///
|
||||
/// Cancellation will automatically be triggered by salsa on any query
|
||||
/// invocation.
|
||||
///
|
||||
/// This method should not be overridden by `Database` implementors. A
|
||||
/// `salsa_event` is emitted when this method is called, so that should be
|
||||
/// used instead.
|
||||
pub(crate) fn unwind_if_revision_cancelled(&self, db: &dyn Database) {
|
||||
let runtime = db.runtime();
|
||||
let thread_id = std::thread::current().id();
|
||||
db.salsa_event(Event {
|
||||
thread_id,
|
||||
|
||||
kind: EventKind::WillCheckCancellation,
|
||||
});
|
||||
if runtime.load_cancellation_flag() {
|
||||
self.unwind_cancelled(runtime);
|
||||
}
|
||||
}
|
||||
|
||||
#[cold]
|
||||
pub(crate) fn unwind_cancelled(&self, runtime: &Runtime) {
|
||||
let current_revision = runtime.current_revision();
|
||||
self.report_untracked_read(current_revision);
|
||||
Cancelled::PendingWrite.throw();
|
||||
}
|
||||
}
|
||||
|
||||
impl std::panic::RefUnwindSafe for LocalState {}
|
||||
|
||||
/// Summarizes "all the inputs that a query used"
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct QueryRevisions {
|
||||
|
@ -150,200 +459,6 @@ impl QueryEdges {
|
|||
}
|
||||
}
|
||||
|
||||
impl Default for LocalState {
|
||||
fn default() -> Self {
|
||||
LocalState {
|
||||
query_stack: RefCell::new(Some(Vec::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LocalState {
|
||||
#[inline]
|
||||
pub(super) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
|
||||
let mut query_stack = self.query_stack.borrow_mut();
|
||||
let query_stack = query_stack.as_mut().expect("local stack taken");
|
||||
query_stack.push(ActiveQuery::new(database_key_index));
|
||||
ActiveQueryGuard {
|
||||
local_state: self,
|
||||
database_key_index,
|
||||
push_len: query_stack.len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn with_query_stack<R>(&self, c: impl FnOnce(&mut Vec<ActiveQuery>) -> R) -> R {
|
||||
c(self
|
||||
.query_stack
|
||||
.borrow_mut()
|
||||
.as_mut()
|
||||
.expect("query stack taken"))
|
||||
}
|
||||
|
||||
pub(super) fn query_in_progress(&self) -> bool {
|
||||
self.with_query_stack(|stack| !stack.is_empty())
|
||||
}
|
||||
|
||||
/// Dangerous operation: executes `op` but ignores its effect on
|
||||
/// the query dependencies. Useful for debugging statements, but
|
||||
/// otherwise not to be toyed with!
|
||||
pub(super) fn debug_probe<R>(&self, op: impl FnOnce() -> R) -> R {
|
||||
let saved_state: Option<_> =
|
||||
self.with_query_stack(|stack| Some(stack.last()?.save_query_state()));
|
||||
|
||||
let result = op();
|
||||
|
||||
if let Some(saved_state) = saved_state {
|
||||
self.with_query_stack(|stack| {
|
||||
let active_query = stack.last_mut().expect("query stack not empty");
|
||||
active_query.restore_query_state(saved_state);
|
||||
});
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Returns the index of the active query along with its *current* durability/changed-at
|
||||
/// information. As the query continues to execute, naturally, that information may change.
|
||||
pub(super) fn active_query(&self) -> Option<(DatabaseKeyIndex, StampedValue<()>)> {
|
||||
self.with_query_stack(|stack| {
|
||||
stack.last().map(|active_query| {
|
||||
(
|
||||
active_query.database_key_index,
|
||||
StampedValue {
|
||||
value: (),
|
||||
durability: active_query.durability,
|
||||
changed_at: active_query.changed_at,
|
||||
},
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn add_output(&self, entity: DependencyIndex) {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_output(entity)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn is_output(&self, entity: DependencyIndex) -> bool {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.is_output(entity)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn report_tracked_read(
|
||||
&self,
|
||||
input: DependencyIndex,
|
||||
durability: Durability,
|
||||
changed_at: Revision,
|
||||
) {
|
||||
debug!(
|
||||
"report_query_read_and_unwind_if_cycle_resulted(input={:?}, durability={:?}, changed_at={:?})",
|
||||
input, durability, changed_at
|
||||
);
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_read(input, durability, changed_at);
|
||||
|
||||
// We are a cycle participant:
|
||||
//
|
||||
// C0 --> ... --> Ci --> Ci+1 -> ... -> Cn --> C0
|
||||
// ^ ^
|
||||
// : |
|
||||
// This edge -----+ |
|
||||
// |
|
||||
// |
|
||||
// N0
|
||||
//
|
||||
// In this case, the value we have just read from `Ci+1`
|
||||
// is actually the cycle fallback value and not especially
|
||||
// interesting. We unwind now with `CycleParticipant` to avoid
|
||||
// executing the rest of our query function. This unwinding
|
||||
// will be caught and our own fallback value will be used.
|
||||
//
|
||||
// Note that `Ci+1` may` have *other* callers who are not
|
||||
// participants in the cycle (e.g., N0 in the graph above).
|
||||
// They will not have the `cycle` marker set in their
|
||||
// stack frames, so they will just read the fallback value
|
||||
// from `Ci+1` and continue on their merry way.
|
||||
if let Some(cycle) = &top_query.cycle {
|
||||
cycle.clone().throw()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn report_untracked_read(&self, current_revision: Revision) {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_untracked_read(current_revision);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Update the top query on the stack to act as though it read a value
|
||||
/// of durability `durability` which changed in `revision`.
|
||||
// FIXME: Use or remove this.
|
||||
#[allow(dead_code)]
|
||||
pub(super) fn report_synthetic_read(&self, durability: Durability, revision: Revision) {
|
||||
self.with_query_stack(|stack| {
|
||||
if let Some(top_query) = stack.last_mut() {
|
||||
top_query.add_synthetic_read(durability, revision);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Takes the query stack and returns it. This is used when
|
||||
/// the current thread is blocking. The stack must be restored
|
||||
/// with [`Self::restore_query_stack`] when the thread unblocks.
|
||||
pub(super) fn take_query_stack(&self) -> Vec<ActiveQuery> {
|
||||
assert!(
|
||||
self.query_stack.borrow().is_some(),
|
||||
"query stack already taken"
|
||||
);
|
||||
self.query_stack.take().unwrap()
|
||||
}
|
||||
|
||||
/// Restores a query stack taken with [`Self::take_query_stack`] once
|
||||
/// the thread unblocks.
|
||||
pub(super) fn restore_query_stack(&self, stack: Vec<ActiveQuery>) {
|
||||
assert!(self.query_stack.borrow().is_none(), "query stack not taken");
|
||||
self.query_stack.replace(Some(stack));
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
pub(crate) fn disambiguate(
|
||||
&self,
|
||||
data_hash: u64,
|
||||
) -> (DatabaseKeyIndex, StampedValue<()>, Disambiguator) {
|
||||
assert!(
|
||||
self.query_in_progress(),
|
||||
"cannot create a tracked struct disambiguator outside of a tracked function"
|
||||
);
|
||||
self.with_query_stack(|stack| {
|
||||
let top_query = stack.last_mut().unwrap();
|
||||
let disambiguator = top_query.disambiguate(data_hash);
|
||||
(
|
||||
top_query.database_key_index,
|
||||
StampedValue {
|
||||
value: (),
|
||||
durability: top_query.durability,
|
||||
changed_at: top_query.changed_at,
|
||||
},
|
||||
disambiguator,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl std::panic::RefUnwindSafe for LocalState {}
|
||||
|
||||
/// When a query is pushed onto the `active_query` stack, this guard
|
||||
/// is returned to represent its slot. The guard can be used to pop
|
||||
/// the query from the stack -- in the case of unwinding, the guard's
|
||||
|
@ -368,7 +483,7 @@ impl ActiveQueryGuard<'_> {
|
|||
}
|
||||
|
||||
/// Invoked when the query has successfully completed execution.
|
||||
pub(super) fn complete(self) -> ActiveQuery {
|
||||
pub(crate) fn complete(self) -> ActiveQuery {
|
||||
let query = self.pop_helper();
|
||||
std::mem::forget(self);
|
||||
query
|
262
src/runtime.rs
262
src/runtime.rs
|
@ -1,38 +1,52 @@
|
|||
use std::{
|
||||
panic::panic_any,
|
||||
sync::{atomic::Ordering, Arc},
|
||||
sync::{atomic::AtomicUsize, Arc},
|
||||
thread::ThreadId,
|
||||
};
|
||||
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use crate::{
|
||||
active_query::ActiveQuery,
|
||||
cycle::CycleRecoveryStrategy,
|
||||
durability::Durability,
|
||||
key::{DatabaseKeyIndex, DependencyIndex},
|
||||
runtime::active_query::ActiveQuery,
|
||||
storage::IngredientIndex,
|
||||
local_state::{EdgeKind, LocalState},
|
||||
revision::AtomicRevision,
|
||||
Cancelled, Cycle, Database, Event, EventKind, Revision,
|
||||
};
|
||||
|
||||
use self::{
|
||||
dependency_graph::DependencyGraph,
|
||||
local_state::{ActiveQueryGuard, EdgeKind},
|
||||
};
|
||||
use self::dependency_graph::DependencyGraph;
|
||||
|
||||
use super::tracked_struct::Disambiguator;
|
||||
|
||||
mod active_query;
|
||||
mod dependency_graph;
|
||||
pub mod local_state;
|
||||
mod shared_state;
|
||||
|
||||
pub struct Runtime {
|
||||
/// Our unique runtime id.
|
||||
id: RuntimeId,
|
||||
/// Stores the next id to use for a snapshotted runtime (starts at 1).
|
||||
next_id: AtomicUsize,
|
||||
|
||||
/// Local state that is specific to this runtime (thread).
|
||||
local_state: local_state::LocalState,
|
||||
/// Vector we can clone
|
||||
empty_dependencies: Arc<[(EdgeKind, DependencyIndex)]>,
|
||||
|
||||
/// Shared state that is accessible via all runtimes.
|
||||
shared_state: Arc<shared_state::SharedState>,
|
||||
/// Set to true when the current revision has been canceled.
|
||||
/// This is done when we an input is being changed. The flag
|
||||
/// is set back to false once the input has been changed.
|
||||
revision_canceled: AtomicCell<bool>,
|
||||
|
||||
/// Stores the "last change" revision for values of each duration.
|
||||
/// This vector is always of length at least 1 (for Durability 0)
|
||||
/// but its total length depends on the number of durations. The
|
||||
/// element at index 0 is special as it represents the "current
|
||||
/// revision". In general, we have the invariant that revisions
|
||||
/// in here are *declining* -- that is, `revisions[i] >=
|
||||
/// revisions[i + 1]`, for all `i`. This is because when you
|
||||
/// modify a value with durability D, that implies that values
|
||||
/// with durability less than D may have changed too.
|
||||
revisions: Vec<AtomicRevision>,
|
||||
|
||||
/// The dependency graph tracks which runtimes are blocked on one
|
||||
/// another, waiting for queries to terminate.
|
||||
dependency_graph: Mutex<DependencyGraph>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
|
@ -42,14 +56,6 @@ pub(crate) enum WaitResult {
|
|||
Cycle(Cycle),
|
||||
}
|
||||
|
||||
/// A unique identifier for a particular runtime. Each time you create
|
||||
/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is
|
||||
/// complete, its `RuntimeId` may potentially be re-used.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
|
||||
pub struct RuntimeId {
|
||||
counter: usize,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct StampedValue<V> {
|
||||
pub value: V,
|
||||
|
@ -79,9 +85,13 @@ impl<V> StampedValue<V> {
|
|||
impl Default for Runtime {
|
||||
fn default() -> Self {
|
||||
Runtime {
|
||||
id: RuntimeId { counter: 0 },
|
||||
shared_state: Default::default(),
|
||||
local_state: Default::default(),
|
||||
revisions: (0..Durability::LEN)
|
||||
.map(|_| AtomicRevision::start())
|
||||
.collect(),
|
||||
next_id: AtomicUsize::new(1),
|
||||
empty_dependencies: None.into_iter().collect(),
|
||||
revision_canceled: Default::default(),
|
||||
dependency_graph: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -89,80 +99,21 @@ impl Default for Runtime {
|
|||
impl std::fmt::Debug for Runtime {
|
||||
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
fmt.debug_struct("Runtime")
|
||||
.field("id", &self.id())
|
||||
.field("shared_state", &self.shared_state)
|
||||
.field("revisions", &self.revisions)
|
||||
.field("next_id", &self.next_id)
|
||||
.field("revision_canceled", &self.revision_canceled)
|
||||
.field("dependency_graph", &self.dependency_graph)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Runtime {
|
||||
pub(crate) fn id(&self) -> RuntimeId {
|
||||
self.id
|
||||
}
|
||||
|
||||
pub(crate) fn current_revision(&self) -> Revision {
|
||||
self.shared_state.revisions[0].load()
|
||||
}
|
||||
|
||||
/// Returns the index of the active query along with its *current* durability/changed-at
|
||||
/// information. As the query continues to execute, naturally, that information may change.
|
||||
pub(crate) fn active_query(&self) -> Option<(DatabaseKeyIndex, StampedValue<()>)> {
|
||||
self.local_state.active_query()
|
||||
self.revisions[0].load()
|
||||
}
|
||||
|
||||
pub(crate) fn empty_dependencies(&self) -> Arc<[(EdgeKind, DependencyIndex)]> {
|
||||
self.shared_state.empty_dependencies.clone()
|
||||
}
|
||||
|
||||
/// Executes `op` but ignores its effect on
|
||||
/// the query dependencies; intended for use
|
||||
/// by `DebugWithDb` only.
|
||||
///
|
||||
/// # Danger: intended for debugging only
|
||||
///
|
||||
/// This operation is intended for **debugging only**.
|
||||
/// Misuse will cause Salsa to give incorrect results.
|
||||
/// The expectation is that the type `R` produced will be
|
||||
/// logged or printed out. **The type `R` that is produced
|
||||
/// should not affect the result or other outputs
|
||||
/// (such as accumulators) from the current Salsa query.**
|
||||
pub fn debug_probe<R>(&self, op: impl FnOnce() -> R) -> R {
|
||||
self.local_state.debug_probe(op)
|
||||
}
|
||||
|
||||
pub fn snapshot(&self) -> Self {
|
||||
if self.local_state.query_in_progress() {
|
||||
panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)");
|
||||
}
|
||||
|
||||
let id = RuntimeId {
|
||||
counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst),
|
||||
};
|
||||
|
||||
Runtime {
|
||||
id,
|
||||
shared_state: self.shared_state.clone(),
|
||||
local_state: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn report_tracked_read(
|
||||
&self,
|
||||
key_index: DependencyIndex,
|
||||
durability: Durability,
|
||||
changed_at: Revision,
|
||||
) {
|
||||
self.local_state
|
||||
.report_tracked_read(key_index, durability, changed_at)
|
||||
}
|
||||
|
||||
/// Reports that the query depends on some state unknown to salsa.
|
||||
///
|
||||
/// Queries which report untracked reads will be re-executed in the next
|
||||
/// revision.
|
||||
pub fn report_untracked_read(&self) {
|
||||
self.local_state
|
||||
.report_untracked_read(self.current_revision());
|
||||
self.empty_dependencies.clone()
|
||||
}
|
||||
|
||||
/// Reports that an input with durability `durability` changed.
|
||||
|
@ -170,46 +121,11 @@ impl Runtime {
|
|||
/// less than or equal to `durability` to the current revision.
|
||||
pub(crate) fn report_tracked_write(&mut self, durability: Durability) {
|
||||
let new_revision = self.current_revision();
|
||||
for rev in &self.shared_state.revisions[1..=durability.index()] {
|
||||
for rev in &self.revisions[1..=durability.index()] {
|
||||
rev.store(new_revision);
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds `key` to the list of output created by the current query
|
||||
/// (if not already present).
|
||||
pub(crate) fn add_output(&self, key: DependencyIndex) {
|
||||
self.local_state.add_output(key);
|
||||
}
|
||||
|
||||
/// Check whether `entity` is contained the list of outputs written by the current query.
|
||||
pub(super) fn is_output_of_active_query(&self, entity: DependencyIndex) -> bool {
|
||||
self.local_state.is_output(entity)
|
||||
}
|
||||
|
||||
/// Called when the active queries creates an index from the
|
||||
/// entity table with the index `entity_index`. Has the following effects:
|
||||
///
|
||||
/// * Add a query read on `DatabaseKeyIndex::for_table(entity_index)`
|
||||
/// * Identify a unique disambiguator for the hash within the current query,
|
||||
/// adding the hash to the current query's disambiguator table.
|
||||
/// * Returns a tuple of:
|
||||
/// * the id of the current query
|
||||
/// * the current dependencies (durability, changed_at) of current query
|
||||
/// * the disambiguator index
|
||||
pub(crate) fn disambiguate_entity(
|
||||
&self,
|
||||
entity_index: IngredientIndex,
|
||||
reset_at: Revision,
|
||||
data_hash: u64,
|
||||
) -> (DatabaseKeyIndex, StampedValue<()>, Disambiguator) {
|
||||
self.report_tracked_read(
|
||||
DependencyIndex::for_table(entity_index),
|
||||
Durability::MAX,
|
||||
reset_at,
|
||||
);
|
||||
self.local_state.disambiguate(data_hash)
|
||||
}
|
||||
|
||||
/// The revision in which values with durability `d` may have last
|
||||
/// changed. For D0, this is just the current revision. But for
|
||||
/// higher levels of durability, this value may lag behind the
|
||||
|
@ -219,43 +135,15 @@ impl Runtime {
|
|||
/// dependencies.
|
||||
#[inline]
|
||||
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
|
||||
self.shared_state.revisions[d.index()].load()
|
||||
self.revisions[d.index()].load()
|
||||
}
|
||||
|
||||
/// Starts unwinding the stack if the current revision is cancelled.
|
||||
///
|
||||
/// This method can be called by query implementations that perform
|
||||
/// potentially expensive computations, in order to speed up propagation of
|
||||
/// cancellation.
|
||||
///
|
||||
/// Cancellation will automatically be triggered by salsa on any query
|
||||
/// invocation.
|
||||
///
|
||||
/// This method should not be overridden by `Database` implementors. A
|
||||
/// `salsa_event` is emitted when this method is called, so that should be
|
||||
/// used instead.
|
||||
pub(crate) fn unwind_if_revision_cancelled<DB: ?Sized + Database>(&self, db: &DB) {
|
||||
db.salsa_event(Event {
|
||||
runtime_id: self.id(),
|
||||
kind: EventKind::WillCheckCancellation,
|
||||
});
|
||||
if self.shared_state.revision_canceled.load() {
|
||||
db.salsa_event(Event {
|
||||
runtime_id: self.id(),
|
||||
kind: EventKind::WillCheckCancellation,
|
||||
});
|
||||
self.unwind_cancelled();
|
||||
}
|
||||
}
|
||||
|
||||
#[cold]
|
||||
pub(crate) fn unwind_cancelled(&self) {
|
||||
self.report_untracked_read();
|
||||
Cancelled::PendingWrite.throw();
|
||||
pub(crate) fn load_cancellation_flag(&self) -> bool {
|
||||
self.revision_canceled.load()
|
||||
}
|
||||
|
||||
pub(crate) fn set_cancellation_flag(&self) {
|
||||
self.shared_state.revision_canceled.store(true);
|
||||
self.revision_canceled.store(true);
|
||||
}
|
||||
|
||||
/// Increments the "current revision" counter and clears
|
||||
|
@ -265,16 +153,11 @@ impl Runtime {
|
|||
pub(crate) fn new_revision(&mut self) -> Revision {
|
||||
let r_old = self.current_revision();
|
||||
let r_new = r_old.next();
|
||||
self.shared_state.revisions[0].store(r_new);
|
||||
self.shared_state.revision_canceled.store(false);
|
||||
self.revisions[0].store(r_new);
|
||||
self.revision_canceled.store(false);
|
||||
r_new
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
|
||||
self.local_state.push_query(database_key_index)
|
||||
}
|
||||
|
||||
/// Block until `other_id` completes executing `database_key`;
|
||||
/// panic or unwind in the case of a cycle.
|
||||
///
|
||||
|
@ -300,40 +183,42 @@ impl Runtime {
|
|||
pub(crate) fn block_on_or_unwind<QueryMutexGuard>(
|
||||
&self,
|
||||
db: &dyn Database,
|
||||
local_state: &LocalState,
|
||||
database_key: DatabaseKeyIndex,
|
||||
other_id: RuntimeId,
|
||||
other_id: ThreadId,
|
||||
query_mutex_guard: QueryMutexGuard,
|
||||
) {
|
||||
let mut dg = self.shared_state.dependency_graph.lock();
|
||||
let mut dg = self.dependency_graph.lock();
|
||||
let thread_id = std::thread::current().id();
|
||||
|
||||
if dg.depends_on(other_id, self.id()) {
|
||||
self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id);
|
||||
if dg.depends_on(other_id, thread_id) {
|
||||
self.unblock_cycle_and_maybe_throw(db, local_state, &mut dg, database_key, other_id);
|
||||
|
||||
// If the above fn returns, then (via cycle recovery) it has unblocked the
|
||||
// cycle, so we can continue.
|
||||
assert!(!dg.depends_on(other_id, self.id()));
|
||||
assert!(!dg.depends_on(other_id, thread_id));
|
||||
}
|
||||
|
||||
db.salsa_event(Event {
|
||||
runtime_id: self.id(),
|
||||
thread_id,
|
||||
kind: EventKind::WillBlockOn {
|
||||
other_runtime_id: other_id,
|
||||
other_thread_id: other_id,
|
||||
database_key,
|
||||
},
|
||||
});
|
||||
|
||||
let stack = self.local_state.take_query_stack();
|
||||
let stack = local_state.take_query_stack();
|
||||
|
||||
let (stack, result) = DependencyGraph::block_on(
|
||||
dg,
|
||||
self.id(),
|
||||
thread_id,
|
||||
database_key,
|
||||
other_id,
|
||||
stack,
|
||||
query_mutex_guard,
|
||||
);
|
||||
|
||||
self.local_state.restore_query_stack(stack);
|
||||
local_state.restore_query_stack(stack);
|
||||
|
||||
match result {
|
||||
WaitResult::Completed => (),
|
||||
|
@ -358,17 +243,18 @@ impl Runtime {
|
|||
fn unblock_cycle_and_maybe_throw(
|
||||
&self,
|
||||
db: &dyn Database,
|
||||
local_state: &LocalState,
|
||||
dg: &mut DependencyGraph,
|
||||
database_key_index: DatabaseKeyIndex,
|
||||
to_id: RuntimeId,
|
||||
to_id: ThreadId,
|
||||
) {
|
||||
tracing::debug!(
|
||||
"unblock_cycle_and_maybe_throw(database_key={:?})",
|
||||
database_key_index
|
||||
);
|
||||
|
||||
let mut from_stack = self.local_state.take_query_stack();
|
||||
let from_id = self.id();
|
||||
let mut from_stack = local_state.take_query_stack();
|
||||
let from_id = std::thread::current().id();
|
||||
|
||||
// Make a "dummy stack frame". As we iterate through the cycle, we will collect the
|
||||
// inputs from each participant. Then, if we are participating in cycle recovery, we
|
||||
|
@ -395,7 +281,12 @@ impl Runtime {
|
|||
// (at least for this execution, not necessarily across executions),
|
||||
// no matter where it started on the stack. Find the minimum
|
||||
// key and rotate it to the front.
|
||||
let min = v.iter().min().unwrap();
|
||||
let min = v
|
||||
.iter()
|
||||
.map(|key| (key.ingredient_index.debug_name(db), key))
|
||||
.min()
|
||||
.unwrap()
|
||||
.1;
|
||||
let index = v.iter().position(|p| p == min).unwrap();
|
||||
v.rotate_left(index);
|
||||
|
||||
|
@ -440,7 +331,7 @@ impl Runtime {
|
|||
let (me_recovered, others_recovered) =
|
||||
dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id);
|
||||
|
||||
self.local_state.restore_query_stack(from_stack);
|
||||
local_state.restore_query_stack(from_stack);
|
||||
|
||||
if me_recovered {
|
||||
// If the current thread has recovery, we want to throw
|
||||
|
@ -464,8 +355,7 @@ impl Runtime {
|
|||
database_key: DatabaseKeyIndex,
|
||||
wait_result: WaitResult,
|
||||
) {
|
||||
self.shared_state
|
||||
.dependency_graph
|
||||
self.dependency_graph
|
||||
.lock()
|
||||
.unblock_runtimes_blocked_on(database_key, wait_result);
|
||||
}
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
use std::sync::Arc;
|
||||
use std::thread::ThreadId;
|
||||
|
||||
use crate::active_query::ActiveQuery;
|
||||
use crate::key::DatabaseKeyIndex;
|
||||
use crate::runtime::WaitResult;
|
||||
use parking_lot::{Condvar, MutexGuard};
|
||||
use rustc_hash::FxHashMap;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use super::{active_query::ActiveQuery, RuntimeId, WaitResult};
|
||||
|
||||
type QueryStack = Vec<ActiveQuery>;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
|
@ -15,21 +16,21 @@ pub(super) struct DependencyGraph {
|
|||
/// `K` is blocked on some query executing in the runtime `V`.
|
||||
/// This encodes a graph that must be acyclic (or else deadlock
|
||||
/// will result).
|
||||
edges: FxHashMap<RuntimeId, Edge>,
|
||||
edges: FxHashMap<ThreadId, Edge>,
|
||||
|
||||
/// Encodes the `RuntimeId` that are blocked waiting for the result
|
||||
/// Encodes the `ThreadId` that are blocked waiting for the result
|
||||
/// of a given query.
|
||||
query_dependents: FxHashMap<DatabaseKeyIndex, SmallVec<[RuntimeId; 4]>>,
|
||||
query_dependents: FxHashMap<DatabaseKeyIndex, SmallVec<[ThreadId; 4]>>,
|
||||
|
||||
/// When a key K completes which had dependent queries Qs blocked on it,
|
||||
/// it stores its `WaitResult` here. As they wake up, each query Q in Qs will
|
||||
/// come here to fetch their results.
|
||||
wait_results: FxHashMap<RuntimeId, (QueryStack, WaitResult)>,
|
||||
wait_results: FxHashMap<ThreadId, (QueryStack, WaitResult)>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Edge {
|
||||
blocked_on_id: RuntimeId,
|
||||
blocked_on_id: ThreadId,
|
||||
blocked_on_key: DatabaseKeyIndex,
|
||||
stack: QueryStack,
|
||||
|
||||
|
@ -42,7 +43,7 @@ impl DependencyGraph {
|
|||
/// True if `from_id` depends on `to_id`.
|
||||
///
|
||||
/// (i.e., there is a path from `from_id` to `to_id` in the graph.)
|
||||
pub(super) fn depends_on(&mut self, from_id: RuntimeId, to_id: RuntimeId) -> bool {
|
||||
pub(super) fn depends_on(&mut self, from_id: ThreadId, to_id: ThreadId) -> bool {
|
||||
let mut p = from_id;
|
||||
while let Some(q) = self.edges.get(&p).map(|edge| edge.blocked_on_id) {
|
||||
if q == to_id {
|
||||
|
@ -62,10 +63,10 @@ impl DependencyGraph {
|
|||
/// 3. ...and `to_id` is transitively dependent on something which is present on `from_stack`.
|
||||
pub(super) fn for_each_cycle_participant(
|
||||
&mut self,
|
||||
from_id: RuntimeId,
|
||||
from_id: ThreadId,
|
||||
from_stack: &mut QueryStack,
|
||||
database_key: DatabaseKeyIndex,
|
||||
to_id: RuntimeId,
|
||||
to_id: ThreadId,
|
||||
mut closure: impl FnMut(&mut [ActiveQuery]),
|
||||
) {
|
||||
debug_assert!(self.depends_on(to_id, from_id));
|
||||
|
@ -130,10 +131,10 @@ impl DependencyGraph {
|
|||
/// * Others is true if other runtimes were unblocked.
|
||||
pub(super) fn maybe_unblock_runtimes_in_cycle(
|
||||
&mut self,
|
||||
from_id: RuntimeId,
|
||||
from_id: ThreadId,
|
||||
from_stack: &QueryStack,
|
||||
database_key: DatabaseKeyIndex,
|
||||
to_id: RuntimeId,
|
||||
to_id: ThreadId,
|
||||
) -> (bool, bool) {
|
||||
// See diagram in `for_each_cycle_participant`.
|
||||
let mut id = to_id;
|
||||
|
@ -194,9 +195,9 @@ impl DependencyGraph {
|
|||
/// * `held_mutex` is a read lock (or stronger) on `database_key`
|
||||
pub(super) fn block_on<QueryMutexGuard>(
|
||||
mut me: MutexGuard<'_, Self>,
|
||||
from_id: RuntimeId,
|
||||
from_id: ThreadId,
|
||||
database_key: DatabaseKeyIndex,
|
||||
to_id: RuntimeId,
|
||||
to_id: ThreadId,
|
||||
from_stack: QueryStack,
|
||||
query_mutex_guard: QueryMutexGuard,
|
||||
) -> (QueryStack, WaitResult) {
|
||||
|
@ -220,9 +221,9 @@ impl DependencyGraph {
|
|||
/// computing `database_key`.
|
||||
fn add_edge(
|
||||
&mut self,
|
||||
from_id: RuntimeId,
|
||||
from_id: ThreadId,
|
||||
database_key: DatabaseKeyIndex,
|
||||
to_id: RuntimeId,
|
||||
to_id: ThreadId,
|
||||
from_stack: QueryStack,
|
||||
) -> Arc<parking_lot::Condvar> {
|
||||
assert_ne!(from_id, to_id);
|
||||
|
@ -266,7 +267,7 @@ impl DependencyGraph {
|
|||
/// Unblock the runtime with the given id with the given wait-result.
|
||||
/// This will cause it resume execution (though it will have to grab
|
||||
/// the lock on this data structure first, to recover the wait result).
|
||||
fn unblock_runtime(&mut self, id: RuntimeId, wait_result: WaitResult) {
|
||||
fn unblock_runtime(&mut self, id: ThreadId, wait_result: WaitResult) {
|
||||
let edge = self.edges.remove(&id).expect("not blocked");
|
||||
self.wait_results.insert(id, (edge.stack, wait_result));
|
||||
|
||||
|
|
|
@ -1,56 +0,0 @@
|
|||
use std::sync::{atomic::AtomicUsize, Arc};
|
||||
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use crate::{durability::Durability, key::DependencyIndex, revision::AtomicRevision};
|
||||
|
||||
use super::{dependency_graph::DependencyGraph, local_state::EdgeKind};
|
||||
|
||||
/// State that will be common to all threads (when we support multiple threads)
|
||||
#[derive(Debug)]
|
||||
pub(super) struct SharedState {
|
||||
/// Stores the next id to use for a snapshotted runtime (starts at 1).
|
||||
pub(super) next_id: AtomicUsize,
|
||||
|
||||
/// Vector we can clone
|
||||
pub(super) empty_dependencies: Arc<[(EdgeKind, DependencyIndex)]>,
|
||||
|
||||
/// Set to true when the current revision has been canceled.
|
||||
/// This is done when we an input is being changed. The flag
|
||||
/// is set back to false once the input has been changed.
|
||||
pub(super) revision_canceled: AtomicCell<bool>,
|
||||
|
||||
/// Stores the "last change" revision for values of each duration.
|
||||
/// This vector is always of length at least 1 (for Durability 0)
|
||||
/// but its total length depends on the number of durations. The
|
||||
/// element at index 0 is special as it represents the "current
|
||||
/// revision". In general, we have the invariant that revisions
|
||||
/// in here are *declining* -- that is, `revisions[i] >=
|
||||
/// revisions[i + 1]`, for all `i`. This is because when you
|
||||
/// modify a value with durability D, that implies that values
|
||||
/// with durability less than D may have changed too.
|
||||
pub(super) revisions: Vec<AtomicRevision>,
|
||||
|
||||
/// The dependency graph tracks which runtimes are blocked on one
|
||||
/// another, waiting for queries to terminate.
|
||||
pub(super) dependency_graph: Mutex<DependencyGraph>,
|
||||
}
|
||||
|
||||
impl Default for SharedState {
|
||||
fn default() -> Self {
|
||||
Self::with_durabilities(Durability::LEN)
|
||||
}
|
||||
}
|
||||
|
||||
impl SharedState {
|
||||
fn with_durabilities(durabilities: usize) -> Self {
|
||||
SharedState {
|
||||
next_id: AtomicUsize::new(1),
|
||||
empty_dependencies: None.into_iter().collect(),
|
||||
revision_canceled: Default::default(),
|
||||
revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(),
|
||||
dependency_graph: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -111,11 +111,11 @@ unsafe impl<T: HasStorage> DatabaseGen for T {
|
|||
}
|
||||
|
||||
fn views(&self) -> &Views {
|
||||
&self.storage().shared.upcasts
|
||||
&self.storage().upcasts
|
||||
}
|
||||
|
||||
fn nonce(&self) -> Nonce<StorageNonce> {
|
||||
self.storage().shared.nonce
|
||||
self.storage().nonce
|
||||
}
|
||||
|
||||
fn lookup_jar_by_type(&self, jar: &dyn Jar) -> Option<IngredientIndex> {
|
||||
|
@ -201,25 +201,16 @@ impl IngredientIndex {
|
|||
pub fn successor(self, index: usize) -> Self {
|
||||
IngredientIndex(self.0 + 1 + index as u32)
|
||||
}
|
||||
|
||||
/// Return the "debug name" of this ingredient (e.g., the name of the tracked struct it represents)
|
||||
pub(crate) fn debug_name(self, db: &dyn Database) -> &'static str {
|
||||
db.lookup_ingredient(self).debug_name()
|
||||
}
|
||||
}
|
||||
|
||||
/// The "storage" struct stores all the data for the jars.
|
||||
/// It is shared between the main database and any active snapshots.
|
||||
pub struct Storage<Db: Database> {
|
||||
/// Data shared across all databases. This contains the ingredients needed by each jar.
|
||||
/// See the ["jars and ingredients" chapter](https://salsa-rs.github.io/salsa/plumbing/jars_and_ingredients.html)
|
||||
/// for more detailed description.
|
||||
shared: Shared<Db>,
|
||||
|
||||
/// The runtime for this particular salsa database handle.
|
||||
/// Each handle gets its own runtime, but the runtimes have shared state between them.
|
||||
runtime: Runtime,
|
||||
}
|
||||
|
||||
/// Data shared between all threads.
|
||||
/// This is where the actual data for tracked functions, structs, inputs, etc lives,
|
||||
/// along with some coordination variables between treads.
|
||||
struct Shared<Db: Database> {
|
||||
upcasts: ViewsOf<Db>,
|
||||
|
||||
nonce: Nonce<StorageNonce>,
|
||||
|
@ -239,19 +230,21 @@ struct Shared<Db: Database> {
|
|||
|
||||
/// Indices of ingredients that require reset when a new revision starts.
|
||||
ingredients_requiring_reset: ConcurrentVec<IngredientIndex>,
|
||||
|
||||
/// The runtime for this particular salsa database handle.
|
||||
/// Each handle gets its own runtime, but the runtimes have shared state between them.
|
||||
runtime: Runtime,
|
||||
}
|
||||
|
||||
// ANCHOR: default
|
||||
impl<Db: Database> Default for Storage<Db> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
shared: Shared {
|
||||
upcasts: Default::default(),
|
||||
nonce: NONCE.nonce(),
|
||||
jar_map: Default::default(),
|
||||
ingredients_vec: Default::default(),
|
||||
ingredients_requiring_reset: Default::default(),
|
||||
},
|
||||
runtime: Runtime::default(),
|
||||
}
|
||||
}
|
||||
|
@ -265,35 +258,34 @@ impl<Db: Database> Storage<Db> {
|
|||
func: fn(&Db) -> &T,
|
||||
func_mut: fn(&mut Db) -> &mut T,
|
||||
) {
|
||||
self.shared.upcasts.add::<T>(func, func_mut)
|
||||
self.upcasts.add::<T>(func, func_mut)
|
||||
}
|
||||
|
||||
/// Adds the ingredients in `jar` to the database if not already present.
|
||||
/// If a jar of this type is already present, returns the index.
|
||||
fn add_or_lookup_jar_by_type(&self, jar: &dyn Jar) -> IngredientIndex {
|
||||
let jar_type_id = jar.type_id();
|
||||
let mut jar_map = self.shared.jar_map.lock();
|
||||
let mut jar_map = self.jar_map.lock();
|
||||
*jar_map
|
||||
.entry(jar_type_id)
|
||||
.or_insert_with(|| {
|
||||
let index = IngredientIndex::from(self.shared.ingredients_vec.len());
|
||||
let index = IngredientIndex::from(self.ingredients_vec.len());
|
||||
let ingredients = jar.create_ingredients(index);
|
||||
for ingredient in ingredients {
|
||||
let expected_index = ingredient.ingredient_index();
|
||||
|
||||
if ingredient.requires_reset_for_new_revision() {
|
||||
self.shared.ingredients_requiring_reset.push(expected_index);
|
||||
self.ingredients_requiring_reset.push(expected_index);
|
||||
}
|
||||
|
||||
let actual_index = self
|
||||
.shared
|
||||
.ingredients_vec
|
||||
.push(ingredient);
|
||||
assert_eq!(
|
||||
expected_index.as_usize(),
|
||||
actual_index,
|
||||
"ingredient `{:?}` was predicted to have index `{:?}` but actually has index `{:?}`",
|
||||
self.shared.ingredients_vec.get(actual_index).unwrap(),
|
||||
self.ingredients_vec.get(actual_index).unwrap(),
|
||||
expected_index,
|
||||
actual_index,
|
||||
);
|
||||
|
@ -305,11 +297,11 @@ impl<Db: Database> Storage<Db> {
|
|||
|
||||
/// Return the index of the 1st ingredient from the given jar.
|
||||
pub fn lookup_jar_by_type(&self, jar: &dyn Jar) -> Option<IngredientIndex> {
|
||||
self.shared.jar_map.lock().get(&jar.type_id()).copied()
|
||||
self.jar_map.lock().get(&jar.type_id()).copied()
|
||||
}
|
||||
|
||||
pub fn lookup_ingredient(&self, index: IngredientIndex) -> &dyn Ingredient {
|
||||
&**self.shared.ingredients_vec.get(index.as_usize()).unwrap()
|
||||
&**self.ingredients_vec.get(index.as_usize()).unwrap()
|
||||
}
|
||||
|
||||
fn lookup_ingredient_mut(
|
||||
|
@ -318,20 +310,15 @@ impl<Db: Database> Storage<Db> {
|
|||
) -> (&mut dyn Ingredient, &mut Runtime) {
|
||||
self.runtime.new_revision();
|
||||
|
||||
for index in self.shared.ingredients_requiring_reset.iter() {
|
||||
self.shared
|
||||
.ingredients_vec
|
||||
for index in self.ingredients_requiring_reset.iter() {
|
||||
self.ingredients_vec
|
||||
.get_mut(index.as_usize())
|
||||
.unwrap()
|
||||
.reset_for_new_revision();
|
||||
}
|
||||
|
||||
(
|
||||
&mut **self
|
||||
.shared
|
||||
.ingredients_vec
|
||||
.get_mut(index.as_usize())
|
||||
.unwrap(),
|
||||
&mut **self.ingredients_vec.get_mut(index.as_usize()).unwrap(),
|
||||
&mut self.runtime,
|
||||
)
|
||||
}
|
||||
|
|
|
@ -11,7 +11,8 @@ use crate::{
|
|||
ingredient::{fmt_index, Ingredient, Jar},
|
||||
ingredient_list::IngredientList,
|
||||
key::{DatabaseKeyIndex, DependencyIndex},
|
||||
runtime::{local_state::QueryOrigin, Runtime},
|
||||
local_state::{self, QueryOrigin},
|
||||
runtime::Runtime,
|
||||
salsa_struct::SalsaStructInDb,
|
||||
storage::IngredientIndex,
|
||||
Database, Durability, Event, Id, Revision,
|
||||
|
@ -287,13 +288,14 @@ where
|
|||
|
||||
pub fn new_struct<'db>(
|
||||
&'db self,
|
||||
runtime: &'db Runtime,
|
||||
db: &'db dyn Database,
|
||||
fields: C::Fields<'db>,
|
||||
) -> C::Struct<'db> {
|
||||
local_state::attach(db, |local_state| {
|
||||
let data_hash = crate::hash::hash(&C::id_fields(&fields));
|
||||
|
||||
let (query_key, current_deps, disambiguator) =
|
||||
runtime.disambiguate_entity(self.ingredient_index, Revision::start(), data_hash);
|
||||
local_state.disambiguate(self.ingredient_index, Revision::start(), data_hash);
|
||||
|
||||
let entity_key = KeyStruct {
|
||||
query_key,
|
||||
|
@ -302,14 +304,14 @@ where
|
|||
};
|
||||
|
||||
let (id, new_id) = self.intern(entity_key);
|
||||
runtime.add_output(self.database_key_index(id).into());
|
||||
local_state.add_output(self.database_key_index(id).into());
|
||||
|
||||
let current_revision = runtime.current_revision();
|
||||
let current_revision = db.runtime().current_revision();
|
||||
if new_id {
|
||||
// This is a new tracked struct, so create an entry in the struct map.
|
||||
|
||||
self.struct_map.insert(
|
||||
runtime,
|
||||
current_revision,
|
||||
Value {
|
||||
id,
|
||||
key: entity_key,
|
||||
|
@ -330,7 +332,7 @@ where
|
|||
// which means the interned key could exist but `struct_map` not yet have
|
||||
// been updated).
|
||||
|
||||
match self.struct_map.update(runtime, id) {
|
||||
match self.struct_map.update(current_revision, id) {
|
||||
Update::Current(r) => {
|
||||
// All inputs up to this point were previously
|
||||
// observed to be green and this struct was already
|
||||
|
@ -366,6 +368,7 @@ where
|
|||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Given the id of a tracked struct created in this revision,
|
||||
|
@ -375,7 +378,8 @@ where
|
|||
///
|
||||
/// If the struct has not been created in this revision.
|
||||
pub fn lookup_struct<'db>(&'db self, runtime: &'db Runtime, id: Id) -> C::Struct<'db> {
|
||||
self.struct_map.get(runtime, id)
|
||||
let current_revision = runtime.current_revision();
|
||||
self.struct_map.get(current_revision, id)
|
||||
}
|
||||
|
||||
/// Deletes the given entities. This is used after a query `Q` executes and we can compare
|
||||
|
@ -390,7 +394,7 @@ where
|
|||
/// discussion and important considerations.
|
||||
pub(crate) fn delete_entity(&self, db: &dyn crate::Database, id: Id) {
|
||||
db.salsa_event(Event {
|
||||
runtime_id: db.runtime().id(),
|
||||
thread_id: std::thread::current().id(),
|
||||
kind: crate::EventKind::DidDiscard {
|
||||
key: self.database_key_index(id),
|
||||
},
|
||||
|
@ -485,6 +489,10 @@ where
|
|||
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt_index(C::DEBUG_NAME, index, fmt)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
C::DEBUG_NAME
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> std::fmt::Debug for IngredientImpl<C>
|
||||
|
@ -505,11 +513,16 @@ where
|
|||
/// Access to this value field.
|
||||
/// Note that this function returns the entire tuple of value fields.
|
||||
/// The caller is responible for selecting the appropriate element.
|
||||
pub fn field<'db>(&'db self, runtime: &'db Runtime, field_index: usize) -> &'db C::Fields<'db> {
|
||||
pub fn field<'db>(
|
||||
&'db self,
|
||||
db: &dyn crate::Database,
|
||||
field_index: usize,
|
||||
) -> &'db C::Fields<'db> {
|
||||
local_state::attach(db, |local_state| {
|
||||
let field_ingredient_index = self.struct_ingredient_index.successor(field_index);
|
||||
let changed_at = self.revisions[field_index];
|
||||
|
||||
runtime.report_tracked_read(
|
||||
local_state.report_tracked_read(
|
||||
DependencyIndex {
|
||||
ingredient_index: field_ingredient_index,
|
||||
key_index: Some(self.id.as_id()),
|
||||
|
@ -519,6 +532,7 @@ where
|
|||
);
|
||||
|
||||
unsafe { self.to_self_ref(&self.fields) }
|
||||
})
|
||||
}
|
||||
|
||||
unsafe fn to_self_ref<'db>(&'db self, fields: &'db C::Fields<'static>) -> &'db C::Fields<'db> {
|
||||
|
|
|
@ -6,7 +6,7 @@ use std::{
|
|||
use crossbeam::queue::SegQueue;
|
||||
use dashmap::mapref::one::RefMut;
|
||||
|
||||
use crate::{alloc::Alloc, hash::FxDashMap, Id, Runtime};
|
||||
use crate::{alloc::Alloc, hash::FxDashMap, Id, Revision, Runtime};
|
||||
|
||||
use super::{Configuration, KeyStruct, Value};
|
||||
|
||||
|
@ -80,8 +80,8 @@ where
|
|||
///
|
||||
/// * If value with same `value.id` is already present in the map.
|
||||
/// * If value not created in current revision.
|
||||
pub fn insert<'db>(&'db self, runtime: &'db Runtime, value: Value<C>) -> C::Struct<'db> {
|
||||
assert_eq!(value.created_at, runtime.current_revision());
|
||||
pub fn insert<'db>(&'db self, current_revision: Revision, value: Value<C>) -> C::Struct<'db> {
|
||||
assert_eq!(value.created_at, current_revision);
|
||||
|
||||
let id = value.id;
|
||||
let boxed_value = Alloc::new(value);
|
||||
|
@ -119,12 +119,9 @@ where
|
|||
///
|
||||
/// * If the value is not present in the map.
|
||||
/// * If the value is already updated in this revision.
|
||||
pub fn update<'db>(&'db self, runtime: &'db Runtime, id: Id) -> Update<'db, C> {
|
||||
pub fn update<'db>(&'db self, current_revision: Revision, id: Id) -> Update<'db, C> {
|
||||
let mut data = self.map.get_mut(&id).unwrap();
|
||||
|
||||
// Never update a struct twice in the same revision.
|
||||
let current_revision = runtime.current_revision();
|
||||
|
||||
// UNSAFE: We never permit `&`-access in the current revision until data.created_at
|
||||
// has been updated to the current revision (which we check below).
|
||||
let data_ref = unsafe { data.as_mut() };
|
||||
|
@ -154,7 +151,7 @@ where
|
|||
// code cannot violate that `&`-reference.
|
||||
if data_ref.created_at == current_revision {
|
||||
drop(data);
|
||||
return Update::Current(Self::get_from_map(&self.map, runtime, id));
|
||||
return Update::Current(Self::get_from_map(&self.map, current_revision, id));
|
||||
}
|
||||
|
||||
data_ref.created_at = current_revision;
|
||||
|
@ -167,8 +164,8 @@ where
|
|||
///
|
||||
/// * If the value is not present in the map.
|
||||
/// * If the value has not been updated in this revision.
|
||||
pub fn get<'db>(&'db self, runtime: &'db Runtime, id: Id) -> C::Struct<'db> {
|
||||
Self::get_from_map(&self.map, runtime, id)
|
||||
pub fn get<'db>(&'db self, current_revision: Revision, id: Id) -> C::Struct<'db> {
|
||||
Self::get_from_map(&self.map, current_revision, id)
|
||||
}
|
||||
|
||||
/// Helper function, provides shared functionality for [`StructMapView`][]
|
||||
|
@ -179,7 +176,7 @@ where
|
|||
/// * If the value has not been updated in this revision.
|
||||
fn get_from_map<'db>(
|
||||
map: &'db FxDashMap<Id, Alloc<Value<C>>>,
|
||||
runtime: &'db Runtime,
|
||||
current_revision: Revision,
|
||||
id: Id,
|
||||
) -> C::Struct<'db> {
|
||||
let data = map.get(&id).unwrap();
|
||||
|
@ -190,7 +187,6 @@ where
|
|||
|
||||
// Before we drop the lock, check that the value has
|
||||
// been updated in this revision. This is what allows us to return a ``
|
||||
let current_revision = runtime.current_revision();
|
||||
let created_at = data_ref.created_at;
|
||||
assert!(
|
||||
created_at == current_revision,
|
||||
|
@ -235,8 +231,8 @@ where
|
|||
///
|
||||
/// * If the value is not present in the map.
|
||||
/// * If the value has not been updated in this revision.
|
||||
pub fn get<'db>(&'db self, runtime: &'db Runtime, id: Id) -> C::Struct<'db> {
|
||||
StructMap::get_from_map(&self.map, runtime, id)
|
||||
pub fn get<'db>(&'db self, current_revision: Revision, id: Id) -> C::Struct<'db> {
|
||||
StructMap::get_from_map(&self.map, current_revision, id)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use crate::{
|
||||
id::AsId, ingredient::Ingredient, key::DependencyIndex, storage::IngredientIndex, Database, Id,
|
||||
Runtime,
|
||||
id::AsId, ingredient::Ingredient, key::DependencyIndex, local_state, storage::IngredientIndex,
|
||||
Database, Id,
|
||||
};
|
||||
|
||||
use super::{struct_map::StructMapView, Configuration};
|
||||
|
@ -46,12 +46,14 @@ where
|
|||
/// Access to this value field.
|
||||
/// Note that this function returns the entire tuple of value fields.
|
||||
/// The caller is responible for selecting the appropriate element.
|
||||
pub fn field<'db>(&'db self, runtime: &'db Runtime, id: Id) -> &'db C::Fields<'db> {
|
||||
let data = self.struct_map.get(runtime, id);
|
||||
pub fn field<'db>(&'db self, db: &'db dyn Database, id: Id) -> &'db C::Fields<'db> {
|
||||
local_state::attach(db, |local_state| {
|
||||
let current_revision = db.runtime().current_revision();
|
||||
let data = self.struct_map.get(current_revision, id);
|
||||
let data = C::deref_struct(data);
|
||||
let changed_at = data.revisions[self.field_index];
|
||||
|
||||
runtime.report_tracked_read(
|
||||
local_state.report_tracked_read(
|
||||
DependencyIndex {
|
||||
ingredient_index: self.ingredient_index,
|
||||
key_index: Some(id.as_id()),
|
||||
|
@ -61,6 +63,7 @@ where
|
|||
);
|
||||
|
||||
unsafe { self.to_self_ref(&data.fields) }
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -82,15 +85,15 @@ where
|
|||
input: Option<Id>,
|
||||
revision: crate::Revision,
|
||||
) -> bool {
|
||||
let runtime = db.runtime();
|
||||
let current_revision = db.runtime().current_revision();
|
||||
let id = input.unwrap();
|
||||
let data = self.struct_map.get(runtime, id);
|
||||
let data = self.struct_map.get(current_revision, id);
|
||||
let data = C::deref_struct(data);
|
||||
let field_changed_at = data.revisions[self.field_index];
|
||||
field_changed_at > revision
|
||||
}
|
||||
|
||||
fn origin(&self, _key_index: crate::Id) -> Option<crate::runtime::local_state::QueryOrigin> {
|
||||
fn origin(&self, _key_index: crate::Id) -> Option<crate::local_state::QueryOrigin> {
|
||||
None
|
||||
}
|
||||
|
||||
|
@ -137,6 +140,10 @@ where
|
|||
index.unwrap()
|
||||
)
|
||||
}
|
||||
|
||||
fn debug_name(&self) -> &'static str {
|
||||
C::FIELD_DEBUG_NAMES[self.field_index]
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> std::fmt::Debug for FieldIngredientImpl<C>
|
||||
|
|
|
@ -220,8 +220,8 @@ fn inner_cycle() {
|
|||
assert!(err.is_err());
|
||||
let expected = expect![[r#"
|
||||
[
|
||||
"cycle_b(0)",
|
||||
"cycle_a(0)",
|
||||
"cycle_b(0)",
|
||||
]
|
||||
"#]];
|
||||
expected.assert_debug_eq(&err.unwrap_err().cycle);
|
||||
|
@ -328,8 +328,8 @@ fn cycle_mixed_1() {
|
|||
|
||||
let expected = expect![[r#"
|
||||
[
|
||||
"cycle_c(0)",
|
||||
"cycle_b(0)",
|
||||
"cycle_c(0)",
|
||||
]
|
||||
"#]];
|
||||
expected.assert_debug_eq(&cycle_c(db, abc).unwrap_err().cycle);
|
||||
|
@ -379,8 +379,8 @@ fn cycle_deterministic_order() {
|
|||
"cycle_b(0)",
|
||||
],
|
||||
[
|
||||
"cycle_b(0)",
|
||||
"cycle_a(0)",
|
||||
"cycle_b(0)",
|
||||
],
|
||||
)
|
||||
"#]];
|
||||
|
|
|
@ -1,13 +1,8 @@
|
|||
#[cfg(disabled)]
|
||||
mod setup;
|
||||
|
||||
#[cfg(disabled)]
|
||||
mod parallel_cancellation;
|
||||
mod parallel_cycle_all_recover;
|
||||
#[cfg(disabled)]
|
||||
mod parallel_cycle_mid_recover;
|
||||
#[cfg(disabled)]
|
||||
mod parallel_cycle_none_recover;
|
||||
#[cfg(disabled)]
|
||||
mod parallel_cycle_one_recover;
|
||||
#[cfg(disabled)]
|
||||
mod signal;
|
||||
|
|
76
tests/parallel/parallel_cancellation.rs
Normal file
76
tests/parallel/parallel_cancellation.rs
Normal file
|
@ -0,0 +1,76 @@
|
|||
//! Test for cycle recover spread across two threads.
|
||||
//! See `../cycles.rs` for a complete listing of cycle tests,
|
||||
//! both intra and cross thread.
|
||||
|
||||
use salsa::Cancelled;
|
||||
use salsa::Handle;
|
||||
use salsa::Setter;
|
||||
|
||||
use crate::setup::Database;
|
||||
use crate::setup::Knobs;
|
||||
|
||||
#[salsa::db]
|
||||
pub(crate) trait Db: salsa::Database + Knobs {}
|
||||
|
||||
#[salsa::db]
|
||||
impl<T: salsa::Database + Knobs> Db for T {}
|
||||
|
||||
#[salsa::input]
|
||||
struct MyInput {
|
||||
field: i32,
|
||||
}
|
||||
|
||||
#[salsa::tracked]
|
||||
fn a1(db: &dyn Db, input: MyInput) -> MyInput {
|
||||
db.signal(1);
|
||||
db.wait_for(2);
|
||||
dummy(db, input)
|
||||
}
|
||||
|
||||
#[salsa::tracked]
|
||||
fn dummy(_db: &dyn Db, _input: MyInput) -> MyInput {
|
||||
panic!("should never get here!")
|
||||
}
|
||||
|
||||
// Cancellation signalling test
|
||||
//
|
||||
// The pattern is as follows.
|
||||
//
|
||||
// Thread A Thread B
|
||||
// -------- --------
|
||||
// a1
|
||||
// | wait for stage 1
|
||||
// signal stage 1 set input, triggers cancellation
|
||||
// wait for stage 2 (blocks) triggering cancellation sends stage 2
|
||||
// |
|
||||
// (unblocked)
|
||||
// dummy
|
||||
// panics
|
||||
|
||||
#[test]
|
||||
fn execute() {
|
||||
let mut db = Handle::new(Database::default());
|
||||
db.knobs().signal_on_will_block.store(3);
|
||||
|
||||
let input = MyInput::new(&*db, 1);
|
||||
|
||||
let thread_a = std::thread::spawn({
|
||||
let db = db.clone();
|
||||
move || a1(&*db, input)
|
||||
});
|
||||
|
||||
input.set_field(db.get_mut()).to(2);
|
||||
|
||||
// Assert thread A *should* was cancelled
|
||||
let cancelled = thread_a
|
||||
.join()
|
||||
.unwrap_err()
|
||||
.downcast::<Cancelled>()
|
||||
.unwrap();
|
||||
|
||||
// and inspect the output
|
||||
expect_test::expect![[r#"
|
||||
PendingWrite
|
||||
"#]]
|
||||
.assert_debug_eq(&cancelled);
|
||||
}
|
|
@ -2,6 +2,8 @@
|
|||
//! See `../cycles.rs` for a complete listing of cycle tests,
|
||||
//! both intra and cross thread.
|
||||
|
||||
use salsa::Handle;
|
||||
|
||||
use crate::setup::Database;
|
||||
use crate::setup::Knobs;
|
||||
|
||||
|
@ -90,18 +92,18 @@ fn recover_b2(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
|
|||
|
||||
#[test]
|
||||
fn execute() {
|
||||
let db = Database::default();
|
||||
db.knobs().signal_on_will_block.set(3);
|
||||
let db = Handle::new(Database::default());
|
||||
db.knobs().signal_on_will_block.store(3);
|
||||
|
||||
let input = MyInput::new(&db, 1);
|
||||
let input = MyInput::new(&*db, 1);
|
||||
|
||||
let thread_a = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || a1(&*db, input)
|
||||
});
|
||||
|
||||
let thread_b = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || b1(&*db, input)
|
||||
});
|
||||
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
//! See `../cycles.rs` for a complete listing of cycle tests,
|
||||
//! both intra and cross thread.
|
||||
|
||||
use salsa::Handle;
|
||||
|
||||
use crate::setup::Database;
|
||||
use crate::setup::Knobs;
|
||||
|
||||
|
@ -86,18 +88,18 @@ fn recover_b3(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
|
|||
|
||||
#[test]
|
||||
fn execute() {
|
||||
let db = Database::default();
|
||||
db.knobs().signal_on_will_block.set(3);
|
||||
let db = Handle::new(Database::default());
|
||||
db.knobs().signal_on_will_block.store(3);
|
||||
|
||||
let input = MyInput::new(&db, 1);
|
||||
let input = MyInput::new(&*db, 1);
|
||||
|
||||
let thread_a = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || a1(&*db, input)
|
||||
});
|
||||
|
||||
let thread_b = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || b1(&*db, input)
|
||||
});
|
||||
|
||||
|
|
|
@ -5,6 +5,8 @@
|
|||
use crate::setup::Database;
|
||||
use crate::setup::Knobs;
|
||||
use expect_test::expect;
|
||||
use salsa::Database as _;
|
||||
use salsa::Handle;
|
||||
|
||||
#[salsa::db]
|
||||
pub(crate) trait Db: salsa::Database + Knobs {}
|
||||
|
@ -41,35 +43,37 @@ pub(crate) fn b(db: &dyn Db, input: MyInput) -> i32 {
|
|||
|
||||
#[test]
|
||||
fn execute() {
|
||||
let db = Database::default();
|
||||
db.knobs().signal_on_will_block.set(3);
|
||||
let db = Handle::new(Database::default());
|
||||
db.knobs().signal_on_will_block.store(3);
|
||||
|
||||
let input = MyInput::new(&db, -1);
|
||||
let input = MyInput::new(&*db, -1);
|
||||
|
||||
let thread_a = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || a(&*db, input)
|
||||
});
|
||||
|
||||
let thread_b = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || b(&*db, input)
|
||||
});
|
||||
|
||||
// We expect B to panic because it detects a cycle (it is the one that calls A, ultimately).
|
||||
// Right now, it panics with a string.
|
||||
let err_b = thread_b.join().unwrap_err();
|
||||
db.attach(|_| {
|
||||
if let Some(c) = err_b.downcast_ref::<salsa::Cycle>() {
|
||||
let expected = expect![[r#"
|
||||
[
|
||||
"a(0)",
|
||||
"b(0)",
|
||||
a(0),
|
||||
b(0),
|
||||
]
|
||||
"#]];
|
||||
expected.assert_debug_eq(&c.all_participants(&db));
|
||||
expected.assert_debug_eq(&c.all_participants(&*db));
|
||||
} else {
|
||||
panic!("b failed in an unexpected way: {:?}", err_b);
|
||||
}
|
||||
});
|
||||
|
||||
// We expect A to propagate a panic, which causes us to use the sentinel
|
||||
// type `Canceled`.
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
//! See `../cycles.rs` for a complete listing of cycle tests,
|
||||
//! both intra and cross thread.
|
||||
|
||||
use salsa::Handle;
|
||||
|
||||
use crate::setup::Database;
|
||||
use crate::setup::Knobs;
|
||||
|
||||
|
@ -75,18 +77,18 @@ pub(crate) fn b2(db: &dyn Db, input: MyInput) -> i32 {
|
|||
|
||||
#[test]
|
||||
fn execute() {
|
||||
let db = Database::default();
|
||||
db.knobs().signal_on_will_block.set(3);
|
||||
let db = Handle::new(Database::default());
|
||||
db.knobs().signal_on_will_block.store(3);
|
||||
|
||||
let input = MyInput::new(&db, 1);
|
||||
let input = MyInput::new(&*db, 1);
|
||||
|
||||
let thread_a = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || a1(&*db, input)
|
||||
});
|
||||
|
||||
let thread_b = std::thread::spawn({
|
||||
let db = db.snapshot();
|
||||
let db = db.clone();
|
||||
move || b1(&*db, input)
|
||||
});
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use std::{cell::Cell, sync::Arc};
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
|
||||
use crate::signal::Signal;
|
||||
|
||||
|
@ -15,14 +15,17 @@ pub(crate) trait Knobs {
|
|||
/// Various "knobs" that can be used to customize how the queries
|
||||
/// behave on one specific thread. Note that this state is
|
||||
/// intentionally thread-local (apart from `signal`).
|
||||
#[derive(Clone, Default)]
|
||||
#[derive(Default)]
|
||||
pub(crate) struct KnobsStruct {
|
||||
/// A kind of flexible barrier used to coordinate execution across
|
||||
/// threads to ensure we reach various weird states.
|
||||
pub(crate) signal: Arc<Signal>,
|
||||
pub(crate) signal: Signal,
|
||||
|
||||
/// When this database is about to block, send a signal.
|
||||
pub(crate) signal_on_will_block: Cell<usize>,
|
||||
/// When this database is about to block, send this signal.
|
||||
pub(crate) signal_on_will_block: AtomicCell<usize>,
|
||||
|
||||
/// When this database has set the cancellation flag, send this signal.
|
||||
pub(crate) signal_on_did_cancel: AtomicCell<usize>,
|
||||
}
|
||||
|
||||
#[salsa::db]
|
||||
|
@ -35,8 +38,14 @@ pub(crate) struct Database {
|
|||
#[salsa::db]
|
||||
impl salsa::Database for Database {
|
||||
fn salsa_event(&self, event: salsa::Event) {
|
||||
if let salsa::EventKind::WillBlockOn { .. } = event.kind {
|
||||
self.signal(self.knobs().signal_on_will_block.get());
|
||||
match event.kind {
|
||||
salsa::EventKind::WillBlockOn { .. } => {
|
||||
self.signal(self.knobs().signal_on_will_block.load());
|
||||
}
|
||||
salsa::EventKind::DidSetCancellationFlag => {
|
||||
self.signal(self.knobs().signal_on_did_cancel.load());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -76,8 +76,8 @@ fn test_leaked_inputs_ignored() {
|
|||
let result_in_rev_1 = function(&db, input);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: function(0) } }",
|
||||
"Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
|
||||
"Event { thread_id: ThreadId(2), kind: WillExecute { database_key: function(0) } }",
|
||||
]"#]]);
|
||||
|
||||
assert_eq!(result_in_rev_1, 0);
|
||||
|
@ -92,8 +92,8 @@ fn test_leaked_inputs_ignored() {
|
|||
let result_in_rev_2 = function(&db, input);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: function(0) } }",
|
||||
"Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
|
||||
"Event { thread_id: ThreadId(2), kind: WillExecute { database_key: function(0) } }",
|
||||
]"#]]);
|
||||
|
||||
// Because salsa did not see any way for the tracked
|
||||
|
|
|
@ -1,370 +0,0 @@
|
|||
//! Test that a `tracked` fn on a `salsa::input`
|
||||
//! compiles and executes successfully.
|
||||
|
||||
use expect_test::expect;
|
||||
mod common;
|
||||
use common::{HasLogger, Logger};
|
||||
use salsa::Setter;
|
||||
use test_log::test;
|
||||
|
||||
#[salsa::db]
|
||||
trait Db: salsa::Database + HasLogger {}
|
||||
|
||||
#[salsa::input]
|
||||
struct MyInput {
|
||||
field: u32,
|
||||
}
|
||||
|
||||
#[salsa::tracked]
|
||||
struct MyTracked<'db> {
|
||||
input: MyInput,
|
||||
}
|
||||
|
||||
/// If the input is in the range 0..10, this is specified to return 10.
|
||||
/// Otherwise, the default occurs, and it returns the input.
|
||||
#[salsa::tracked(specify)]
|
||||
fn maybe_specified<'db>(db: &'db dyn Db, tracked: MyTracked<'db>) -> u32 {
|
||||
db.push_log(format!("maybe_specified({:?})", tracked));
|
||||
tracked.input(db).field(db)
|
||||
}
|
||||
|
||||
/// Reads maybe-specified and multiplies it by 10.
|
||||
/// This is here to show whether we can detect when `maybe_specified` has changed
|
||||
/// and control down-stream work accordingly.
|
||||
#[salsa::tracked]
|
||||
fn read_maybe_specified<'db>(db: &'db dyn Db, tracked: MyTracked<'db>) -> u32 {
|
||||
db.push_log(format!("read_maybe_specified({:?})", tracked));
|
||||
maybe_specified(db, tracked) * 10
|
||||
}
|
||||
|
||||
/// Create a tracked value and *maybe* specify a value for
|
||||
/// `maybe_specified`
|
||||
#[salsa::tracked]
|
||||
fn create_tracked(db: &dyn Db, input: MyInput) -> MyTracked<'_> {
|
||||
db.push_log(format!("create_tracked({:?})", input));
|
||||
let tracked = MyTracked::new(db, input);
|
||||
if input.field(db) < 10 {
|
||||
maybe_specified::specify(db, tracked, 10);
|
||||
}
|
||||
tracked
|
||||
}
|
||||
|
||||
#[salsa::tracked]
|
||||
fn final_result(db: &dyn Db, input: MyInput) -> u32 {
|
||||
db.push_log(format!("final_result({:?})", input));
|
||||
let tracked = create_tracked(db, input);
|
||||
read_maybe_specified(db, tracked)
|
||||
}
|
||||
|
||||
#[salsa::db]
|
||||
#[derive(Default)]
|
||||
struct Database {
|
||||
storage: salsa::Storage<Self>,
|
||||
logger: Logger,
|
||||
}
|
||||
|
||||
#[salsa::db]
|
||||
impl salsa::Database for Database {
|
||||
fn salsa_event(&self, event: salsa::Event) {
|
||||
self.push_log(format!("{event:?}"));
|
||||
}
|
||||
}
|
||||
|
||||
#[salsa::db]
|
||||
impl Db for Database {}
|
||||
|
||||
impl HasLogger for Database {
|
||||
fn logger(&self) -> &Logger {
|
||||
&self.logger
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_run_0() {
|
||||
let mut db = Database::default();
|
||||
|
||||
let input = MyInput::new(&db, 0);
|
||||
assert_eq!(final_result(&db, input), 100);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
|
||||
"final_result(MyInput { [salsa id]: Id(0), field: 0 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 0 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
|
||||
"read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 0 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
]"#]]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_run_5() {
|
||||
let mut db = Database::default();
|
||||
|
||||
let input = MyInput::new(&db, 5);
|
||||
assert_eq!(final_result(&db, input), 100);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
|
||||
"final_result(MyInput { [salsa id]: Id(0), field: 5 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 5 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
|
||||
"read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 5 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
]"#]]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_run_10() {
|
||||
let mut db = Database::default();
|
||||
|
||||
let input = MyInput::new(&db, 10);
|
||||
assert_eq!(final_result(&db, input), 100);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
|
||||
"final_result(MyInput { [salsa id]: Id(0), field: 10 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 10 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
|
||||
"read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 10 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: maybe_specified(0) } }",
|
||||
"maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 10 } })",
|
||||
]"#]]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_run_20() {
|
||||
let mut db = Database::default();
|
||||
|
||||
let input = MyInput::new(&db, 20);
|
||||
assert_eq!(final_result(&db, input), 200);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
|
||||
"final_result(MyInput { [salsa id]: Id(0), field: 20 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 20 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
|
||||
"read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: maybe_specified(0) } }",
|
||||
"maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
|
||||
]"#]]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_run_0_then_5_then_20() {
|
||||
let mut db = Database::default();
|
||||
|
||||
// Set input to 0:
|
||||
//
|
||||
// * `create_tracked` specifies `10` for `maybe_specified`
|
||||
// * final resuilt of `100` is derived by executing `read_maybe_specified`
|
||||
let input = MyInput::new(&db, 0);
|
||||
assert_eq!(final_result(&db, input), 100);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
|
||||
"final_result(MyInput { [salsa id]: Id(0), field: 0 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 0 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
|
||||
"read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 0 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
]"#]]);
|
||||
|
||||
// Set input to 5:
|
||||
//
|
||||
// * `create_tracked` does re-execute, but specifies same value for `maybe_specified` as before
|
||||
// * `read_maybe_specified` does not re-execute (its input has not changed)
|
||||
input.set_field(&mut db).to(5);
|
||||
assert_eq!(final_result(&db, input), 100);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 5 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: read_maybe_specified(0) } }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: final_result(0) } }",
|
||||
]"#]]);
|
||||
|
||||
// Set input to 20:
|
||||
//
|
||||
// * `create_tracked` re-executes but does not specify any value
|
||||
// * `read_maybe_specified` is invoked and it calls `maybe_specified`, which now executes
|
||||
// (its value has not been specified)
|
||||
input.set_field(&mut db).to(20);
|
||||
assert_eq!(final_result(&db, input), 200);
|
||||
db.assert_logs(expect![[r#"
|
||||
[
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
|
||||
"create_tracked(MyInput { [salsa id]: Id(0), field: 20 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillDiscardStaleOutput { execute_key: create_tracked(0), output_key: maybe_specified(0) } }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: maybe_specified(0) } }",
|
||||
"maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
|
||||
"read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
|
||||
"final_result(MyInput { [salsa id]: Id(0), field: 20 })",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
|
||||
]"#]]);
|
||||
}
|
||||
|
||||
#[test]
fn test_run_0_then_5_then_10_then_20() {
    let mut db = Database::default();

    // Set input to 0:
    //
    // * `create_tracked` specifies `10` for `maybe_specified`
    // * final result of `100` is derived by executing `read_maybe_specified`
    let input = MyInput::new(&db, 0);
    assert_eq!(final_result(&db, input), 100);
    db.assert_logs(expect![[r#"
        [
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
            "final_result(MyInput { [salsa id]: Id(0), field: 0 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
            "create_tracked(MyInput { [salsa id]: Id(0), field: 0 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
            "read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 0 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
        ]"#]]);

    // Set input to 5:
    //
    // * `create_tracked` does re-execute, but specifies same value for `maybe_specified` as before
    // * `read_maybe_specified` does not re-execute (its input has not changed)
    input.set_field(&mut db).to(5);
    assert_eq!(final_result(&db, input), 100);
    db.assert_logs(expect![[r#"
        [
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
            "create_tracked(MyInput { [salsa id]: Id(0), field: 5 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: read_maybe_specified(0) } }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: final_result(0) } }",
        ]"#]]);

    // Set input to 10:
    //
    // * `create_tracked` does re-execute and specifies no value for `maybe_specified`
    // * `maybe_specified_value` returns 10; this is the same value as was specified.
    // * `read_maybe_specified` therefore does NOT need to execute.
    input.set_field(&mut db).to(10);
    assert_eq!(final_result(&db, input), 100);
    db.assert_logs(expect![[r#"
        [
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
            "create_tracked(MyInput { [salsa id]: Id(0), field: 10 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillDiscardStaleOutput { execute_key: create_tracked(0), output_key: maybe_specified(0) } }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: maybe_specified(0) } }",
            "maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 10 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: read_maybe_specified(0) } }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: final_result(0) } }",
        ]"#]]);

    // Set input to 20:
    //
    // * Everything re-executes to get new result (200).
    input.set_field(&mut db).to(20);
    assert_eq!(final_result(&db, input), 200);
    db.assert_logs(expect![[r#"
        [
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
            "create_tracked(MyInput { [salsa id]: Id(0), field: 20 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: maybe_specified(0) } }",
            "maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
            "read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
            "final_result(MyInput { [salsa id]: Id(0), field: 20 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
        ]"#]]);
}
|
||||
|
||||
#[test]
fn test_run_5_then_20() {
    let mut db = Database::default();

    // Input of 5: everything executes from scratch; the final result of
    // `100` is derived by executing `read_maybe_specified`.
    let input = MyInput::new(&db, 5);
    assert_eq!(final_result(&db, input), 100);
    db.assert_logs(expect![[r#"
        [
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
            "final_result(MyInput { [salsa id]: Id(0), field: 5 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
            "create_tracked(MyInput { [salsa id]: Id(0), field: 5 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
            "read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 5 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
        ]"#]]);

    // Input of 20: the previously specified value for `maybe_specified` is
    // discarded as stale output, so everything re-executes to produce 200.
    input.set_field(&mut db).to(20);
    assert_eq!(final_result(&db, input), 200);
    db.assert_logs(expect![[r#"
        [
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
            "create_tracked(MyInput { [salsa id]: Id(0), field: 20 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillDiscardStaleOutput { execute_key: create_tracked(0), output_key: maybe_specified(0) } }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: maybe_specified(0) } }",
            "maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: read_maybe_specified(0) } }",
            "read_maybe_specified(MyTracked { [salsa id]: Id(0), input: MyInput { [salsa id]: Id(0), field: 20 } })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: final_result(0) } }",
            "final_result(MyInput { [salsa id]: Id(0), field: 20 })",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
            "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
        ]"#]]);
}
|
Loading…
Reference in a new issue