413: panic when reading fields of tracked structs from older revisions r=nikomatsakis a=XFFXFF

Fixes #407

Co-authored-by: XFFXFF <1247714429@qq.com>
Co-authored-by: zhou fan <1247714429@qq.com>
bors[bot] 2022-09-26 20:41:48 +00:00 committed by GitHub
commit ccd604d028
19 changed files with 265 additions and 160 deletions

View file

@ -158,7 +158,7 @@ impl TrackedStruct {
let __ingredients = <#jar_ty as salsa::storage::HasIngredientsFor< #ident >>::ingredient(__jar);
let __id = __ingredients.#struct_index.new_struct(__runtime, (#(#id_field_names,)*));
#(
__ingredients.#value_field_indices.specify_field(__db, __id, #value_field_names);
__ingredients.#value_field_indices.specify_and_record(__db, __id, #value_field_names);
)*
__id
}
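
The change above is the heart of the fix: the generated `new` now routes value fields through `specify_and_record`, so field writes are registered as outputs of the creating query instead of being stored invisibly. A minimal standalone sketch of the difference (simplified types, not salsa's real signatures):

struct ActiveQuery {
    outputs: Vec<u32>, // stand-in for the query's recorded output edges
}

// The removed path: store the memo and nothing else; the write is
// invisible to revision validation, which is what allowed stale reads (#407).
fn specify_field(store: &mut Vec<(u32, String)>, key: u32, value: String) {
    store.push((key, value));
}

// The new path: store the memo *and* record it as an output of the active
// query, so it participates in diff_outputs / mark_validated_output later.
fn specify_and_record(
    active: &mut ActiveQuery,
    store: &mut Vec<(u32, String)>,
    key: u32,
    value: String,
) {
    store.push((key, value));
    active.outputs.push(key);
}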

View file

@ -61,8 +61,10 @@ impl<Data: Clone> AccumulatorIngredient<Data> {
produced_at: current_revision,
});
// This is the first push in a new revision. Reset.
if accumulated_values.produced_at != current_revision {
// When we call `push` in a query, we add the accumulator to the query's outputs.
// If we find here that this accumulator is not an output of the active query,
// then the accumulated values we stored for it are out of date.
if !runtime.is_output_of_active_query(self.dependency_index()) {
accumulated_values.values.truncate(0);
accumulated_values.produced_at = current_revision;
}
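
A minimal standalone model of the new reset rule (simplified types; the real check is `runtime.is_output_of_active_query`): values accumulated in an earlier revision are discarded on the first push from a query that does not yet list this accumulator among its outputs.

use std::collections::HashSet;

struct Runtime {
    active_outputs: HashSet<u32>, // outputs recorded by the active query
}

struct AccumulatorIngredient {
    index: u32,
    values: Vec<String>,
}

impl AccumulatorIngredient {
    fn push(&mut self, runtime: &mut Runtime, value: String) {
        // Not yet an output of the active query: anything stored here came
        // from an older revision (or an older execution) and is out of date.
        if !runtime.active_outputs.contains(&self.index) {
            self.values.clear();
            runtime.active_outputs.insert(self.index);
        }
        self.values.push(value);
    }
}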

View file

@ -1,6 +1,5 @@
use crate::{
hash::FxHashSet,
key::DependencyIndex,
runtime::local_state::QueryOrigin,
storage::{HasJar, HasJarsDyn},
DatabaseKeyIndex,
@ -65,23 +64,12 @@ impl Stack {
/// Extend the stack of queries with the dependencies from `origin`.
fn extend(&mut self, origin: Option<QueryOrigin>) {
match origin {
None
| Some(QueryOrigin::Assigned(_))
| Some(QueryOrigin::BaseInput)
| Some(QueryOrigin::Field) => {}
None | Some(QueryOrigin::Assigned(_)) | Some(QueryOrigin::BaseInput) => {}
Some(QueryOrigin::Derived(edges)) | Some(QueryOrigin::DerivedUntracked(edges)) => {
for DependencyIndex {
ingredient_index,
key_index,
} in edges.inputs().iter().copied()
{
if let Some(key_index) = key_index {
let i = DatabaseKeyIndex {
ingredient_index,
key_index,
};
for dependency_index in edges.inputs() {
if let Ok(i) = DatabaseKeyIndex::try_from(dependency_index) {
if self.s.insert(i) {
self.v.push(i);
self.v.push(i)
}
}
}
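
The `DatabaseKeyIndex::try_from(dependency_index)` call packages up the destructuring the removed lines did by hand: a `DependencyIndex` carries an optional key, and only entries naming a concrete key convert. A self-contained sketch of that conversion (index types simplified to `u32`):

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DependencyIndex {
    ingredient_index: u32,
    key_index: Option<u32>, // None = dependency on the ingredient as a whole
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DatabaseKeyIndex {
    ingredient_index: u32,
    key_index: u32,
}

impl TryFrom<DependencyIndex> for DatabaseKeyIndex {
    type Error = ();

    fn try_from(d: DependencyIndex) -> Result<Self, Self::Error> {
        // Only dependencies that name a specific key have a database key.
        match d.key_index {
            Some(key_index) => Ok(DatabaseKeyIndex {
                ingredient_index: d.ingredient_index,
                key_index,
            }),
            None => Err(()),
        }
    }
}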

View file

@ -1,6 +1,6 @@
use crate::{
key::DependencyIndex, runtime::local_state::QueryRevisions, storage::HasJarsDyn, Database,
DatabaseKeyIndex, Event, EventKind,
hash::FxHashSet, key::DependencyIndex, runtime::local_state::QueryRevisions,
storage::HasJarsDyn, Database, DatabaseKeyIndex, Event, EventKind,
};
use super::{memo::Memo, Configuration, DynDb, FunctionIngredient};
@ -18,25 +18,17 @@ where
old_memo: &Memo<C::Value>,
revisions: &QueryRevisions,
) {
let mut old_outputs = old_memo.revisions.origin.outputs().peekable();
let mut new_outputs = revisions.origin.outputs().peekable();
// Iterate over the outputs of the `old_memo` and put them into a hashset
let mut old_outputs = FxHashSet::default();
old_memo.revisions.origin.outputs().for_each(|i| {
old_outputs.insert(i);
});
// The two lists are in sorted order, so we can merge them in linear time.
while let (Some(&old_output), Some(&new_output)) = (old_outputs.peek(), new_outputs.peek())
{
#[allow(clippy::comparison_chain)]
if old_output < new_output {
// Output that was generated but is no longer.
Self::report_stale_output(db, key, old_output);
old_outputs.next();
} else if new_output < old_output {
// This is a new output that was not generated before.
// No action needed.
new_outputs.next();
} else {
// Output generated both times.
old_outputs.next();
new_outputs.next();
// Iterate over the outputs of the current query
// and remove elements from `old_outputs` when we find them
for new_output in revisions.origin.outputs() {
if old_outputs.contains(&new_output) {
old_outputs.remove(&new_output);
}
}
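
Note the trade this hunk makes: the old code merged two sorted lists in linear time, while the new code drops the sorted-order invariant (outputs now live in the insertion-ordered `input_outputs` set) and diffs with a hash set instead. A minimal sketch with integer keys; the leftovers are what get reported as stale:

use std::collections::HashSet;

/// Outputs produced by the old memo but not by the new execution.
fn stale_outputs(old: &[u32], new: &[u32]) -> HashSet<u32> {
    let mut old_outputs: HashSet<u32> = old.iter().copied().collect();
    for new_output in new {
        // Present in both revisions: not stale, strike it from the set.
        old_outputs.remove(new_output);
    }
    old_outputs // generated before, but no longer
}

fn main() {
    let stale = stale_outputs(&[1, 2, 3], &[2, 3, 4]);
    let expected: HashSet<u32> = [1].into_iter().collect();
    assert_eq!(stale, expected);
}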

View file

@ -5,7 +5,7 @@ use crate::{
debug::DebugWithDb,
key::DatabaseKeyIndex,
runtime::{
local_state::{ActiveQueryGuard, QueryOrigin},
local_state::{ActiveQueryGuard, EdgeKind, QueryOrigin},
StampedValue,
},
storage::HasJarsDyn,
@ -170,10 +170,8 @@ where
// during this revision or is otherwise stale.
return false;
}
QueryOrigin::BaseInput | QueryOrigin::Field => {
// BaseInput: This value was `set` by the mutator thread -- ie, it's a base input and it cannot be out of date.
// Field: This value is the value of a field of some tracked struct S. It is always updated whenever S is created.
// So if a query has access to S, then they will have an up-to-date value.
QueryOrigin::BaseInput => {
// This value was `set` by the mutator thread -- ie, it's a base input and it cannot be out of date.
return true;
}
QueryOrigin::DerivedUntracked(_) => {
@ -188,9 +186,32 @@ where
// valid, then some later input I1 might never have executed at all, so verifying
// it is still up to date is meaningless.
let last_verified_at = old_memo.verified_at.load();
for &input in edges.inputs().iter() {
if db.maybe_changed_after(input, last_verified_at) {
return false;
for &(edge_kind, dependency_index) in edges.input_outputs.iter() {
match edge_kind {
EdgeKind::Input => {
if db.maybe_changed_after(dependency_index, last_verified_at) {
return false;
}
}
EdgeKind::Output => {
// Subtle: Mark outputs as validated now, even though we may
// later find an input that requires us to re-execute the function.
// Even if it re-executes, the function will wind up writing the same value,
// since all prior inputs were green. It's important to do this during
// this loop, because it's possible that one of our input queries will
// re-execute and may read one of our earlier outputs
// (e.g., in a scenario where we do something like
// `e = Entity::new(..); query(e);` and `query` reads a field of `e`).
//
// NB. Accumulators are also outputs, but the above logic doesn't
// quite apply to them. Since multiple values are pushed, the first value
// may be unchanged, but later values could be different.
// In that case, however, the data accumulated
// by this function cannot be read until this function is marked green,
// so even if we mark them as valid here, the function will re-execute
// and overwrite the contents.
db.mark_validated_output(database_key_index, dependency_index);
}
}
}
}
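
Two things changed in this verification path: the `QueryOrigin::Field` shortcut that declared field memos eternally valid is gone, and the loop now walks one interleaved list of tagged edges rather than a plain input slice. In outline (a simplified model, with closures standing in for the database calls):

#[derive(Clone, Copy, PartialEq, Eq)]
enum EdgeKind {
    Input,
    Output,
}

/// Returns false as soon as any input may have changed; marks outputs
/// validated along the way, per the comment in the hunk above.
fn deep_verify(
    edges: &[(EdgeKind, u32)],
    maybe_changed_after: impl Fn(u32) -> bool,
    mut mark_validated_output: impl FnMut(u32),
) -> bool {
    for &(kind, index) in edges {
        match kind {
            EdgeKind::Input => {
                if maybe_changed_after(index) {
                    return false; // a red input: caller must re-execute
                }
            }
            // Eagerly mark outputs valid; even if a later input forces
            // re-execution, the same values would be rewritten, since all
            // inputs seen so far were green.
            EdgeKind::Output => mark_validated_output(index),
        }
    }
    true
}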

View file

@ -55,8 +55,7 @@ impl<K: AsId, V> MemoMap<K, V> {
match memo.revisions.origin {
QueryOrigin::Assigned(_)
| QueryOrigin::DerivedUntracked(_)
| QueryOrigin::BaseInput
| QueryOrigin::Field => {
| QueryOrigin::BaseInput => {
// Careful: Cannot evict memos whose values were
// assigned as output of another query
// or those with untracked inputs

View file

@ -46,7 +46,8 @@ where
//
// Now, if we invoke Q3 first, we get one result for Q2, but if we invoke Q4 first, we get a different value. That's no good.
let database_key_index = key.database_key_index(db);
if !runtime.is_output_of_active_query(database_key_index) {
let dependency_index = database_key_index.into();
if !runtime.is_output_of_active_query(dependency_index) {
panic!("can only use `specfiy` on entities created during current query");
}
@ -91,17 +92,6 @@ where
self.insert_memo(db, key, memo);
}
/// Specify the value for `key` but do not record it as an output.
/// This is used for the value fields declared on a tracked struct.
/// They are different from other calls to specify because we KNOW they will be given a value by construction,
/// so recording them as an explicit output (and checking them for validity, etc) is pure overhead.
pub fn specify_field<'db>(&self, db: &'db DynDb<'db, C>, key: C::Key, value: C::Value)
where
C::Key: TrackedStructInDb<DynDb<'db, C>>,
{
self.specify(db, key, value, |_| QueryOrigin::Field);
}
/// Specify the value for `key` *and* record that we did so.
/// Used for explicit calls to `specify`, but not needed for pre-declared tracked struct fields.
pub fn specify_and_record<'db>(&self, db: &'db DynDb<'db, C>, key: C::Key, value: C::Value)
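
The panic above is the user-visible contract: `specify` may only target a tracked struct that the active query itself created, because only then is the struct among the query's recorded outputs. Adapted from the new test at the bottom of this commit (function names are illustrative):

#[salsa::tracked(jar = Jar)]
fn create_and_specify(db: &dyn Db, input: MyInput) -> u32 {
    // Legal: `t` was created by *this* query, so it is an output of the
    // active query and `is_output_of_active_query` succeeds.
    let t = MyTracked::new(db, input.field(db) * 2);
    tracked_fn_extra::specify(db, t, 2222);
    tracked_fn_extra(db, t)
}

// By contrast, calling `tracked_fn_extra::specify(db, t, ...)` from a query
// that merely received `t` as an argument would hit the panic, since `t` is
// not an output of that query.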

View file

@ -12,7 +12,10 @@ use crate::{
Cancelled, Cycle, Database, Event, EventKind, Revision,
};
use self::{dependency_graph::DependencyGraph, local_state::ActiveQueryGuard};
use self::{
dependency_graph::DependencyGraph,
local_state::{ActiveQueryGuard, EdgeKind},
};
use super::{tracked_struct::Disambiguator, IngredientIndex};
@ -97,7 +100,7 @@ impl Runtime {
self.local_state.active_query()
}
pub(crate) fn empty_dependencies(&self) -> Arc<[DependencyIndex]> {
pub(crate) fn empty_dependencies(&self) -> Arc<[(EdgeKind, DependencyIndex)]> {
self.shared_state.empty_dependencies.clone()
}
@ -153,7 +156,7 @@ impl Runtime {
}
/// Check whether `entity` is contained in the list of outputs written by the current query.
pub(super) fn is_output_of_active_query(&self, entity: DatabaseKeyIndex) -> bool {
pub(super) fn is_output_of_active_query(&self, entity: DependencyIndex) -> bool {
self.local_state.is_output(entity)
}

View file

@ -1,5 +1,3 @@
use std::collections::BTreeSet;
use crate::{
durability::Durability,
hash::{FxIndexMap, FxIndexSet},
@ -8,7 +6,7 @@ use crate::{
Cycle, Revision, Runtime,
};
use super::local_state::{QueryEdges, QueryOrigin, QueryRevisions};
use super::local_state::{EdgeKind, QueryEdges, QueryOrigin, QueryRevisions};
#[derive(Debug)]
pub(super) struct ActiveQuery {
@ -22,8 +20,13 @@ pub(super) struct ActiveQuery {
/// untracked read, this will be set to the most recent revision.
pub(super) changed_at: Revision,
/// Set of subqueries that were accessed thus far.
pub(super) dependencies: FxIndexSet<DependencyIndex>,
/// Inputs: Set of subqueries that were accessed thus far.
/// Outputs: Tracks values written by this query. Could be...
///
/// * tracked structs created
/// * invocations of `specify`
/// * accumulators pushed to
pub(super) input_outputs: FxIndexSet<(EdgeKind, DependencyIndex)>,
/// True if there was an untracked read.
pub(super) untracked_read: bool,
@ -35,16 +38,6 @@ pub(super) struct ActiveQuery {
/// hash is added to this map. If it is not present, then the disambiguator is 0.
/// Otherwise it is 1 more than the current value (which is incremented).
pub(super) disambiguator_map: FxIndexMap<u64, Disambiguator>,
/// Tracks values written by this query. Could be...
///
/// * tracked structs created
/// * invocations of `specify`
/// * accumulators pushed to
///
/// We use a btree-set because we want to be able to
/// extract the keys in sorted order.
pub(super) outputs: BTreeSet<DependencyIndex>,
}
impl ActiveQuery {
@ -53,11 +46,10 @@ impl ActiveQuery {
database_key_index,
durability: Durability::MAX,
changed_at: Revision::start(),
dependencies: FxIndexSet::default(),
input_outputs: FxIndexSet::default(),
untracked_read: false,
cycle: None,
disambiguator_map: Default::default(),
outputs: Default::default(),
}
}
@ -67,7 +59,7 @@ impl ActiveQuery {
durability: Durability,
revision: Revision,
) {
self.dependencies.insert(input);
self.input_outputs.insert((EdgeKind::Input, input));
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
@ -86,29 +78,22 @@ impl ActiveQuery {
/// Adds a key to our list of outputs.
pub(super) fn add_output(&mut self, key: DependencyIndex) {
self.outputs.insert(key);
self.input_outputs.insert((EdgeKind::Output, key));
}
/// True if the given key was output by this query.
pub(super) fn is_output(&self, key: DatabaseKeyIndex) -> bool {
let key: DependencyIndex = key.into();
self.outputs.contains(&key)
pub(super) fn is_output(&self, key: DependencyIndex) -> bool {
self.input_outputs.contains(&(EdgeKind::Output, key))
}
pub(crate) fn revisions(&self, runtime: &Runtime) -> QueryRevisions {
let separator = self.dependencies.len();
let input_outputs = if self.dependencies.is_empty() && self.outputs.is_empty() {
let input_outputs = if self.input_outputs.is_empty() {
runtime.empty_dependencies()
} else {
self.dependencies
.iter()
.copied()
.chain(self.outputs.iter().copied())
.collect()
self.input_outputs.iter().copied().collect()
};
let edges = QueryEdges::new(separator, input_outputs);
let edges = QueryEdges::new(input_outputs);
let origin = if self.untracked_read {
QueryOrigin::DerivedUntracked(edges)
@ -129,7 +114,8 @@ impl ActiveQuery {
self.changed_at = self.changed_at.max(other.changed_at);
self.durability = self.durability.min(other.durability);
self.untracked_read |= other.untracked_read;
self.dependencies.extend(other.dependencies.iter().copied());
self.input_outputs
.extend(other.input_outputs.iter().copied());
}
/// Removes the participants in `cycle` from my dependencies.
@ -137,7 +123,7 @@ impl ActiveQuery {
pub(super) fn remove_cycle_participants(&mut self, cycle: &Cycle) {
for p in cycle.participant_keys() {
let p: DependencyIndex = p.into();
self.dependencies.remove(&p);
self.input_outputs.remove(&(EdgeKind::Input, p));
}
}
@ -146,7 +132,7 @@ impl ActiveQuery {
pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) {
self.changed_at = cycle_query.changed_at;
self.durability = cycle_query.durability;
self.dependencies = cycle_query.dependencies.clone();
self.input_outputs = cycle_query.input_outputs.clone();
}
pub(super) fn disambiguate(&mut self, hash: u64) -> Disambiguator {
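
The old pair of collections (an insertion-ordered `dependencies` set plus a sorted `outputs` B-tree set) collapses into one insertion-ordered set of tagged edges. A toy version, with a duplicate-free `Vec` standing in for `FxIndexSet` and `u32` for `DependencyIndex`:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum EdgeKind {
    Input,
    Output,
}

#[derive(Default)]
struct ActiveQuery {
    input_outputs: Vec<(EdgeKind, u32)>, // insertion order == execution order
}

impl ActiveQuery {
    fn insert(&mut self, edge: (EdgeKind, u32)) {
        if !self.input_outputs.contains(&edge) {
            self.input_outputs.push(edge);
        }
    }

    fn add_input(&mut self, input: u32) {
        // Inputs keep execution order, which the red-green algorithm needs.
        self.insert((EdgeKind::Input, input));
    }

    fn add_output(&mut self, output: u32) {
        // Outputs no longer need to be sorted: the hash-set diff in
        // `diff_outputs` works regardless of order.
        self.insert((EdgeKind::Output, output));
    }

    fn is_output(&self, key: u32) -> bool {
        self.input_outputs.contains(&(EdgeKind::Output, key))
    }
}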

View file

@ -60,10 +60,6 @@ pub enum QueryOrigin {
/// The `DatabaseKeyIndex` is the identity of the assigning query.
Assigned(DatabaseKeyIndex),
/// This is the value field of a tracked struct.
/// These are different from `Assigned` because we know they will always be assigned a value and hence are never "out of date".
Field,
/// This value was set as a base input to the computation.
BaseInput,
@ -80,19 +76,22 @@ pub enum QueryOrigin {
}
impl QueryOrigin {
/// Indices for queries *written* by this query (or `&[]` if its value was assigned).
/// Indices for queries *written* by this query (or an empty iterator if its value was assigned).
pub(crate) fn outputs(&self) -> impl Iterator<Item = DependencyIndex> + '_ {
let slice = match self {
QueryOrigin::Derived(edges) | QueryOrigin::DerivedUntracked(edges) => {
&edges.input_outputs[edges.separator as usize..]
}
QueryOrigin::Assigned(_) | QueryOrigin::BaseInput | QueryOrigin::Field => &[],
let opt_edges = match self {
QueryOrigin::Derived(edges) | QueryOrigin::DerivedUntracked(edges) => Some(edges),
QueryOrigin::Assigned(_) | QueryOrigin::BaseInput => None,
};
slice.iter().copied()
opt_edges.into_iter().flat_map(|edges| edges.outputs())
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum EdgeKind {
Input,
Output,
}
/// The edges between a memoized value and other queries in the dependency graph.
/// These edges include both dependency edges
/// (e.g., when creating the memoized value for Q0, it executed another function Q1)
@ -102,8 +101,6 @@ impl QueryOrigin {
pub struct QueryEdges {
/// The list of outgoing edges from this node.
/// This list combines *both* inputs and outputs.
/// The inputs are defined from the indices `0..S` where
/// `S` is the value of the `separator` field.
///
/// Note that we always track input dependencies even when there are untracked reads.
/// Untracked reads mean that we can't verify values, so we don't use the list of inputs for that,
@ -114,28 +111,33 @@ pub struct QueryEdges {
/// Important:
///
/// * The inputs must be in **execution order** for the red-green algorithm to work.
/// * The outputs must be in **sorted order** so that we can easily "diff" them between revisions.
input_outputs: Arc<[DependencyIndex]>,
/// The index that separates inputs from outputs in the `tracked` field.
separator: u32,
pub input_outputs: Arc<[(EdgeKind, DependencyIndex)]>,
}
impl QueryEdges {
/// Returns the (tracked) inputs that were executed in computing this memoized value.
///
/// These will always be in execution order.
pub(crate) fn inputs(&self) -> &[DependencyIndex] {
&self.input_outputs[0..self.separator as usize]
pub(crate) fn inputs(&self) -> impl Iterator<Item = DependencyIndex> + '_ {
self.input_outputs
.iter()
.filter(|(edge_kind, _)| *edge_kind == EdgeKind::Input)
.map(|(_, dependency_index)| *dependency_index)
}
/// Returns the (tracked) outputs that were written in computing this memoized value.
///
/// These will always be in execution order.
pub(crate) fn outputs(&self) -> impl Iterator<Item = DependencyIndex> + '_ {
self.input_outputs
.iter()
.filter(|(edge_kind, _)| *edge_kind == EdgeKind::Output)
.map(|(_, dependency_index)| *dependency_index)
}
/// Creates a new `QueryEdges`; the values given for each field must meet struct invariants.
pub(crate) fn new(separator: usize, input_outputs: Arc<[DependencyIndex]>) -> Self {
debug_assert!(separator <= input_outputs.len());
Self {
separator: u32::try_from(separator).unwrap(),
input_outputs,
}
pub(crate) fn new(input_outputs: Arc<[(EdgeKind, DependencyIndex)]>) -> Self {
Self { input_outputs }
}
}
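
One detail worth calling out from `QueryOrigin::outputs` above: `opt_edges.into_iter().flat_map(..)` lets the method return a single iterator type whether or not edges exist, since an `Option` iterates zero or one times. In isolation, with a plain slice standing in for the edge list:

fn outputs(opt_edges: Option<&[u32]>) -> impl Iterator<Item = u32> + '_ {
    // `None` contributes nothing; `Some(edges)` splices in the edge items.
    // This replaces the old `&[]` fallback now that edges are filtered lazily.
    opt_edges.into_iter().flat_map(|edges| edges.iter().copied())
}

fn main() {
    let edges: &[u32] = &[1, 2, 3];
    assert_eq!(outputs(Some(edges)).collect::<Vec<_>>(), vec![1, 2, 3]);
    assert_eq!(outputs(None).count(), 0);
}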
@ -197,7 +199,7 @@ impl LocalState {
})
}
pub(super) fn is_output(&self, entity: DatabaseKeyIndex) -> bool {
pub(super) fn is_output(&self, entity: DependencyIndex) -> bool {
self.with_query_stack(|stack| {
if let Some(top_query) = stack.last_mut() {
top_query.is_output(entity)

View file

@ -5,7 +5,7 @@ use parking_lot::Mutex;
use crate::{durability::Durability, key::DependencyIndex, revision::AtomicRevision};
use super::dependency_graph::DependencyGraph;
use super::{dependency_graph::DependencyGraph, local_state::EdgeKind};
/// State that will be common to all threads (when we support multiple threads)
#[derive(Debug)]
@ -14,7 +14,7 @@ pub(super) struct SharedState {
pub(super) next_id: AtomicUsize,
/// Vector we can clone
pub(super) empty_dependencies: Arc<[DependencyIndex]>,
pub(super) empty_dependencies: Arc<[(EdgeKind, DependencyIndex)]>,
/// Set to true when the current revision has been canceled.
/// This is done when an input is being changed. The flag

View file

@ -1,14 +1,14 @@
error[E0277]: the trait bound `MyInput: TrackedStructInDb<dyn Db>` is not satisfied
--> tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs:21:28
|
20 | #[salsa::tracked(jar = Jar, specify)]
| ------------------------------------- required by a bound introduced by this call
21 | fn tracked_fn(db: &dyn Db, input: MyInput) -> MyTracked {
| ^^^^^ the trait `TrackedStructInDb<dyn Db>` is not implemented for `MyInput`
|
= help: the trait `TrackedStructInDb<DB>` is implemented for `MyTracked`
note: required by a bound in `function::specify::<impl FunctionIngredient<C>>::specify_and_record`
--> $WORKSPACE/components/salsa-2022/src/function/specify.rs
|
| C::Key: TrackedStructInDb<DynDb<'db, C>>,
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `function::specify::<impl FunctionIngredient<C>>::specify_and_record`

View file

@ -1,14 +1,14 @@
error[E0277]: the trait bound `MyInterned: TrackedStructInDb<dyn Db>` is not satisfied
--> tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs:22:28
|
21 | #[salsa::tracked(jar = Jar, specify)]
| ------------------------------------- required by a bound introduced by this call
22 | fn tracked_fn(db: &dyn Db, input: MyInterned) -> MyTracked {
| ^^^^^ the trait `TrackedStructInDb<dyn Db>` is not implemented for `MyInterned`
|
= help: the trait `TrackedStructInDb<DB>` is implemented for `MyTracked`
note: required by a bound in `function::specify::<impl FunctionIngredient<C>>::specify_and_record`
--> $WORKSPACE/components/salsa-2022/src/function/specify.rs
|
| C::Key: TrackedStructInDb<DynDb<'db, C>>,
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `function::specify::<impl FunctionIngredient<C>>::specify_and_record`

View file

@ -91,7 +91,7 @@ fn basic() {
let mut db = Database::default();
// Creates 3 tracked structs
let input = MyInput::new(&mut db, 3);
let input = MyInput::new(&db, 3);
assert_eq!(final_result(&db, input), 2 * 2 + 2);
db.assert_logs(expect![[r#"
[
@ -125,6 +125,7 @@ fn basic() {
"salsa_event(DidDiscard { key: MyTracked(5) })",
"salsa_event(DidDiscard { key: field(5) })",
"salsa_event(DidDiscard { key: copy_field(5) })",
"salsa_event(WillDiscardStaleOutput { execute_key: create_tracked_structs(0), output_key: field(2) })",
"final_result(MyInput(Id { value: 1 }))",
]"#]]);
}

View file

@ -108,6 +108,7 @@ fn basic() {
"salsa_event(DidDiscard { key: MyTracked(2) })",
"salsa_event(DidDiscard { key: field(2) })",
"salsa_event(DidDiscard { key: contribution_from_struct(2) })",
"salsa_event(WillDiscardStaleOutput { execute_key: create_tracked_structs(0), output_key: field(2) })",
"final_result(MyInput(Id { value: 1 }))",
]"#]]);
}

View file

@ -0,0 +1,46 @@
use test_log::test;
#[salsa::jar(db = Db)]
struct Jar(MyInput, MyTracked, tracked_fn);
trait Db: salsa::DbWithJar<Jar> {}
#[salsa::input(jar = Jar)]
struct MyInput {
field: u32,
}
#[salsa::tracked(jar = Jar)]
struct MyTracked {
field: u32,
}
#[salsa::tracked(jar = Jar)]
fn tracked_fn(db: &dyn Db, input: MyInput) -> MyTracked {
MyTracked::new(db, input.field(db) / 2)
}
#[salsa::db(Jar)]
#[derive(Default)]
struct Database {
storage: salsa::Storage<Self>,
}
impl salsa::Database for Database {}
impl Db for Database {}
#[test]
#[should_panic(expected = "`execute` method for field")]
fn execute() {
let mut db = Database::default();
let input = MyInput::new(&db, 22);
let tracked = tracked_fn(&db, input);
// modify the input and change the revision
input.set_field(&mut db).to(24);
// panic when reading fields of tracked structs from older revisions
tracked.field(&db);
}

View file

@ -39,8 +39,8 @@ impl HasLogger for Database {
#[test]
fn basic() {
let mut db = Database::default();
let input1 = MyInput::new(&mut db, 3, 4);
let db = Database::default();
let input1 = MyInput::new(&db, 3, 4);
let input2 = MyInput::get(&db);
assert_eq!(input1, input2);
@ -52,20 +52,20 @@ fn basic() {
#[test]
#[should_panic]
fn twice() {
let mut db = Database::default();
let input1 = MyInput::new(&mut db, 3, 4);
let db = Database::default();
let input1 = MyInput::new(&db, 3, 4);
let input2 = MyInput::get(&db);
assert_eq!(input1, input2);
// should panic here
_ = MyInput::new(&mut db, 3, 5);
_ = MyInput::new(&db, 3, 5);
}
#[test]
fn debug() {
let mut db = Database::default();
let input = MyInput::new(&mut db, 3, 4);
let db = Database::default();
let input = MyInput::new(&db, 3, 4);
let actual = format!("{:?}", input.debug(&db));
let expected = expect![[r#"MyInput { [salsa id]: 0, id_field: 4 }"#]];
expected.assert_eq(&actual);

View file

@ -209,6 +209,7 @@ fn test_run_0_then_5_then_20() {
[
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: input(0) } }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
"create_tracked(MyInput(Id { value: 1 }))",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
@ -228,6 +229,7 @@ fn test_run_0_then_5_then_20() {
[
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: input(0) } }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
"create_tracked(MyInput(Id { value: 1 }))",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillDiscardStaleOutput { execute_key: create_tracked(0), output_key: maybe_specified(0) } }",
@ -280,6 +282,7 @@ fn test_run_0_then_5_then_10_then_20() {
[
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: input(0) } }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
"create_tracked(MyInput(Id { value: 1 }))",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
@ -299,6 +302,7 @@ fn test_run_0_then_5_then_10_then_20() {
[
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: input(0) } }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
"create_tracked(MyInput(Id { value: 1 }))",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillDiscardStaleOutput { execute_key: create_tracked(0), output_key: maybe_specified(0) } }",
@ -320,6 +324,7 @@ fn test_run_0_then_5_then_10_then_20() {
[
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: input(0) } }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
"create_tracked(MyInput(Id { value: 1 }))",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
@ -364,6 +369,7 @@ fn test_run_5_then_20() {
[
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillCheckCancellation }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: input(0) } }",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: create_tracked(0) } }",
"create_tracked(MyInput(Id { value: 1 }))",
"Event { runtime_id: RuntimeId { counter: 0 }, kind: WillDiscardStaleOutput { execute_key: create_tracked(0), output_key: maybe_specified(0) } }",

View file

@ -0,0 +1,68 @@
use expect_test::expect;
use salsa::{Database as SalsaDatabase, DebugWithDb};
use salsa_2022_tests::{HasLogger, Logger};
#[salsa::jar(db = Db)]
struct Jar(MyInput, MyTracked, tracked_fn, tracked_fn_extra);
trait Db: salsa::DbWithJar<Jar> + HasLogger {}
#[salsa::input(jar = Jar)]
struct MyInput {
field: u32,
}
#[salsa::tracked(jar = Jar)]
struct MyTracked {
field: u32,
}
#[salsa::tracked(jar = Jar)]
fn tracked_fn(db: &dyn Db, input: MyInput) -> u32 {
db.push_log(format!("tracked_fn({:?})", input.debug(db)));
let t = MyTracked::new(db, input.field(db) * 2);
tracked_fn_extra::specify(db, t, 2222);
tracked_fn_extra(db, t)
}
#[salsa::tracked(jar = Jar, specify)]
fn tracked_fn_extra(db: &dyn Db, input: MyTracked) -> u32 {
db.push_log(format!("tracked_fn_extra({:?})", input.debug(db)));
0
}
#[salsa::db(Jar)]
#[derive(Default)]
struct Database {
storage: salsa::Storage<Self>,
logger: Logger,
}
impl salsa::Database for Database {}
impl Db for Database {}
impl HasLogger for Database {
fn logger(&self) -> &Logger {
&self.logger
}
}
#[test]
fn execute() {
let mut db = Database::default();
let input = MyInput::new(&db, 22);
assert_eq!(tracked_fn(&db, input), 2222);
db.assert_logs(expect![[r#"
[
"tracked_fn(MyInput { [salsa id]: 0 })",
]"#]]);
// A "synthetic write" causes the system to act *as though* some
// input of durability `durability` has changed.
db.synthetic_write(salsa::Durability::LOW);
// Re-run the query on the original input. Nothing re-executes!
assert_eq!(tracked_fn(&db, input), 2222);
db.assert_logs(expect!["[]"]);
}