Merge pull request #635 from Veykril/veykril/push-vktlqysrmlmv

Replace unnecessary `Arc` by `Box`
Lukas Wirth 2024-12-23 11:49:09 +00:00 committed by GitHub
commit c8d47cc01a
8 changed files with 83 additions and 56 deletions
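
The change in a nutshell: several `Arc<[…]>` values in salsa only ever have a single owner, so the atomic reference counting they pay for is wasted; a `Box<[…]>` gives the same owned-slice semantics without it. A minimal standalone sketch of the trade-off, with made-up helpers (illustrative only, not salsa code):

use std::sync::Arc;

// A boxed slice is plain single ownership: no reference-count header in the
// allocation, and any copy is an explicit deep clone. An `Arc<[T]>` stores
// atomic strong/weak counters next to the data and pays an atomic operation
// on every clone and drop.
fn single_owner(edges: Box<[u32]>) -> usize {
    edges.len() // dropped (and freed) when this function returns
}

fn shared(edges: Arc<[u32]>) -> usize {
    let alias = Arc::clone(&edges); // bumps the atomic refcount
    alias.len()
}

fn main() {
    let owned: Box<[u32]> = vec![1, 2, 3].into_boxed_slice();
    let counted: Arc<[u32]> = vec![1, 2, 3].into();
    assert_eq!(single_owner(owned), shared(counted));
}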


@@ -21,7 +21,6 @@ rustc-hash = "2"
 salsa-macro-rules = { version = "0.1.0", path = "components/salsa-macro-rules" }
 salsa-macros = { path = "components/salsa-macros" }
 smallvec = "1"
-lazy_static = "1"
 rayon = "1.10.0"
 
 [dev-dependencies]


@@ -65,7 +65,7 @@ pub struct IngredientImpl<A: Accumulator> {
 }
 
 impl<A: Accumulator> IngredientImpl<A> {
-    /// Find the accumulator ingrediate for `A` in the database, if any.
+    /// Find the accumulator ingredient for `A` in the database, if any.
     pub fn from_db<Db>(db: &Db) -> Option<&Self>
     where
         Db: ?Sized + Database,


@@ -8,7 +8,6 @@ use crate::{
     hash::FxIndexSet,
     key::{DatabaseKeyIndex, DependencyIndex},
     tracked_struct::{Disambiguator, Identity},
-    zalsa_local::EMPTY_DEPENDENCIES,
    Cycle, Id, Revision,
 };
@@ -108,7 +107,7 @@ impl ActiveQuery {
     pub(crate) fn into_revisions(self) -> QueryRevisions {
         let input_outputs = if self.input_outputs.is_empty() {
-            EMPTY_DEPENDENCIES.clone()
+            Box::default()
         } else {
             self.input_outputs.into_iter().collect()
         };
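
A note on the `Box::default()` above: an empty boxed slice is created without touching the heap, which is why the lazily initialized, shared `EMPTY_DEPENDENCIES` constant (and the `lazy_static` dependency behind it) is no longer needed. A small sketch of that point with stand-in types (illustrative only, not salsa code):

// `Edge` stands in for salsa's `(EdgeKind, DependencyIndex)` pair.
type Edge = (u8, u32);

fn empty_edges() -> Box<[Edge]> {
    Box::default() // zero-length slice, no heap allocation
}

fn main() {
    assert!(empty_edges().is_empty());
    // Collecting only allocates when there is something to store.
    let filled: Box<[Edge]> = vec![(0, 1), (1, 2)].into_iter().collect();
    assert_eq!(filled.len(), 2);
}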


@@ -53,8 +53,9 @@
                continue;
            }
 
+            let ingredient = zalsa.lookup_ingredient(k.ingredient_index);
            // Extend `output` with any values accumulated by `k`.
-            if let Some(accumulated_map) = k.accumulated(db) {
+            if let Some(accumulated_map) = ingredient.accumulated(db, k.key_index) {
                accumulated_map.extend_with_accumulated(accumulator.index(), &mut output);
 
                // Skip over the inputs because we know that the entire sub-graph has no accumulated values
@@ -69,10 +70,7 @@
            // output vector, we want to push in execution order, so reverse order to
            // ensure the first child that was executed will be the first child popped
            // from the stack.
-            let Some(origin) = zalsa
-                .lookup_ingredient(k.ingredient_index)
-                .origin(db, k.key_index)
-            else {
+            let Some(origin) = ingredient.origin(db, k.key_index) else {
                continue;
            };


@@ -61,30 +61,44 @@ impl<C: Configuration> IngredientImpl<C> {
     /// with an equivalent memo that has no value. If the memo is untracked, BaseInput,
     /// or has values assigned as output of another query, this has no effect.
     pub(super) fn evict_value_from_memo_for<'db>(&'db self, zalsa: &'db Zalsa, id: Id) {
-        let Some(memo) = self.get_memo_from_table_for(zalsa, id) else {
-            return;
-        };
-
-        match memo.revisions.origin {
-            QueryOrigin::Assigned(_)
-            | QueryOrigin::DerivedUntracked(_)
-            | QueryOrigin::BaseInput => {
-                // Careful: Cannot evict memos whose values were
-                // assigned as output of another query
-                // or those with untracked inputs
-                // as their values cannot be reconstructed.
-            }
-            QueryOrigin::Derived(_) => {
-                let memo_evicted = Arc::new(Memo::new(
-                    None::<C::Output<'_>>,
-                    memo.verified_at.load(),
-                    memo.revisions.clone(),
-                ));
-
-                self.insert_memo_into_table_for(zalsa, id, memo_evicted);
-            }
-        }
+        zalsa.memo_table_for(id).map_memo::<Memo<C::Output<'_>>>(
+            self.memo_ingredient_index,
+            |memo| {
+                match memo.revisions.origin {
+                    QueryOrigin::Assigned(_)
+                    | QueryOrigin::DerivedUntracked(_)
+                    | QueryOrigin::BaseInput => {
+                        // Careful: Cannot evict memos whose values were
+                        // assigned as output of another query
+                        // or those with untracked inputs
+                        // as their values cannot be reconstructed.
+                        memo
+                    }
+                    QueryOrigin::Derived(_) => {
+                        // QueryRevisions: !Clone to discourage cloning, we need it here though
+                        let QueryRevisions {
+                            changed_at,
+                            durability,
+                            origin,
+                            tracked_struct_ids,
+                            accumulated,
+                        } = &memo.revisions;
+                        // Re-assemble the memo but with the value set to `None`
+                        Arc::new(Memo::new(
+                            None,
+                            memo.verified_at.load(),
+                            QueryRevisions {
+                                changed_at: *changed_at,
+                                durability: *durability,
+                                origin: origin.clone(),
+                                tracked_struct_ids: tracked_struct_ids.clone(),
+                                accumulated: accumulated.clone(),
+                            },
+                        ))
+                    }
+                }
+            },
+        );
     }
 }
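
The field-by-field rebuild in the `Derived` arm above exists because `QueryRevisions` deliberately does not implement `Clone`: exhaustive destructuring makes adding a field a compile error at this one copy site, instead of hiding the cost behind a casual `.clone()`. A minimal sketch of the pattern with a made-up type (illustrative only, not salsa code):

// Deliberately not `Clone`, so expensive copies stay explicit.
#[derive(Debug)]
struct Revisions {
    changed_at: u64,
    edges: Box<[u32]>,
}

fn copy_for_eviction(r: &Revisions) -> Revisions {
    // Exhaustive destructuring: adding a field to `Revisions` breaks this,
    // forcing the copy site to be reviewed.
    let Revisions { changed_at, edges } = r;
    Revisions {
        changed_at: *changed_at,
        edges: edges.clone(),
    }
}

fn main() {
    let r = Revisions { changed_at: 3, edges: Box::new([1, 2]) };
    assert_eq!(copy_for_eviction(&r).changed_at, 3);
}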


@@ -1,7 +1,4 @@
-use crate::{
-    accumulator::accumulated_map::AccumulatedMap, cycle::CycleRecoveryStrategy,
-    zalsa::IngredientIndex, Database, Id,
-};
+use crate::{cycle::CycleRecoveryStrategy, zalsa::IngredientIndex, Database, Id};
 
 /// An integer that uniquely identifies a particular query instance within the
 /// database. Used to track dependencies between queries. Fully ordered and
@@ -99,12 +96,6 @@ impl DatabaseKeyIndex {
     pub(crate) fn cycle_recovery_strategy(self, db: &dyn Database) -> CycleRecoveryStrategy {
         self.ingredient_index.cycle_recovery_strategy(db)
     }
-
-    pub(crate) fn accumulated(self, db: &dyn Database) -> Option<&AccumulatedMap> {
-        db.zalsa()
-            .lookup_ingredient(self.ingredient_index)
-            .accumulated(db, self.key_index)
-    }
 }
 
 impl std::fmt::Debug for DatabaseKeyIndex {


@@ -163,11 +163,40 @@ impl MemoTable {
         unsafe { Some(Self::from_dummy(arc_swap.load_full())) }
     }
 
-    pub(crate) fn into_memos(
-        mut self,
-    ) -> impl Iterator<Item = (MemoIngredientIndex, Arc<dyn Memo>)> {
-        let memos = std::mem::take(self.memos.get_mut());
-        memos
+    /// Calls `f` on the memo at `memo_ingredient_index` and replaces the memo with the result of `f`.
+    /// If the memo is not present, `f` is not called.
+    pub(crate) fn map_memo<M: Memo>(
+        &self,
+        memo_ingredient_index: MemoIngredientIndex,
+        f: impl FnOnce(Arc<M>) -> Arc<M>,
+    ) {
+        // If the memo slot is already occupied, it must already have the
+        // right type info etc, and we only need the read-lock.
+        let memos = self.memos.read();
+        let Some(MemoEntry {
+            data:
+                Some(MemoEntryData {
+                    type_id,
+                    to_dyn_fn: _,
+                    arc_swap,
+                }),
+        }) = memos.get(memo_ingredient_index.as_usize())
+        else {
+            return;
+        };
+        assert_eq!(
+            *type_id,
+            TypeId::of::<M>(),
+            "inconsistent type-id for `{memo_ingredient_index:?}`"
+        );
+        // SAFETY: type_id check asserted above
+        let memo = f(unsafe { Self::from_dummy(arc_swap.load_full()) });
+        unsafe { Self::from_dummy::<M>(arc_swap.swap(Self::to_dummy(memo))) };
+    }
+
+    pub(crate) fn into_memos(self) -> impl Iterator<Item = (MemoIngredientIndex, Arc<dyn Memo>)> {
+        self.memos
+            .into_inner()
             .into_iter()
             .zip(0..)
             .filter_map(|(mut memo, index)| memo.data.take().map(|d| (d, index)))
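
For orientation, the new `map_memo` follows a load-transform-swap shape over the `arc_swap` slot backing each memo entry; the real code additionally type-erases memos, hence the `TypeId` assertion and the `from_dummy`/`to_dummy` casts. A simplified sketch of just that shape, assuming the `arc-swap` crate and none of salsa's internals (the `map_in_place` helper is made up):

use std::sync::Arc;

use arc_swap::ArcSwap;

fn map_in_place<T>(slot: &ArcSwap<T>, f: impl FnOnce(Arc<T>) -> Arc<T>) {
    let current = slot.load_full(); // take an `Arc<T>` snapshot of the stored value
    let replacement = f(current); // build the replacement
    let _previous = slot.swap(replacement); // the previous `Arc` is dropped here
}

fn main() {
    let slot = ArcSwap::from_pointee(41u32);
    map_in_place(&slot, |old| Arc::new(*old + 1));
    assert_eq!(**slot.load(), 42);
}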


@@ -21,7 +21,6 @@ use crate::EventKind;
 use crate::Id;
 use crate::Revision;
 use std::cell::RefCell;
-use std::sync::Arc;
 
 /// State that is specific to a single execution thread.
 ///
@@ -325,7 +324,8 @@ impl std::panic::RefUnwindSafe for ZalsaLocal {}
 
 /// Summarizes "all the inputs that a query used"
 /// and "all the outputs its wrote to"
-#[derive(Debug, Clone)]
+#[derive(Debug)]
+// #[derive(Clone)] cloning this is expensive, so we don't derive
 pub(crate) struct QueryRevisions {
     /// The most revision in which some input changed.
     pub(crate) changed_at: Revision,
@@ -435,10 +435,6 @@ pub enum EdgeKind {
     Output,
 }
 
-lazy_static::lazy_static! {
-    pub(crate) static ref EMPTY_DEPENDENCIES: Arc<[(EdgeKind, DependencyIndex)]> = Arc::new([]);
-}
-
 /// The edges between a memoized value and other queries in the dependency graph.
 /// These edges include both dependency edges
 /// e.g., when creating the memoized value for Q0 executed another function Q1)
@@ -458,7 +454,8 @@ pub struct QueryEdges {
     /// Important:
     ///
     /// * The inputs must be in **execution order** for the red-green algorithm to work.
-    pub input_outputs: Arc<[(EdgeKind, DependencyIndex)]>,
+    // pub input_outputs: ThinBox<[(EdgeKind, DependencyIndex)]>, once that is a thing
+    pub input_outputs: Box<[(EdgeKind, DependencyIndex)]>,
 }
 
 impl QueryEdges {
@@ -483,7 +480,7 @@ impl QueryEdges {
     }
 
     /// Creates a new `QueryEdges`; the values given for each field must meet struct invariants.
-    pub(crate) fn new(input_outputs: Arc<[(EdgeKind, DependencyIndex)]>) -> Self {
+    pub(crate) fn new(input_outputs: Box<[(EdgeKind, DependencyIndex)]>) -> Self {
         Self { input_outputs }
     }
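
Why swapping the `input_outputs` field from `Arc<[…]>` to `Box<[…]>` is cheap: both handles are two-word fat pointers, but the `Arc` heap block also carries strong/weak counters, and every clone or drop of the old field was an atomic operation. A standalone comparison (illustrative only, not salsa code):

use std::mem::size_of;
use std::sync::Arc;

fn main() {
    // Both are fat pointers: data pointer plus length.
    assert_eq!(size_of::<Box<[u64]>>(), size_of::<Arc<[u64]>>());

    let boxed: Box<[u64]> = (0..3u64).collect();
    let shared: Arc<[u64]> = (0..3u64).collect();

    let moved = boxed; // `Box`: ownership simply moves, no bookkeeping
    let aliased = Arc::clone(&shared); // `Arc`: atomic refcount bump
    assert_eq!(moved.len(), aliased.len());
}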
}