Mirror of https://github.com/salsa-rs/salsa.git (synced 2025-02-02 09:46:06 +00:00)
move to dash-map
Because dash-map isn't indexable, we need to store a copy of the key and have two separate maps. I expect to iterate on the best data structures here.
parent c0d9070a64
commit 685fccc9c5
3 changed files with 79 additions and 42 deletions
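Before the file diffs, here is a minimal self-contained sketch of the layout the commit description refers to: one concurrent map from key to a small integer index, a second concurrent map from that index back to a copy of the key plus its slot, and an atomic counter that hands out fresh indices. This is illustrative only: the names TwoMapStore and get_or_insert_with and the (K, Arc<V>) payload are invented here, not the types in the diff; the actual types (DerivedStorage, KeySlot, FxDashMap) appear in src/derived.rs below.

// Illustrative sketch only; "TwoMapStore" and "get_or_insert_with" are invented
// names. The real storage keeps Arc<Slot<Q, MP>> values and a DatabaseKeyIndex.
use crossbeam_utils::atomic::AtomicCell;
use dashmap::DashMap;
use std::hash::Hash;
use std::sync::Arc;

struct TwoMapStore<K: Eq + Hash + Clone, V> {
    indices: AtomicCell<u32>,            // hands out fresh key indices
    index_map: DashMap<K, u32>,          // key -> index
    slot_map: DashMap<u32, (K, Arc<V>)>, // index -> (copy of key, slot)
}

impl<K: Eq + Hash + Clone, V> TwoMapStore<K, V> {
    fn new() -> Self {
        TwoMapStore {
            indices: AtomicCell::new(0),
            index_map: DashMap::new(),
            slot_map: DashMap::new(),
        }
    }

    fn get_or_insert_with(&self, key: &K, make: impl FnOnce(u32) -> V) -> Arc<V> {
        // Common case: the key already has an index; look the slot up by index.
        if let Some(index) = self.index_map.get(key) {
            return self.slot_map.get(&*index).unwrap().1.clone();
        }
        // Less common case: reserve an index, publish the slot first and only
        // then the index, so other threads never see an index without a slot.
        match self.index_map.entry(key.clone()) {
            dashmap::mapref::entry::Entry::Occupied(entry) => {
                self.slot_map.get(entry.get()).unwrap().1.clone()
            }
            dashmap::mapref::entry::Entry::Vacant(entry) => {
                let index = self.indices.fetch_add(1);
                let slot = Arc::new(make(index));
                self.slot_map.insert(index, (key.clone(), slot.clone()));
                entry.insert(index);
                slot
            }
        }
    }
}

fn main() {
    let store: TwoMapStore<String, u64> = TwoMapStore::new();
    let a = store.get_or_insert_with(&"a".to_string(), |_| 1);
    let again = store.get_or_insert_with(&"a".to_string(), |_| 2);
    assert_eq!(*a, 1);
    assert_eq!(*again, 1); // second call reuses the existing slot
}

The order of operations in the vacant branch mirrors the "store the slot before the index" comment in the diff: any thread that finds an index in index_map is guaranteed to find the matching entry in slot_map.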
Cargo.toml

@@ -8,6 +8,8 @@ repository = "https://github.com/salsa-rs/salsa"
 description = "A generic framework for on-demand, incrementalized computation (experimental)"
 
 [dependencies]
+crossbeam-utils = { version = "0.8", default-features = false }
+dashmap = "4.0.2"
 indexmap = "1.0.1"
 lock_api = "0.4"
 log = "0.4.5"
src/derived.rs (118 changed lines)
@@ -1,6 +1,6 @@
 use crate::debug::TableEntry;
 use crate::durability::Durability;
-use crate::hash::FxIndexMap;
+use crate::hash::FxDashMap;
 use crate::lru::Lru;
 use crate::plumbing::DerivedQueryStorageOps;
 use crate::plumbing::LruQueryStorageOps;
@@ -10,9 +10,8 @@ use crate::plumbing::QueryStorageOps;
 use crate::runtime::StampedValue;
 use crate::Runtime;
 use crate::{Database, DatabaseKeyIndex, QueryDb, Revision};
-use parking_lot::RwLock;
+use crossbeam_utils::atomic::AtomicCell;
 use std::borrow::Borrow;
-use std::convert::TryFrom;
 use std::hash::Hash;
 use std::marker::PhantomData;
 use std::sync::Arc;
@@ -39,10 +38,23 @@ where
 {
     group_index: u16,
     lru_list: Lru<Slot<Q, MP>>,
-    slot_map: RwLock<FxIndexMap<Q::Key, Arc<Slot<Q, MP>>>>,
+    indices: AtomicCell<u32>,
+    index_map: FxDashMap<Q::Key, DerivedKeyIndex>,
+    slot_map: FxDashMap<DerivedKeyIndex, KeySlot<Q, MP>>,
     policy: PhantomData<MP>,
 }
 
+struct KeySlot<Q, MP>
+where
+    Q: QueryFunction,
+    MP: MemoizationPolicy<Q>,
+{
+    key: Q::Key,
+    slot: Arc<Slot<Q, MP>>,
+}
+
+type DerivedKeyIndex = u32;
+
 impl<Q, MP> std::panic::RefUnwindSafe for DerivedStorage<Q, MP>
 where
     Q: QueryFunction,
@@ -95,22 +107,52 @@ where
     Q: QueryFunction,
     MP: MemoizationPolicy<Q>,
 {
-    fn slot(&self, key: &Q::Key) -> Arc<Slot<Q, MP>> {
-        if let Some(v) = self.slot_map.read().get(key) {
-            return v.clone();
+    fn slot_for_key(&self, key: &Q::Key) -> Arc<Slot<Q, MP>> {
+        // Common case: get an existing key
+        if let Some(v) = self.index_map.get(key) {
+            let index = *v;
+
+            // release the read-write lock early, for no particular reason
+            // apart from it bothers me
+            drop(v);
+
+            return self.slot_for_key_index(index);
         }
 
-        let mut write = self.slot_map.write();
-        let entry = write.entry(key.clone());
-        let key_index = u32::try_from(entry.index()).unwrap();
-        let database_key_index = DatabaseKeyIndex {
-            group_index: self.group_index,
-            query_index: Q::QUERY_INDEX,
-            key_index,
-        };
-        entry
-            .or_insert_with(|| Arc::new(Slot::new(key.clone(), database_key_index)))
-            .clone()
+        // Less common case: (potentially) create a new slot
+        match self.index_map.entry(key.clone()) {
+            dashmap::mapref::entry::Entry::Occupied(entry) => self.slot_for_key_index(*entry.get()),
+            dashmap::mapref::entry::Entry::Vacant(entry) => {
+                let key_index = self.indices.fetch_add(1);
+                let database_key_index = DatabaseKeyIndex {
+                    group_index: self.group_index,
+                    query_index: Q::QUERY_INDEX,
+                    key_index,
+                };
+                let slot = Arc::new(Slot::new(key.clone(), database_key_index));
+                // Subtle: store the new slot *before* the new index, so that
+                // other threads only see the new index once the slot is also available.
+                self.slot_map.insert(
+                    key_index,
+                    KeySlot {
+                        key: key.clone(),
+                        slot: slot.clone(),
+                    },
+                );
+                entry.insert(key_index);
+                slot
+            }
+        }
+    }
+
+    fn slot_for_key_index(&self, index: DerivedKeyIndex) -> Arc<Slot<Q, MP>> {
+        return self.slot_map.get(&index).unwrap().slot.clone();
+    }
+
+    fn slot_for_db_index(&self, index: DatabaseKeyIndex) -> Arc<Slot<Q, MP>> {
+        assert_eq!(index.group_index, self.group_index);
+        assert_eq!(index.query_index, Q::QUERY_INDEX);
+        self.slot_for_key_index(index.key_index)
     }
 }
 
@@ -124,9 +166,11 @@ where
     fn new(group_index: u16) -> Self {
         DerivedStorage {
             group_index,
-            slot_map: RwLock::new(FxIndexMap::default()),
+            index_map: Default::default(),
+            slot_map: Default::default(),
             lru_list: Default::default(),
             policy: PhantomData,
+            indices: Default::default(),
         }
     }
 
@@ -138,9 +182,8 @@ where
     ) -> std::fmt::Result {
         assert_eq!(index.group_index, self.group_index);
         assert_eq!(index.query_index, Q::QUERY_INDEX);
-        let slot_map = self.slot_map.read();
-        let key = slot_map.get_index(index.key_index as usize).unwrap().0;
-        write!(fmt, "{}({:?})", Q::QUERY_NAME, key)
+        let key_slot = self.slot_map.get(&index.key_index).unwrap();
+        write!(fmt, "{}({:?})", Q::QUERY_NAME, key_slot.key)
     }
 
     fn maybe_changed_after(
@@ -149,23 +192,15 @@ where
         input: DatabaseKeyIndex,
         revision: Revision,
     ) -> bool {
-        assert_eq!(input.group_index, self.group_index);
-        assert_eq!(input.query_index, Q::QUERY_INDEX);
         debug_assert!(revision < db.salsa_runtime().current_revision());
-        let slot = self
-            .slot_map
-            .read()
-            .get_index(input.key_index as usize)
-            .unwrap()
-            .1
-            .clone();
+        let slot = self.slot_for_db_index(input);
         slot.maybe_changed_after(db, revision)
     }
 
     fn fetch(&self, db: &<Q as QueryDb<'_>>::DynDb, key: &Q::Key) -> Q::Value {
         db.unwind_if_cancelled();
 
-        let slot = self.slot(key);
+        let slot = self.slot_for_key(key);
         let StampedValue {
             value,
             durability,
@@ -187,17 +222,16 @@ where
     }
 
     fn durability(&self, db: &<Q as QueryDb<'_>>::DynDb, key: &Q::Key) -> Durability {
-        self.slot(key).durability(db)
+        self.slot_for_key(key).durability(db)
     }
 
     fn entries<C>(&self, _db: &<Q as QueryDb<'_>>::DynDb) -> C
     where
         C: std::iter::FromIterator<TableEntry<Q::Key, Q::Value>>,
     {
-        let slot_map = self.slot_map.read();
-        slot_map
-            .values()
-            .filter_map(|slot| slot.as_table_entry())
+        self.slot_map
+            .iter()
+            .filter_map(|r| r.value().slot.as_table_entry())
             .collect()
     }
 }
@@ -209,7 +243,9 @@ where
 {
     fn purge(&self) {
         self.lru_list.purge();
-        *self.slot_map.write() = Default::default();
+        self.indices.store(0);
+        self.index_map.clear();
+        self.slot_map.clear();
     }
 }
 
@@ -234,14 +270,12 @@ where
         Q::Key: Borrow<S>,
     {
         runtime.with_incremented_revision(|new_revision| {
-            let map_read = self.slot_map.read();
-
-            if let Some(slot) = map_read.get(key) {
+            if let Some(key_index) = self.index_map.get(key) {
+                let slot = self.slot_for_key_index(*key_index);
                 if let Some(durability) = slot.invalidate(new_revision) {
                     return Some(durability);
                 }
             }
-
             None
         })
     }
src/hash.rs

@@ -1,3 +1,4 @@
 pub(crate) type FxHasher = std::hash::BuildHasherDefault<rustc_hash::FxHasher>;
 pub(crate) type FxIndexSet<K> = indexmap::IndexSet<K, FxHasher>;
 pub(crate) type FxIndexMap<K, V> = indexmap::IndexMap<K, V, FxHasher>;
+pub(crate) type FxDashMap<K, V> = dashmap::DashMap<K, V, FxHasher>;
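One small note on the new alias, as a hedged standalone sketch (the demo function and value types here are invented for illustration): because the Fx hasher type implements Default, FxDashMap::default() already yields a DashMap that hashes with it, which is what lets DerivedStorage::new() above initialize index_map and slot_map via Default::default(). Constructing the map explicitly with dashmap's with_hasher is equivalent.

// Illustrative only: a standalone equivalent of the FxDashMap alias above.
use std::hash::BuildHasherDefault;

type FxHasher = BuildHasherDefault<rustc_hash::FxHasher>;
type FxDashMap<K, V> = dashmap::DashMap<K, V, FxHasher>;

fn demo() {
    // Default::default() picks up the Fx hasher through the alias.
    let map: FxDashMap<u32, &'static str> = FxDashMap::default();
    map.insert(1, "one");

    // Equivalent explicit construction.
    let explicit: FxDashMap<u32, &'static str> =
        dashmap::DashMap::with_hasher(FxHasher::default());
    explicit.insert(2, "two");

    assert_eq!(*map.get(&1).unwrap(), "one");
}

fn main() {
    demo();
}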