Mirror of https://github.com/salsa-rs/salsa.git, synced 2025-02-02 01:43:39 +00:00
Fix clippy issues
This commit is contained in:
parent
dc696726cf
commit
73102b1e8e
36 changed files with 69 additions and 97 deletions
.gitignore (vendored): 1 addition

@@ -3,3 +3,4 @@
 Cargo.lock
 TAGS
 nikom
+.idea
@@ -3,6 +3,7 @@ use std::sync::{Arc, Mutex};
 use salsa::DebugWithDb;
 
 // ANCHOR: db_struct
+#[derive(Default)]
 #[salsa::db(crate::Jar)]
 pub(crate) struct Database {
     storage: salsa::Storage<Self>,
@@ -34,17 +35,6 @@ impl Database {
     }
 }
 
-// ANCHOR: default_impl
-impl Default for Database {
-    fn default() -> Self {
-        Self {
-            storage: Default::default(),
-            logs: None,
-        }
-    }
-}
-// ANCHOR_END: default_impl
-
 // ANCHOR: db_impl
 impl salsa::Database for Database {
     fn salsa_runtime(&self) -> &salsa::Runtime {
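The pair of hunks above swaps the hand-written `Default` impl for `#[derive(Default)]`. A minimal standalone sketch of why the two are equivalent; the field types below are stand-ins for illustration, not salsa's real `Storage` type:

// Deriving Default is equivalent to the removed manual impl when every
// field's own Default produces the desired initial value.
#[derive(Default)]
struct Database {
    storage: String,            // stand-in for salsa::Storage<Self>
    logs: Option<Vec<String>>,  // Option::default() is None
}

fn main() {
    let db = Database::default();
    assert!(db.storage.is_empty());
    assert!(db.logs.is_none());
}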
@@ -54,15 +44,11 @@ impl salsa::Database for Database {
     fn salsa_event(&self, event: salsa::Event) {
         // Log interesting events, if logging is enabled
         if let Some(logs) = &self.logs {
-            match event.kind {
-                salsa::EventKind::WillExecute { .. } => {
-                    logs.lock()
-                        .unwrap()
-                        .push(format!("Event: {:?}", event.debug(self)));
-                }
-                _ => {
-                    // don't log boring events
-                }
+            // don't log boring events
+            if let salsa::EventKind::WillExecute { .. } = event.kind {
+                logs.lock()
+                    .unwrap()
+                    .push(format!("Event: {:?}", event.debug(self)));
             }
         }
     }
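This is the rewrite clippy's single_match lint asks for: a match with one interesting arm and an empty catch-all reads better as an if-let. A reduced, runnable sketch of the same shape; the enum and logging here are illustrative, not salsa's actual types:

enum EventKind {
    WillExecute,
    DidValidate,
}

// Before: a match with a single interesting arm and an empty `_ => {}` arm.
fn log_event_before(kind: EventKind, logs: &mut Vec<String>) {
    match kind {
        EventKind::WillExecute => logs.push("will execute".to_string()),
        _ => {
            // don't log boring events
        }
    }
}

// After: clippy::single_match prefers `if let` for this shape.
fn log_event_after(kind: EventKind, logs: &mut Vec<String>) {
    if let EventKind::WillExecute = kind {
        logs.push("will execute".to_string());
    }
}

fn main() {
    let mut logs = Vec::new();
    log_event_before(EventKind::WillExecute, &mut logs);
    log_event_after(EventKind::DidValidate, &mut logs); // boring: not logged
    assert_eq!(logs.len(), 1);
}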
@@ -1,6 +1,7 @@
+#![allow(clippy::needless_borrow)]
 use derive_new::new;
 use ordered_float::OrderedFloat;
 use salsa::debug::DebugWithDb;
 
 // ANCHOR: input
 #[salsa::input]
@@ -25,7 +25,7 @@ pub fn parse_statements(db: &dyn crate::Db, source: SourceProgram) -> Program {
         parser.skip_whitespace();
 
         // If there are no more tokens, break
-        if let None = parser.peek() {
+        if parser.peek().is_none() {
             break;
         }
 
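Here, and in the `low_op`, `high_op`, and `parse_expression2` hunks below, pattern matching used only to test for `Some`/`None` is replaced by `is_some()`/`is_none()` (clippy's redundant_pattern_matching lint). A tiny before/after sketch with an assumed `tokens` vector:

fn main() {
    let tokens: Vec<char> = Vec::new();
    let peeked = tokens.first();

    // Before: pattern matching used only to test for None
    // (clippy::redundant_pattern_matching).
    if let None = peeked {
        println!("no more tokens (pattern match)");
    }

    // After: the same check as the commit writes it.
    if peeked.is_none() {
        println!("no more tokens (is_none)");
    }
}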
@@ -166,9 +166,9 @@ impl Parser<'_> {
     }
 
     fn low_op(&mut self) -> Option<Op> {
-        if let Some(_) = self.ch('+') {
+        if self.ch('+').is_some() {
             Some(Op::Add)
-        } else if let Some(_) = self.ch('-') {
+        } else if self.ch('-').is_some() {
             Some(Op::Subtract)
         } else {
             None
@@ -183,9 +183,9 @@ impl Parser<'_> {
     }
 
     fn high_op(&mut self) -> Option<Op> {
-        if let Some(_) = self.ch('*') {
+        if self.ch('*').is_some() {
             Some(Op::Multiply)
-        } else if let Some(_) = self.ch('/') {
+        } else if self.ch('/').is_some() {
             Some(Op::Divide)
         } else {
             None
@@ -217,7 +217,7 @@ impl Parser<'_> {
     fn parse_expression2(&mut self) -> Option<Expression> {
         let start_position = self.skip_whitespace();
         if let Some(w) = self.word() {
-            if let Some(_) = self.ch('(') {
+            if self.ch('(').is_some() {
                 let f = FunctionId::new(self.db, w);
                 let args = self.parse_expressions()?;
                 self.ch(')')?;
@@ -237,7 +237,7 @@ impl Parser<'_> {
                 self.span_from(start_position),
                 ExpressionData::Number(OrderedFloat::from(n)),
             ))
-        } else if let Some(_) = self.ch('(') {
+        } else if self.ch('(').is_some() {
             let expr = self.parse_expression()?;
             self.ch(')')?;
             Some(expr)
@@ -297,11 +297,9 @@ impl Parser<'_> {
         // In this loop, if we consume any characters, we always
         // return `Some`.
         let mut s = String::new();
-        let position = self.position;
+        let _position = self.position;
         while let Some(ch) = self.peek() {
-            if ch.is_alphabetic() || ch == '_' {
-                s.push(ch);
-            } else if !s.is_empty() && ch.is_numeric() {
+            if ch.is_alphabetic() || ch == '_' || (!s.is_empty() && ch.is_numeric()) {
                 s.push(ch);
             } else {
                 break;
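The word-scanning loop above merges two branches that had identical bodies into a single condition (the shape clippy flags) and underscores the unused `position` binding. A self-contained sketch of the merged condition, with an illustrative `scan_word` helper that is not part of the calc example:

// Illustrative scanner: a word is alphabetic or '_' characters,
// with digits allowed once the word is non-empty.
fn scan_word(input: &str) -> String {
    let mut s = String::new();
    for ch in input.chars() {
        // Two branches with identical bodies merged into one condition,
        // mirroring the rewrite in the hunk above.
        if ch.is_alphabetic() || ch == '_' || (!s.is_empty() && ch.is_numeric()) {
            s.push(ch);
        } else {
            break;
        }
    }
    s
}

fn main() {
    assert_eq!(scan_word("foo_1 bar"), "foo_1");
    assert_eq!(scan_word("1abc"), ""); // a digit cannot start a word
}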
@@ -321,7 +319,7 @@ impl Parser<'_> {
     ///
     /// Even on failure, only skips whitespace.
     fn number(&mut self) -> Option<f64> {
-        let start_position = self.skip_whitespace();
+        let _start_position = self.skip_whitespace();
 
         self.probe(|this| {
             // 👆 We need the call to `probe` here because we could consume
@@ -329,9 +327,7 @@ impl Parser<'_> {
             // still return `None`.
             let mut s = String::new();
             while let Some(ch) = this.peek() {
-                if ch.is_numeric() {
-                    s.push(ch);
-                } else if ch == '.' {
+                if ch.is_numeric() || ch == '.' {
                     s.push(ch);
                 } else {
                     break;
@@ -58,7 +58,7 @@ impl CheckExpression<'_> {
                 }
             }
             crate::ir::ExpressionData::Call(f, args) => {
-                if let None = self.find_function(*f) {
+                if self.find_function(*f).is_none() {
                     self.report_error(
                         expression.span,
                         format!("the function `{}` is not declared", f.text(self.db)),
@@ -91,8 +91,6 @@ fn check_string(
     expected_diagnostics: expect_test::Expect,
     edits: &[(&str, expect_test::Expect, expect_test::Expect)],
 ) {
-    use salsa::debug::DebugWithDb;
-
     use crate::{db::Database, ir::SourceProgram, parser::parse_statements};
 
     // Create the database
@@ -127,7 +127,7 @@ pub(crate) fn jar_impl(
 
 pub(crate) fn jar_struct(input: &ItemStruct) -> ItemStruct {
     let mut output_struct = input.clone();
-    output_struct.fields = generate_fields(&input).into();
+    output_struct.fields = generate_fields(input).into();
     if output_struct.semi_token.is_none() {
         output_struct.semi_token = Some(Token![;](input.struct_token.span));
     }
@@ -102,7 +102,7 @@ impl<A: AllowedOptions> Options<A> {
             return jar_ty.clone();
         }
 
-        return parse_quote! {crate::Jar};
+        parse_quote! {crate::Jar}
     }
 
     pub(crate) fn should_backdate(&self) -> bool {
@@ -365,6 +365,7 @@ impl SalsaStruct {
     }
 }
 
+#[allow(clippy::type_complexity)]
 pub(crate) const FIELD_OPTION_ATTRIBUTES: &[(&str, fn(&syn::Attribute, &mut SalsaField))] = &[
     ("id", |_, ef| ef.has_id_attr = true),
     ("return_ref", |_, ef| ef.has_ref_attr = true),
@@ -10,7 +10,7 @@ pub(crate) fn tracked(
         syn::Item::Fn(item) => crate::tracked_fn::tracked(args, item),
         _ => syn::Error::new(
             item.span(),
-            &format!("tracked can be applied to structs and functions only"),
+            &"tracked can be applied to structs and functions only".to_string(),
         )
         .into_compile_error()
         .into(),
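A `format!` call with nothing to interpolate is clippy's useless_format lint; the commit converts the literal directly. A one-line demonstration of the equivalence:

fn main() {
    // Before: format! with nothing to interpolate (clippy::useless_format).
    let before = format!("tracked can be applied to structs and functions only");
    // After: convert the literal directly.
    let after = "tracked can be applied to structs and functions only".to_string();
    assert_eq!(before, after);
}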
@@ -16,7 +16,7 @@ pub trait Accumulator {
     type Data: Clone;
     type Jar;
 
-    fn accumulator_ingredient<'db, Db>(db: &'db Db) -> &'db AccumulatorIngredient<Self::Data>
+    fn accumulator_ingredient<Db>(db: &Db) -> &AccumulatorIngredient<Self::Data>
     where
         Db: ?Sized + HasJar<Self::Jar>;
 }
@@ -21,7 +21,7 @@ where
     // consumers must be aware of. Becoming *more* durable
     // is not. See the test `constant_to_non_constant`.
     if revisions.durability >= old_memo.revisions.durability
-        && C::should_backdate_value(old_value, &value)
+        && C::should_backdate_value(old_value, value)
     {
         log::debug!(
             "value is equal, back-dating to {:?}",
@@ -24,6 +24,7 @@ where
         // two list are in sorted order, we can merge them in linear time.
         while let (Some(&old_output), Some(&new_output)) = (old_outputs.peek(), new_outputs.peek())
         {
+            #[allow(clippy::comparison_chain)]
             if old_output < new_output {
                 // Output that was generated but is no longer.
                 Self::report_stale_output(db, key, old_output);
@@ -36,7 +36,7 @@ where
         db.salsa_event(Event {
             runtime_id: runtime.id(),
             kind: EventKind::WillExecute {
-                database_key: database_key_index.into(),
+                database_key: database_key_index,
             },
         });
 
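This hunk, and several later ones touching `database_key` and `entity`, drop an `.into()` call whose source and target types are already the same (clippy's useless_conversion lint). A small sketch with an assumed `DatabaseKeyIndex` newtype, not salsa's real definition:

// Assumed newtype for illustration only.
#[derive(Debug, Clone, Copy, PartialEq)]
struct DatabaseKeyIndex(u32);

struct Event {
    database_key: DatabaseKeyIndex,
}

fn main() {
    let database_key_index = DatabaseKeyIndex(7);

    // Before: converting DatabaseKeyIndex into DatabaseKeyIndex is a no-op
    // (clippy::useless_conversion).
    let before = Event { database_key: database_key_index.into() };

    // After: pass the value as-is.
    let after = Event { database_key: database_key_index };

    assert_eq!(before.database_key, after.database_key);
}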
@@ -87,7 +87,7 @@ where
         // old value.
         if let Some(old_memo) = &opt_old_memo {
             self.backdate_if_appropriate(old_memo, &mut revisions, &value);
-            self.diff_outputs(db, database_key_index, &old_memo, &revisions);
+            self.diff_outputs(db, database_key_index, old_memo, &revisions);
         }
 
         let value = self
@@ -75,14 +75,12 @@ where
         // This time we can do a *deep* verify. Because this can recurse, don't hold the arcswap guard.
         let opt_old_memo = self.memo_map.get(key).map(Guard::into_inner);
         if let Some(old_memo) = &opt_old_memo {
-            if old_memo.value.is_some() {
-                if self.deep_verify_memo(db, old_memo, &active_query) {
-                    let value = unsafe {
-                        // Unsafety invariant: memo is present in memo_map.
-                        self.extend_memo_lifetime(old_memo).unwrap()
-                    };
-                    return Some(old_memo.revisions.stamped_value(value));
-                }
+            if old_memo.value.is_some() && self.deep_verify_memo(db, old_memo, &active_query) {
+                let value = unsafe {
+                    // Unsafety invariant: memo is present in memo_map.
+                    self.extend_memo_lifetime(old_memo).unwrap()
+                };
+                return Some(old_memo.revisions.stamped_value(value));
             }
         }
 
@@ -61,7 +61,6 @@ impl<K: AsId, V> MemoMap<K, V> {
                 // assigned as output of another query
                 // or those with untracked inputs
                 // as their values cannot be reconstructed.
-                return;
             }
 
             QueryOrigin::Derived(_) => {
@@ -123,7 +122,7 @@ impl<V> Memo<V> {
         db.salsa_event(Event {
             runtime_id: runtime.id(),
             kind: EventKind::DidValidateMemoizedValue {
-                database_key: database_key_index.into(),
+                database_key: database_key_index,
             },
         });
 
@@ -88,6 +88,5 @@ impl AsId for () {
 
     fn from_id(id: Id) -> Self {
         assert_eq!(0, id.as_u32());
-        ()
     }
 }
@@ -257,6 +257,7 @@ pub struct IdentityInterner<Id: AsId> {
 }
 
 impl<Id: AsId> IdentityInterner<Id> {
+    #[allow(clippy::new_without_default)]
     pub fn new() -> Self {
         IdentityInterner { data: PhantomData }
     }
@@ -51,6 +51,7 @@ pub struct Routes<DB: HasJars> {
     /// Vector indexed by ingredient index. Yields the `DynRoute`,
     /// a function which can be applied to the `DB::Jars` to yield
     /// the `dyn Ingredient.
+    #[allow(clippy::type_complexity)]
     routes: Vec<(Box<DynRoute<DB>>, Box<DynMutRoute<DB>>)>,
 
     /// Indices of routes which need a 'reset' call.
@@ -55,6 +55,8 @@ pub(crate) struct StampedValue<V> {
 }
 
 impl<V> StampedValue<V> {
+    // FIXME: Use or remove this.
+    #[allow(dead_code)]
     pub(crate) fn merge_revision_info<U>(&mut self, other: &StampedValue<U>) {
         self.durability = self.durability.min(other.durability);
         self.changed_at = self.changed_at.max(other.changed_at);
@@ -284,7 +286,7 @@ impl Runtime {
             runtime_id: self.id(),
             kind: EventKind::WillBlockOn {
                 other_runtime_id: other_id,
-                database_key: database_key.into(),
+                database_key,
             },
         });
 
@@ -104,7 +104,7 @@ impl ActiveQuery {
             self.dependencies
                 .iter()
                 .copied()
-                .chain(self.outputs.iter().map(|&o| o.into()))
+                .chain(self.outputs.iter().copied())
                 .collect()
         };
 
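Mapping `|&o| o.into()` over an iterator of `Copy` values is spelled more directly as `.copied()`. A minimal sketch with plain `u32` values standing in for the real output indices:

fn main() {
    let outputs: Vec<u32> = vec![1, 2, 3];

    // Before: a closure that dereferences each item (plus a no-op `.into()`).
    let before: Vec<u32> = outputs.iter().map(|&o| o.into()).collect();

    // After: `.copied()` states the intent directly.
    let after: Vec<u32> = outputs.iter().copied().collect();

    assert_eq!(before, after);
}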
@@ -200,7 +200,7 @@ impl LocalState {
     pub(super) fn is_output(&self, entity: DatabaseKeyIndex) -> bool {
         self.with_query_stack(|stack| {
             if let Some(top_query) = stack.last_mut() {
-                top_query.is_output(entity.into())
+                top_query.is_output(entity)
             } else {
                 false
             }
@@ -259,6 +259,8 @@ impl LocalState {
 
     /// Update the top query on the stack to act as though it read a value
     /// of durability `durability` which changed in `revision`.
+    // FIXME: Use or remove this.
+    #[allow(dead_code)]
     pub(super) fn report_synthetic_read(&self, durability: Durability, revision: Revision) {
         self.with_query_stack(|stack| {
             if let Some(top_query) = stack.last_mut() {
@@ -183,9 +183,9 @@ pub trait DbWithJar<J>: HasJar<J> + Database {
 }
 
 pub trait JarFromJars<J>: HasJars {
-    fn jar_from_jars<'db>(jars: &Self::Jars) -> &J;
+    fn jar_from_jars(jars: &Self::Jars) -> &J;
 
-    fn jar_from_jars_mut<'db>(jars: &mut Self::Jars) -> &mut J;
+    fn jar_from_jars_mut(jars: &mut Self::Jars) -> &mut J;
 }
 
 pub trait HasJar<J> {
@@ -152,11 +152,10 @@ where
     fn mark_validated_output(
         &self,
         _db: &DB,
-        executor: DatabaseKeyIndex,
-        output_key: Option<crate::Id>,
+        _executor: DatabaseKeyIndex,
+        _output_key: Option<crate::Id>,
     ) {
         // FIXME
-        drop((executor, output_key));
     }
 
     fn remove_stale_output(
@@ -206,7 +206,7 @@ pub(crate) fn database(args: TokenStream, input: TokenStream) -> TokenStream {
 
     if std::env::var("SALSA_DUMP").is_ok() {
         println!("~~~ database_storage");
-        println!("{}", output.to_string());
+        println!("{}", output);
         println!("~~~ database_storage");
     }
 
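Calling `.to_string()` on a value that is about to be formatted with `{}` is clippy's to_string_in_format_args lint; the `Display` machinery already does that work. A trivial sketch, using a string literal in place of the proc-macro `TokenStream`:

fn main() {
    // Stand-in for the proc macro's TokenStream; any Display type behaves the same.
    let output = "fn generated() {}";

    // Before: an extra allocation inside the format call
    // (clippy::to_string_in_format_args).
    println!("{}", output.to_string());

    // After: Display formatting handles it without the detour.
    println!("{}", output);
}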
@@ -658,7 +658,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
 
     if std::env::var("SALSA_DUMP").is_ok() {
         println!("~~~ query_group");
-        println!("{}", output.to_string());
+        println!("{}", output);
         println!("~~~ query_group");
     }
 
@@ -18,7 +18,7 @@ pub trait HasLogger {
     /// clearing the logged events. This takes `&mut self` because
     /// it is meant to be run from outside any tracked functions.
     fn assert_logs(&mut self, expected: expect_test::Expect) {
-        let logs = std::mem::replace(&mut *self.logger().logs.lock().unwrap(), vec![]);
+        let logs = std::mem::take(&mut *self.logger().logs.lock().unwrap());
         expected.assert_eq(&format!("{:#?}", logs));
     }
 
@@ -26,7 +26,7 @@ pub trait HasLogger {
     /// clearing the logged events. This takes `&mut self` because
     /// it is meant to be run from outside any tracked functions.
     fn assert_logs_len(&mut self, expected: usize) {
-        let logs = std::mem::replace(&mut *self.logger().logs.lock().unwrap(), vec![]);
+        let logs = std::mem::take(&mut *self.logger().logs.lock().unwrap());
         assert_eq!(logs.len(), expected);
     }
 }
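Both `assert_logs` hunks replace `mem::replace(..., vec![])` with `mem::take`, which swaps in `Default::default()` for you (clippy's mem_replace_with_default lint). A runnable sketch on a plain `Vec<String>`:

fn main() {
    let mut logs: Vec<String> = vec!["one".to_string(), "two".to_string()];

    // Before: replace the contents with an explicit empty vector
    // (clippy::mem_replace_with_default).
    let drained = std::mem::replace(&mut logs, vec![]);
    assert_eq!(drained.len(), 2);
    assert!(logs.is_empty());

    logs.push("three".to_string());

    // After: mem::take swaps in Default::default() for us.
    let drained = std::mem::take(&mut logs);
    assert_eq!(drained, vec!["three".to_string()]);
    assert!(logs.is_empty());
}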
@@ -96,7 +96,7 @@ fn basic() {
 
     // Creates 3 tracked structs
     let input = MyInput::new(&mut db, 3);
-    assert_eq!(final_result(&db, input), 2 * 2 + 1 * 2 + 0 * 2);
+    assert_eq!(final_result(&db, input), 2 * 2 + 2);
     db.assert_logs(expect![[r#"
         [
             "final_result(MyInput(Id { value: 1 }))",
@@ -118,7 +118,7 @@ fn basic() {
     // * the `copy_field` result
 
     input.set_field(&mut db).to(2);
-    assert_eq!(final_result(&db, input), 1 * 2 + 0 * 2);
+    assert_eq!(final_result(&db, input), 2);
     db.assert_logs(expect![[r#"
         [
             "intermediate_result(MyInput(Id { value: 1 }))",
@@ -89,7 +89,7 @@ fn basic() {
 
     // Creates 3 tracked structs
     let input = MyInput::new(&mut db, 3);
-    assert_eq!(final_result(&db, input), 2 * 2 + 1 * 2 + 0 * 2);
+    assert_eq!(final_result(&db, input), 2 * 2 + 2);
     db.assert_logs(expect![[r#"
         [
             "final_result(MyInput(Id { value: 1 }))",
@@ -104,7 +104,7 @@ fn basic() {
     // * the struct's field
     // * the `contribution_from_struct` result
     input.set_field(&mut db).to(2);
-    assert_eq!(final_result(&db, input), 1 * 2 + 0 * 2);
+    assert_eq!(final_result(&db, input), 2);
     db.assert_logs(expect![[r#"
         [
             "intermediate_result(MyInput(Id { value: 1 }))",
@@ -45,7 +45,7 @@ impl<K, V> TableEntry<K, V> {
     }
 }
 
-impl<'d, Q> DebugQueryTable for QueryTable<'_, Q>
+impl<Q> DebugQueryTable for QueryTable<'_, Q>
 where
     Q: Query,
     Q::Storage: QueryStorageOps<Q>,
@@ -107,7 +107,7 @@ where
         DatabaseKeyIndex {
             group_index: self.group_index,
             query_index: Q::QUERY_INDEX,
-            key_index: key_index,
+            key_index,
         }
     }
 
@@ -49,11 +49,7 @@ where
         K: Borrow<S>,
     {
         // Common case: get an existing key
-        if let Some(v) = self.index_map.get(key) {
-            Some(*v)
-        } else {
-            None
-        }
+        self.index_map.get(key).map(|v| *v)
     }
 
     pub(super) fn key_for_key_index(&self, key_index: DerivedKeyIndex) -> K {
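An `if let Some(..) { Some(..) } else { None }` that only re-wraps the option collapses into a single `.map()` over the lookup result (the manual_map pattern). A sketch against an ordinary `HashMap`, not the interner's real `index_map`:

use std::collections::HashMap;

fn main() {
    let mut index_map: HashMap<&str, u32> = HashMap::new();
    index_map.insert("key", 42);

    // Before: an if-let that only re-wraps the Option (clippy::manual_map).
    let before: Option<u32> = if let Some(v) = index_map.get("key") {
        Some(*v)
    } else {
        None
    };

    // After: the lookup collapses to a single map over the Option,
    // as in the hunk above.
    let after: Option<u32> = index_map.get("key").map(|v| *v);

    assert_eq!(before, after);
}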
@@ -39,7 +39,7 @@ impl<V> MemoMap<V> {
         // but I can't see a clean way to encapsulate it otherwise. I suppose
         // it could take a closure, but it seems silly.
         match self.map.entry(key) {
-            Entry::Vacant(_) => return,
+            Entry::Vacant(_) => (),
             Entry::Occupied(entry) => {
                 let memo = entry.get().load();
 
@@ -157,10 +157,9 @@ where
         slots
             .values()
             .map(|slot| {
-                let value = match &*slot.stamped_value.read() {
-                    Some(stamped_value) => Some(stamped_value.value.clone()),
-                    None => None,
-                };
+                let value = (*slot.stamped_value.read())
+                    .as_ref()
+                    .map(|stamped_value| stamped_value.value.clone());
                 TableEntry::new(slot.key.clone(), value)
             })
             .collect()
@@ -239,7 +238,7 @@ where
         // (Otherwise, someone else might write a *newer* revision
         // into the same cell while we block on the lock.)
         let stamped_value = StampedValue {
-            value: value,
+            value,
             durability,
             changed_at: next_revision,
         };
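`value: value` is clippy's redundant_field_names lint; field-init shorthand says the same thing. A stripped-down `StampedValue` for illustration only (the real struct is generic and has different field types):

struct StampedValue {
    value: u32,
    durability: u8,
}

fn main() {
    let value = 10;
    let durability = 1;

    // Before: the field name repeats the binding (clippy::redundant_field_names).
    let before = StampedValue { value: value, durability };

    // After: field-init shorthand.
    let after = StampedValue { value, durability };

    assert_eq!(before.value, after.value);
    assert_eq!(before.durability, after.durability);
}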
@@ -49,6 +49,7 @@ struct Error {
     cycle: Vec<String>,
 }
 
+#[derive(Default)]
 #[salsa::database(GroupStruct)]
 struct DatabaseImpl {
     storage: salsa::Storage<Self>,
@@ -64,16 +65,6 @@ impl ParallelDatabase for DatabaseImpl {
     }
 }
 
-impl Default for DatabaseImpl {
-    fn default() -> Self {
-        let res = DatabaseImpl {
-            storage: salsa::Storage::default(),
-        };
-
-        res
-    }
-}
-
 /// The queries A, B, and C in `Database` can be configured
 /// to invoke one another in arbitrary ways using this
 /// enum.
@@ -39,6 +39,7 @@ fn c(db: &dyn QueryGroup, x: u32) -> u32 {
 struct Database {
     storage: salsa::Storage<Self>,
     external_state: HashMap<u32, u32>,
+    #[allow(clippy::type_complexity)]
     on_event: Option<Box<dyn Fn(&Database, salsa::Event)>>,
 }
 
@@ -46,7 +46,7 @@ impl<T> WithValue<T> for Cell<T> {
     fn with_value<R>(&self, value: T, closure: impl FnOnce() -> R) -> R {
         let old_value = self.replace(value);
 
-        let result = catch_unwind(AssertUnwindSafe(|| closure()));
+        let result = catch_unwind(AssertUnwindSafe(closure));
 
         self.set(old_value);
 
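Wrapping an existing closure in `|| closure()` just to pass it along is clippy's redundant_closure lint; the closure can be passed directly. A runnable sketch with a trivial closure standing in for the test helper's argument:

use std::panic::{catch_unwind, AssertUnwindSafe};

fn main() {
    // Trivial closure standing in for the helper's `closure` parameter.
    let closure = || 21 * 2;

    // Before: wrapping an existing closure in another closure
    // (clippy::redundant_closure).
    let before = catch_unwind(AssertUnwindSafe(|| closure()));

    // After: pass the closure itself.
    let after = catch_unwind(AssertUnwindSafe(closure));

    assert_eq!(before.unwrap(), 42);
    assert_eq!(after.unwrap(), 42);
}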