2022-11-26 23:57:50 +00:00
|
|
|
// Copyright 2021 The Jujutsu Authors
|
2021-03-10 23:39:16 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2023-01-11 23:42:07 +00:00
|
|
|
use std::collections::HashSet;
|
2022-12-15 22:13:00 +00:00
|
|
|
use std::fmt::Debug;
|
2021-03-14 17:37:28 +00:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
|
|
|
use thiserror::Error;
|
|
|
|
|
2023-01-11 23:42:07 +00:00
|
|
|
use crate::dag_walk;
|
2022-12-15 22:13:00 +00:00
|
|
|
use crate::op_store::{OpStore, OperationId};
|
2021-03-10 23:39:16 +00:00
|
|
|
use crate::operation::Operation;
|
|
|
|
|
2022-03-19 06:11:10 +00:00
|
|
|
/// The current head operation(s) of the operation log, as returned by
/// [`OpHeadsStore::get_heads()`].
pub enum OpHeads {
    /// There's a single latest operation. This is the normal case.
    Single(Operation),
    /// There are multiple latest operations, which means there has been
    /// concurrent operations. These need to be resolved.
    Unresolved {
        // The exclusive lock on the op heads store, handed to the caller so
        // it can record the result of resolving the divergence (presumably
        // via `LockedOpHeads::finish()` — confirm against callers).
        locked_op_heads: LockedOpHeads,
        // The unresolved head operations themselves.
        op_heads: Vec<Operation>,
    },
}
|
|
|
|
|
2021-03-11 05:36:08 +00:00
|
|
|
/// Error resolving the current head(s) of the operation log.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum OpHeadResolutionError {
    /// The operation log contains no head operations at all.
    #[error("Operation log has no heads")]
    NoHeads,
}
|
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
/// Backend-specific hook invoked when a [`LockedOpHeads`] lock is released
/// with a new operation (see `LockedOpHeads::finish()`).
pub trait LockedOpHeadsResolver {
    /// Called with the newly written operation; implementations presumably
    /// update the stored heads and release the lock — confirm against the
    /// concrete store implementations.
    fn finish(&self, new_op: &Operation);
}
|
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
/// Represents a mutually exclusive lock on the OpHeadsStore in local systems.
pub struct LockedOpHeads {
    // Backend-specific behavior for committing a new head and releasing the
    // lock; invoked from `finish()`.
    resolver: Box<dyn LockedOpHeadsResolver>,
}
|
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
impl LockedOpHeads {
|
|
|
|
pub fn new(resolver: Box<dyn LockedOpHeadsResolver>) -> Self {
|
|
|
|
LockedOpHeads { resolver }
|
2021-03-10 23:39:16 +00:00
|
|
|
}
|
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
pub fn finish(self, new_op: &Operation) {
|
|
|
|
self.resolver.finish(new_op);
|
2021-03-10 23:39:16 +00:00
|
|
|
}
|
2022-12-15 22:13:00 +00:00
|
|
|
}
|
2021-03-11 05:36:08 +00:00
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
/// Manages the very set of current heads of the operation log.
|
|
|
|
///
|
|
|
|
/// Implementations should use Arc<> internally, as the lock() and
|
|
|
|
/// get_heads() return values which might outlive the original object. When Rust
|
|
|
|
/// makes it possible for a Trait method to reference &Arc<Self>, this can be
|
|
|
|
/// simplified.
|
|
|
|
pub trait OpHeadsStore: Send + Sync + Debug {
|
2022-12-15 23:47:31 +00:00
|
|
|
fn name(&self) -> &str;
|
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
fn add_op_head(&self, id: &OperationId);
|
2021-03-11 05:36:08 +00:00
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
fn remove_op_head(&self, id: &OperationId);
|
2021-03-12 07:07:47 +00:00
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
fn get_op_heads(&self) -> Vec<OperationId>;
|
2021-03-14 00:33:31 +00:00
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
fn lock(&self) -> LockedOpHeads;
|
2021-03-12 07:30:06 +00:00
|
|
|
|
2022-12-15 22:13:00 +00:00
|
|
|
fn get_heads(&self, op_store: &Arc<dyn OpStore>) -> Result<OpHeads, OpHeadResolutionError>;
|
2023-01-11 23:42:07 +00:00
|
|
|
|
|
|
|
/// Removes operations in the input that are ancestors of other operations
|
|
|
|
/// in the input. The ancestors are removed both from the list and from
|
|
|
|
/// disk.
|
|
|
|
fn handle_ancestor_ops(&self, op_heads: Vec<Operation>) -> Vec<Operation> {
|
|
|
|
let op_head_ids_before: HashSet<_> = op_heads.iter().map(|op| op.id().clone()).collect();
|
|
|
|
let neighbors_fn = |op: &Operation| op.parents();
|
|
|
|
// Remove ancestors so we don't create merge operation with an operation and its
|
|
|
|
// ancestor
|
|
|
|
let op_heads = dag_walk::heads(op_heads, &neighbors_fn, &|op: &Operation| op.id().clone());
|
|
|
|
let op_head_ids_after: HashSet<_> = op_heads.iter().map(|op| op.id().clone()).collect();
|
|
|
|
for removed_op_head in op_head_ids_before.difference(&op_head_ids_after) {
|
|
|
|
self.remove_op_head(removed_op_head);
|
|
|
|
}
|
|
|
|
op_heads.into_iter().collect()
|
|
|
|
}
|
2021-03-11 05:36:08 +00:00
|
|
|
}
|