2022-11-26 23:57:50 +00:00
|
|
|
// Copyright 2020 The Jujutsu Authors
|
2020-12-12 08:00:42 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2021-03-14 17:37:28 +00:00
|
|
|
use std::path::Path;
|
2023-12-29 07:02:56 +00:00
|
|
|
use std::slice;
|
|
|
|
use std::sync::Arc;
|
2024-01-07 09:21:10 +00:00
|
|
|
use std::time::SystemTime;
|
2021-03-14 17:37:28 +00:00
|
|
|
|
2023-12-31 03:15:20 +00:00
|
|
|
use assert_matches::assert_matches;
|
|
|
|
use itertools::Itertools as _;
|
2024-01-04 07:18:04 +00:00
|
|
|
use jj_lib::backend::CommitId;
|
|
|
|
use jj_lib::object_id::ObjectId;
|
2023-12-31 03:15:20 +00:00
|
|
|
use jj_lib::op_walk::{self, OpsetEvaluationError, OpsetResolutionError};
|
2023-12-29 07:02:56 +00:00
|
|
|
use jj_lib::operation::Operation;
|
|
|
|
use jj_lib::repo::{ReadonlyRepo, Repo};
|
2023-12-31 03:15:20 +00:00
|
|
|
use jj_lib::settings::UserSettings;
|
2023-09-19 12:36:45 +00:00
|
|
|
use testutils::{create_random_commit, write_random_commit, TestRepo};
|
2020-12-12 08:00:42 +00:00
|
|
|
|
|
|
|
/// Returns the file names found directly inside `dir`, sorted
/// lexicographically. Panics if the directory cannot be read or a file name
/// is not valid UTF-8 (fine for these tests, which only create ASCII names).
fn list_dir(dir: &Path) -> Vec<String> {
    let mut names: Vec<String> = std::fs::read_dir(dir)
        .unwrap()
        .map(|entry| entry.unwrap().file_name().to_str().unwrap().to_owned())
        .collect();
    names.sort();
    names
}
|
|
|
|
|
2023-09-19 12:36:45 +00:00
|
|
|
#[test]
fn test_unpublished_operation() {
    // A written-but-unpublished operation must not become the op head until
    // publish() is called explicitly.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;

    let op_heads_dir = repo.repo_path().join("op_heads").join("heads");
    let initial_op_id = repo.op_id().clone();
    assert_eq!(list_dir(&op_heads_dir), vec![initial_op_id.hex()]);

    let mut tx = repo.start_transaction(&settings);
    write_random_commit(tx.mut_repo(), &settings);
    let unpublished_op = tx.write("transaction 1");
    let written_op_id = unpublished_op.operation().id().clone();
    assert_ne!(written_op_id, initial_op_id);
    // Writing alone must not move the visible op head.
    assert_eq!(list_dir(&op_heads_dir), vec![initial_op_id.hex()]);
    unpublished_op.publish();
    // Publishing swaps the on-disk head to the new operation.
    assert_eq!(list_dir(&op_heads_dir), vec![written_op_id.hex()]);
}
|
|
|
|
|
2023-09-19 12:36:45 +00:00
|
|
|
#[test]
fn test_consecutive_operations() {
    // Committing transactions one after another should leave exactly one
    // op-head file on disk after each commit.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;

    let op_heads_dir = repo.repo_path().join("op_heads").join("heads");
    let initial_op_id = repo.op_id().clone();
    assert_eq!(list_dir(&op_heads_dir), vec![initial_op_id.hex()]);

    // First transaction: the single head moves from the initial operation to
    // the new one.
    let mut tx1 = repo.start_transaction(&settings);
    write_random_commit(tx1.mut_repo(), &settings);
    let op_id1 = tx1.commit("transaction 1").operation().id().clone();
    assert_ne!(op_id1, initial_op_id);
    assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);

    // Second transaction, started from the reloaded (current) head: again a
    // single head, now pointing at the second operation.
    let repo = repo.reload_at_head(&settings).unwrap();
    let mut tx2 = repo.start_transaction(&settings);
    write_random_commit(tx2.mut_repo(), &settings);
    let op_id2 = tx2.commit("transaction 2").operation().id().clone();
    assert_ne!(op_id2, op_id1);
    assert_ne!(op_id2, initial_op_id);
    assert_eq!(list_dir(&op_heads_dir), vec![op_id2.hex()]);

    // Reloading the repo makes no difference (there are no conflicting
    // operations to resolve).
    let _repo = repo.reload_at_head(&settings).unwrap();
    assert_eq!(list_dir(&op_heads_dir), vec![op_id2.hex()]);
}
|
|
|
|
|
2023-09-19 12:36:45 +00:00
|
|
|
#[test]
fn test_concurrent_operations() {
    // Test that *concurrent* operations result in multiple op-heads on disk
    // until the repo has been reloaded (which currently happens right away).
    // (The old comment said "consecutive", which was a copy-paste error from
    // the test above.)
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;

    let op_heads_dir = repo.repo_path().join("op_heads").join("heads");
    let op_id0 = repo.op_id().clone();
    assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);

    let mut tx1 = repo.start_transaction(&settings);
    write_random_commit(tx1.mut_repo(), &settings);
    let op_id1 = tx1.commit("transaction 1").operation().id().clone();
    assert_ne!(op_id1, op_id0);
    assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);

    // After both transactions have committed, we should have two op-heads on
    // disk, since they were run in parallel (tx2 was started from the same
    // base repo as tx1, not from tx1's result).
    let mut tx2 = repo.start_transaction(&settings);
    write_random_commit(tx2.mut_repo(), &settings);
    let op_id2 = tx2.commit("transaction 2").operation().id().clone();
    assert_ne!(op_id2, op_id0);
    assert_ne!(op_id2, op_id1);
    // list_dir() already returns sorted names, so only the expected side
    // needs sorting (the redundant sort of the actual side was removed).
    let actual_heads_on_disk = list_dir(&op_heads_dir);
    let mut expected_heads_on_disk = vec![op_id1.hex(), op_id2.hex()];
    expected_heads_on_disk.sort();
    assert_eq!(actual_heads_on_disk, expected_heads_on_disk);

    // Reloading the repo causes the operations to be merged into a single new
    // operation, distinct from all previous ones.
    let repo = repo.reload_at_head(&settings).unwrap();
    let merged_op_id = repo.op_id().clone();
    assert_ne!(merged_op_id, op_id0);
    assert_ne!(merged_op_id, op_id1);
    assert_ne!(merged_op_id, op_id2);
    assert_eq!(list_dir(&op_heads_dir), vec![merged_op_id.hex()]);
}
|
|
|
|
|
2023-02-13 17:52:21 +00:00
|
|
|
/// Asserts that the repo view's set of head commits is exactly `expected`.
fn assert_heads(repo: &dyn Repo, expected: Vec<&CommitId>) {
    // Clone the borrowed ids into the owned collection type that
    // `view().heads()` returns, so the sets compare directly.
    let expected = expected.into_iter().cloned().collect();
    assert_eq!(*repo.view().heads(), expected);
}
|
|
|
|
|
2023-09-19 12:36:45 +00:00
|
|
|
#[test]
fn test_isolation() {
    // Test that two concurrent transactions don't see each other's changes.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;

    // Create one commit on top of the root so both transactions have a common
    // commit to rewrite.
    let mut tx = repo.start_transaction(&settings);
    let initial = create_random_commit(tx.mut_repo(), &settings)
        .set_parents(vec![repo.store().root_commit_id().clone()])
        .write()
        .unwrap();
    let repo = tx.commit("test");

    // Start two transactions from the same base repo; each gets its own
    // mutable view.
    let mut tx1 = repo.start_transaction(&settings);
    let mut_repo1 = tx1.mut_repo();
    let mut tx2 = repo.start_transaction(&settings);
    let mut_repo2 = tx2.mut_repo();

    // Before any rewrites, all three views agree on the single head.
    assert_heads(repo.as_ref(), vec![initial.id()]);
    assert_heads(mut_repo1, vec![initial.id()]);
    assert_heads(mut_repo2, vec![initial.id()]);

    // Each transaction rewrites `initial` independently.
    let rewrite1 = mut_repo1
        .rewrite_commit(&settings, &initial)
        .set_description("rewrite1")
        .write()
        .unwrap();
    mut_repo1.rebase_descendants(&settings).unwrap();
    let rewrite2 = mut_repo2
        .rewrite_commit(&settings, &initial)
        .set_description("rewrite2")
        .write()
        .unwrap();
    mut_repo2.rebase_descendants(&settings).unwrap();

    // Neither transaction has committed yet, so each transaction sees its own
    // commit.
    assert_heads(repo.as_ref(), vec![initial.id()]);
    assert_heads(mut_repo1, vec![rewrite1.id()]);
    assert_heads(mut_repo2, vec![rewrite2.id()]);

    // The base repo and tx2 don't see the commits from tx1.
    tx1.commit("transaction 1");
    assert_heads(repo.as_ref(), vec![initial.id()]);
    assert_heads(mut_repo2, vec![rewrite2.id()]);

    // The base repo still doesn't see the commits after both transactions commit.
    tx2.commit("transaction 2");
    assert_heads(repo.as_ref(), vec![initial.id()]);
    // After reload, the base repo sees both rewrites.
    let repo = repo.reload_at_head(&settings).unwrap();
    assert_heads(repo.as_ref(), vec![rewrite1.id(), rewrite2.id()]);
}
|
2023-12-31 03:15:20 +00:00
|
|
|
|
2023-12-29 07:02:56 +00:00
|
|
|
#[test]
fn test_reparent_range_linear() {
    // Exercises op_walk::reparent_range() on a strictly linear operation
    // graph: a sub-range of operations is rewritten onto a new parent, and
    // the rewritten operations must keep their metadata and views.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let repo_0 = test_repo.repo;
    let op_store = repo_0.op_store();

    // Loads a full Operation object back from the store by id.
    let read_op = |id| {
        let data = op_store.read_operation(id).unwrap();
        Operation::new(op_store.clone(), id.clone(), data)
    };

    // Collects an operation's parents into a fixed-size array; panics if the
    // actual parent count differs from N.
    fn op_parents<const N: usize>(op: &Operation) -> [Operation; N] {
        let parents: Vec<_> = op.parents().try_collect().unwrap();
        parents.try_into().unwrap()
    }

    // Set up linear operation graph:
    // D
    // C
    // B
    // A
    // 0 (initial)
    let random_tx = |repo: &Arc<ReadonlyRepo>| {
        let mut tx = repo.start_transaction(&settings);
        write_random_commit(tx.mut_repo(), &settings);
        tx
    };
    let repo_a = random_tx(&repo_0).commit("op A");
    let repo_b = random_tx(&repo_a).commit("op B");
    let repo_c = random_tx(&repo_b).commit("op C");
    let repo_d = random_tx(&repo_c).commit("op D");

    // Reparent B..D (=C|D) onto A:
    // D'
    // C'
    // A
    // 0 (initial)
    let stats = op_walk::reparent_range(
        op_store.as_ref(),
        slice::from_ref(repo_b.operation()),
        slice::from_ref(repo_d.operation()),
        repo_a.operation(),
    )
    .unwrap();
    // C and D are rewritten; B becomes unreachable from the new head.
    assert_eq!(stats.new_head_ids.len(), 1);
    assert_eq!(stats.rewritten_count, 2);
    assert_eq!(stats.unreachable_count, 1);
    let new_op_d = read_op(&stats.new_head_ids[0]);
    // Reparenting preserves the operation metadata and the view it points to.
    assert_eq!(
        new_op_d.store_operation().metadata,
        repo_d.operation().store_operation().metadata
    );
    assert_eq!(new_op_d.view_id(), repo_d.operation().view_id());
    let [new_op_c] = op_parents(&new_op_d);
    assert_eq!(
        new_op_c.store_operation().metadata,
        repo_c.operation().store_operation().metadata
    );
    assert_eq!(new_op_c.view_id(), repo_c.operation().view_id());
    // C' is now attached directly to A.
    assert_eq!(new_op_c.parent_ids(), slice::from_ref(repo_a.op_id()));

    // Reparent empty range onto A
    let stats = op_walk::reparent_range(
        op_store.as_ref(),
        slice::from_ref(repo_d.operation()),
        slice::from_ref(repo_d.operation()),
        repo_a.operation(),
    )
    .unwrap();
    // Nothing to rewrite: the head collapses to A itself, and B|C|D are all
    // counted as unreachable.
    assert_eq!(stats.new_head_ids, vec![repo_a.op_id().clone()]);
    assert_eq!(stats.rewritten_count, 0);
    assert_eq!(stats.unreachable_count, 3);
}
|
|
|
|
|
|
|
|
#[test]
fn test_reparent_range_branchy() {
    // Exercises op_walk::reparent_range() on a graph containing a fork
    // (E and F committed concurrently) and a merge operation (G).
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let repo_0 = test_repo.repo;
    let op_store = repo_0.op_store();

    // Loads a full Operation object back from the store by id.
    let read_op = |id| {
        let data = op_store.read_operation(id).unwrap();
        Operation::new(op_store.clone(), id.clone(), data)
    };

    // Collects an operation's parents into a fixed-size array; panics if the
    // actual parent count differs from N.
    fn op_parents<const N: usize>(op: &Operation) -> [Operation; N] {
        let parents: Vec<_> = op.parents().try_collect().unwrap();
        parents.try_into().unwrap()
    }

    // Set up branchy operation graph:
    // G
    // |\
    // | F
    // E |
    // D |
    // |/
    // C
    // B
    // A
    // 0 (initial)
    let random_tx = |repo: &Arc<ReadonlyRepo>| {
        let mut tx = repo.start_transaction(&settings);
        write_random_commit(tx.mut_repo(), &settings);
        tx
    };
    let repo_a = random_tx(&repo_0).commit("op A");
    let repo_b = random_tx(&repo_a).commit("op B");
    let repo_c = random_tx(&repo_b).commit("op C");
    let repo_d = random_tx(&repo_c).commit("op D");
    let tx_e = random_tx(&repo_d);
    let tx_f = random_tx(&repo_c);
    // Committing both transactions produces the merge operation G, whose two
    // parents are E and F.
    let repo_g = testutils::commit_transactions(&settings, vec![tx_e, tx_f]);
    let [op_e, op_f] = op_parents(repo_g.operation());

    // Reparent D..G (= E|F|G) onto B:
    // G'
    // |\
    // | F'
    // E'|
    // |/
    // B
    // A
    // 0 (initial)
    let stats = op_walk::reparent_range(
        op_store.as_ref(),
        slice::from_ref(repo_d.operation()),
        slice::from_ref(repo_g.operation()),
        repo_b.operation(),
    )
    .unwrap();
    // E, F, G rewritten; C and D become unreachable.
    assert_eq!(stats.new_head_ids.len(), 1);
    assert_eq!(stats.rewritten_count, 3);
    assert_eq!(stats.unreachable_count, 2);
    let new_op_g = read_op(&stats.new_head_ids[0]);
    // Rewritten operations keep their metadata and views.
    assert_eq!(
        new_op_g.store_operation().metadata,
        repo_g.operation().store_operation().metadata
    );
    assert_eq!(new_op_g.view_id(), repo_g.operation().view_id());
    let [new_op_e, new_op_f] = op_parents(&new_op_g);
    // Both fork legs are now attached directly to B.
    assert_eq!(new_op_e.parent_ids(), slice::from_ref(repo_b.op_id()));
    assert_eq!(new_op_f.parent_ids(), slice::from_ref(repo_b.op_id()));

    // Reparent B..G (=C|D|E|F|G) onto A:
    // G'
    // |\
    // | F'
    // E'|
    // D'|
    // |/
    // C'
    // A
    // 0 (initial)
    let stats = op_walk::reparent_range(
        op_store.as_ref(),
        slice::from_ref(repo_b.operation()),
        slice::from_ref(repo_g.operation()),
        repo_a.operation(),
    )
    .unwrap();
    // Five operations rewritten; only B becomes unreachable.
    assert_eq!(stats.new_head_ids.len(), 1);
    assert_eq!(stats.rewritten_count, 5);
    assert_eq!(stats.unreachable_count, 1);
    let new_op_g = read_op(&stats.new_head_ids[0]);
    assert_eq!(
        new_op_g.store_operation().metadata,
        repo_g.operation().store_operation().metadata
    );
    assert_eq!(new_op_g.view_id(), repo_g.operation().view_id());
    let [new_op_e, new_op_f] = op_parents(&new_op_g);
    let [new_op_d] = op_parents(&new_op_e);
    // D' and F' share the same parent (C'), preserving the fork shape.
    assert_eq!(new_op_d.parent_ids(), new_op_f.parent_ids());
    let [new_op_c] = op_parents(&new_op_d);
    assert_eq!(new_op_c.parent_ids(), slice::from_ref(repo_a.op_id()));

    // Reparent (E|F)..G (=G) onto D:
    // G'
    // D
    // C
    // B
    // A
    // 0 (initial)
    let stats = op_walk::reparent_range(
        op_store.as_ref(),
        &[op_e.clone(), op_f.clone()],
        slice::from_ref(repo_g.operation()),
        repo_d.operation(),
    )
    .unwrap();
    // The merge G collapses to a single-parent operation on top of D; E and F
    // become unreachable.
    assert_eq!(stats.new_head_ids.len(), 1);
    assert_eq!(stats.rewritten_count, 1);
    assert_eq!(stats.unreachable_count, 2);
    let new_op_g = read_op(&stats.new_head_ids[0]);
    assert_eq!(
        new_op_g.store_operation().metadata,
        repo_g.operation().store_operation().metadata
    );
    assert_eq!(new_op_g.view_id(), repo_g.operation().view_id());
    assert_eq!(new_op_g.parent_ids(), slice::from_ref(repo_d.op_id()));

    // Reparent C..F (=F) onto D (ignoring G):
    // F'
    // D
    // C
    // B
    // A
    // 0 (initial)
    let stats = op_walk::reparent_range(
        op_store.as_ref(),
        slice::from_ref(repo_c.operation()),
        slice::from_ref(&op_f),
        repo_d.operation(),
    )
    .unwrap();
    // Only F is rewritten; nothing in C..F's ancestry becomes unreachable
    // because D (the new parent) already sits on top of C.
    assert_eq!(stats.new_head_ids.len(), 1);
    assert_eq!(stats.rewritten_count, 1);
    assert_eq!(stats.unreachable_count, 0);
    let new_op_f = read_op(&stats.new_head_ids[0]);
    assert_eq!(
        new_op_f.store_operation().metadata,
        op_f.store_operation().metadata
    );
    assert_eq!(new_op_f.view_id(), op_f.view_id());
    assert_eq!(new_op_f.parent_ids(), slice::from_ref(repo_d.op_id()));
}
|
|
|
|
|
2023-12-31 03:15:20 +00:00
|
|
|
fn stable_op_id_settings() -> UserSettings {
|
|
|
|
UserSettings::from_config(
|
|
|
|
testutils::base_config()
|
|
|
|
.add_source(config::File::from_str(
|
|
|
|
"debug.operation-timestamp = '2001-02-03T04:05:06+07:00'",
|
|
|
|
config::FileFormat::Toml,
|
|
|
|
))
|
|
|
|
.build()
|
|
|
|
.unwrap(),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn test_resolve_op_id() {
    // Uses stable operation ids so the snapshot of hashes below stays valid.
    let settings = stable_op_id_settings();
    let test_repo = TestRepo::init_with_settings(&settings);
    let mut repo = test_repo.repo;

    // Build a linear chain of 6 operations to resolve against.
    let mut operations = Vec::new();
    for i in 0..6 {
        let tx = repo.start_transaction(&settings);
        repo = tx.commit(format!("transaction {i}"));
        operations.push(repo.operation().clone());
    }
    // "1" is ambiguous
    insta::assert_debug_snapshot!(operations.iter().map(|op| op.id().hex()).collect_vec(), @r###"
    [
        "4ff2007de55a2f649f7ab0c98618e4126ef49f0d40a086c8e0a4612a0d5ab4992e1baf4b4fa0a2a224fab39fc5e5b200ac4cddf964db29c6be1379ab2b6d4572",
        "cf1fd9ea3065ae077e13641278a213b371f98eb237334489c059e50e0a92a79cb058939f6fd2a89492f32dcecb14b6237f3e1fcf6fa454a9c21ef5425d78574c",
        "5b29edf367805ca7e1dc9219ec25dae70ec853afbeed9a113dc2adba428f71eea944122097276fb664e2c8d6cc99a080d49ef58ce14356b5887d27b45952bfef",
        "126647a88e08bc46f72db1eff34abe426cdd54e4e4c05b9b6773c288442bed2c5add304d4978e377694516ee89ed3436cec20ef2af4921247e6b6a90c3685a2a",
        "5131849d86fe586dbbad3992d85b953b0ebe554f0bd7c42ee09628ab85db7ad909242901688a7d00590d326c8bdde21e8ed933f7ab4d6b7b16077e3d1c07b284",
        "14073ae915621d1ec2358129440dc2a26e67e45995f46cf6199a6bb09b44d16a522451079a2480e4d2026d368759eacd1d5b3bc7a5915d9345d354b8e7ad46b4",
    ]
    "###);

    let repo_loader = repo.loader();
    let resolve = |op_str: &str| op_walk::resolve_op_for_load(&repo_loader, op_str);

    // Full id
    assert_eq!(resolve(&operations[0].id().hex()).unwrap(), operations[0]);
    // Short id, odd length
    assert_eq!(
        resolve(&operations[0].id().hex()[..3]).unwrap(),
        operations[0]
    );
    // Short id, even length
    assert_eq!(
        resolve(&operations[1].id().hex()[..2]).unwrap(),
        operations[1]
    );
    // Ambiguous id
    assert_matches!(
        resolve("1"),
        Err(OpsetEvaluationError::OpsetResolution(
            OpsetResolutionError::AmbiguousIdPrefix(_)
        ))
    );
    // Empty id
    assert_matches!(
        resolve(""),
        Err(OpsetEvaluationError::OpsetResolution(
            OpsetResolutionError::InvalidIdPrefix(_)
        ))
    );
    // Unknown id
    assert_matches!(
        resolve("deadbee"),
        Err(OpsetEvaluationError::OpsetResolution(
            OpsetResolutionError::NoSuchOperation(_)
        ))
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_resolve_current_op() {
    // "@" resolves to the operation the repo is currently loaded at.
    let settings = stable_op_id_settings();
    let test_repo = TestRepo::init_with_settings(&settings);
    let repo = test_repo.repo;

    let resolved = op_walk::resolve_op_with_repo(&repo, "@").unwrap();
    assert_eq!(resolved, *repo.operation());
}
|
|
|
|
|
|
|
|
#[test]
|
2023-12-31 05:58:52 +00:00
|
|
|
fn test_resolve_op_parents_children() {
|
2023-12-31 03:15:20 +00:00
|
|
|
// Use monotonic timestamp to stabilize merge order of transactions
|
|
|
|
let settings = testutils::user_settings();
|
|
|
|
let test_repo = TestRepo::init_with_settings(&settings);
|
2024-01-10 10:35:08 +00:00
|
|
|
let mut repo = &test_repo.repo;
|
2023-12-31 03:15:20 +00:00
|
|
|
|
2024-01-10 10:35:08 +00:00
|
|
|
let mut repos = Vec::new();
|
2023-12-31 03:15:20 +00:00
|
|
|
for _ in 0..3 {
|
|
|
|
let tx = repo.start_transaction(&settings);
|
2024-01-10 10:35:08 +00:00
|
|
|
repos.push(tx.commit("test"));
|
|
|
|
repo = repos.last().unwrap();
|
2023-12-31 03:15:20 +00:00
|
|
|
}
|
2024-01-10 10:35:08 +00:00
|
|
|
let operations = repos.iter().map(|repo| repo.operation()).collect_vec();
|
2023-12-31 03:15:20 +00:00
|
|
|
|
2023-12-31 05:58:52 +00:00
|
|
|
// Parent
|
2023-12-31 03:15:20 +00:00
|
|
|
let op2_id_hex = operations[2].id().hex();
|
|
|
|
assert_eq!(
|
2024-01-10 10:35:08 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}-")).unwrap(),
|
|
|
|
*operations[1]
|
2023-12-31 03:15:20 +00:00
|
|
|
);
|
|
|
|
assert_eq!(
|
2024-01-10 10:35:08 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}--")).unwrap(),
|
|
|
|
*operations[0]
|
2023-12-31 03:15:20 +00:00
|
|
|
);
|
2024-01-07 18:32:31 +00:00
|
|
|
// "{op2_id_hex}----" is the root operation
|
2023-12-31 03:15:20 +00:00
|
|
|
assert_matches!(
|
2024-01-07 18:32:31 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}-----")),
|
2023-12-31 03:15:20 +00:00
|
|
|
Err(OpsetEvaluationError::OpsetResolution(
|
|
|
|
OpsetResolutionError::EmptyOperations(_)
|
|
|
|
))
|
|
|
|
);
|
|
|
|
|
2023-12-31 05:58:52 +00:00
|
|
|
// Child
|
|
|
|
let op0_id_hex = operations[0].id().hex();
|
|
|
|
assert_eq!(
|
2024-01-10 10:35:08 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op0_id_hex}+")).unwrap(),
|
|
|
|
*operations[1]
|
2023-12-31 05:58:52 +00:00
|
|
|
);
|
|
|
|
assert_eq!(
|
2024-01-10 10:35:08 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op0_id_hex}++")).unwrap(),
|
|
|
|
*operations[2]
|
2023-12-31 05:58:52 +00:00
|
|
|
);
|
|
|
|
assert_matches!(
|
2024-01-10 10:35:08 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op0_id_hex}+++")),
|
2023-12-31 05:58:52 +00:00
|
|
|
Err(OpsetEvaluationError::OpsetResolution(
|
|
|
|
OpsetResolutionError::EmptyOperations(_)
|
|
|
|
))
|
|
|
|
);
|
|
|
|
|
|
|
|
// Child of parent
|
|
|
|
assert_eq!(
|
2024-01-10 10:35:08 +00:00
|
|
|
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}--+")).unwrap(),
|
|
|
|
*operations[1]
|
|
|
|
);
|
|
|
|
|
|
|
|
// Child at old repo: new operations shouldn't be visible
|
|
|
|
assert_eq!(
|
|
|
|
op_walk::resolve_op_with_repo(&repos[1], &format!("{op0_id_hex}+")).unwrap(),
|
|
|
|
*operations[1]
|
|
|
|
);
|
|
|
|
assert_matches!(
|
|
|
|
op_walk::resolve_op_with_repo(&repos[0], &format!("{op0_id_hex}+")),
|
|
|
|
Err(OpsetEvaluationError::OpsetResolution(
|
|
|
|
OpsetResolutionError::EmptyOperations(_)
|
|
|
|
))
|
2023-12-31 05:58:52 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
// Merge and fork
|
2023-12-31 03:15:20 +00:00
|
|
|
let tx1 = repo.start_transaction(&settings);
|
|
|
|
let tx2 = repo.start_transaction(&settings);
|
2024-01-10 10:35:08 +00:00
|
|
|
let repo = testutils::commit_transactions(&settings, vec![tx1, tx2]);
|
2023-12-31 03:15:20 +00:00
|
|
|
let op5_id_hex = repo.operation().id().hex();
|
|
|
|
assert_matches!(
|
|
|
|
op_walk::resolve_op_with_repo(&repo, &format!("{op5_id_hex}-")),
|
|
|
|
Err(OpsetEvaluationError::OpsetResolution(
|
|
|
|
OpsetResolutionError::MultipleOperations(_)
|
|
|
|
))
|
|
|
|
);
|
2023-12-31 05:58:52 +00:00
|
|
|
let op2_id_hex = operations[2].id().hex();
|
|
|
|
assert_matches!(
|
|
|
|
op_walk::resolve_op_with_repo(&repo, &format!("{op2_id_hex}+")),
|
|
|
|
Err(OpsetEvaluationError::OpsetResolution(
|
|
|
|
OpsetResolutionError::MultipleOperations(_)
|
|
|
|
))
|
|
|
|
);
|
2023-12-31 03:15:20 +00:00
|
|
|
}
|
2024-01-07 09:21:10 +00:00
|
|
|
|
|
|
|
#[test]
fn test_gc() {
    // Tests OpStore::gc(): operations and views unreachable from the given
    // heads (and older than the keep-after time) are deleted from disk.
    let settings = stable_op_id_settings();
    let test_repo = TestRepo::init();
    let repo_0 = test_repo.repo;
    let op_store = repo_0.op_store();
    let op_dir = repo_0.repo_path().join("op_store").join("operations");
    let view_dir = repo_0.repo_path().join("op_store").join("views");

    // Set up operation graph:
    //
    // F
    // E (empty)
    // D |
    // C |
    // |/
    // B
    // A
    // 0 (initial)
    let empty_tx = |repo: &Arc<ReadonlyRepo>| repo.start_transaction(&settings);
    let random_tx = |repo: &Arc<ReadonlyRepo>| {
        let mut tx = repo.start_transaction(&settings);
        write_random_commit(tx.mut_repo(), &settings);
        tx
    };
    let repo_a = random_tx(&repo_0).commit("op A");
    let repo_b = random_tx(&repo_a).commit("op B");
    let repo_c = random_tx(&repo_b).commit("op C");
    let repo_d = random_tx(&repo_c).commit("op D");
    let repo_e = empty_tx(&repo_b).commit("op E");
    let repo_f = random_tx(&repo_e).commit("op F");

    // Sanity check for the original state
    // 7 operations (initial + A..F) but only 6 views: E was an empty
    // transaction, so its view is presumably shared with B's — TODO confirm.
    let mut expected_op_entries = list_dir(&op_dir);
    let mut expected_view_entries = list_dir(&view_dir);
    assert_eq!(expected_op_entries.len(), 7);
    assert_eq!(expected_view_entries.len(), 6);

    // No heads, but all kept by file modification time
    op_store.gc(&[], SystemTime::UNIX_EPOCH).unwrap();
    assert_eq!(list_dir(&op_dir), expected_op_entries);
    assert_eq!(list_dir(&view_dir), expected_view_entries);

    // All reachable from heads
    let now = SystemTime::now();
    let head_ids = [repo_d.op_id().clone(), repo_f.op_id().clone()];
    op_store.gc(&head_ids, now).unwrap();
    assert_eq!(list_dir(&op_dir), expected_op_entries);
    assert_eq!(list_dir(&view_dir), expected_view_entries);

    // E|F are no longer reachable, but E's view is still reachable
    op_store.gc(slice::from_ref(repo_d.op_id()), now).unwrap();
    expected_op_entries
        .retain(|name| *name != repo_e.op_id().hex() && *name != repo_f.op_id().hex());
    expected_view_entries.retain(|name| *name != repo_f.operation().view_id().hex());
    assert_eq!(list_dir(&op_dir), expected_op_entries);
    assert_eq!(list_dir(&view_dir), expected_view_entries);

    // B|C|D are no longer reachable
    op_store.gc(slice::from_ref(repo_a.op_id()), now).unwrap();
    expected_op_entries.retain(|name| {
        *name != repo_b.op_id().hex()
            && *name != repo_c.op_id().hex()
            && *name != repo_d.op_id().hex()
    });
    expected_view_entries.retain(|name| {
        *name != repo_b.operation().view_id().hex()
            && *name != repo_c.operation().view_id().hex()
            && *name != repo_d.operation().view_id().hex()
    });
    assert_eq!(list_dir(&op_dir), expected_op_entries);
    assert_eq!(list_dir(&view_dir), expected_view_entries);

    // Sanity check for the last state
    // Only the initial operation and A survive (with their two views).
    assert_eq!(expected_op_entries.len(), 2);
    assert_eq!(expected_view_entries.len(), 2);
}
|