// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(missing_docs)]

use std::any::Any;
use std::fmt::Debug;
use std::fs;
use std::fs::File;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::time::SystemTime;

use async_trait::async_trait;
use blake2::{Blake2b512, Digest};
use futures::stream::BoxStream;
use prost::Message;
use tempfile::NamedTempFile;

use crate::backend::{
    make_root_commit, Backend, BackendError, BackendResult, ChangeId, Commit, CommitId, Conflict,
    ConflictId, ConflictTerm, CopyRecord, FileId, MergedTreeId, MillisSinceEpoch, SecureSig,
    Signature, SigningFn, SymlinkId, Timestamp, Tree, TreeId, TreeValue,
};
use crate::content_hash::blake2b_hash;
use crate::file_util::persist_content_addressed_temp_file;
use crate::index::Index;
use crate::merge::MergeBuilder;
use crate::object_id::ObjectId;
use crate::repo_path::{RepoPath, RepoPathBuf, RepoPathComponentBuf};

// Lengths, in bytes, of the IDs used by this backend. Commit IDs (like tree,
// file, symlink, and conflict IDs) are BLAKE2b-512 hashes.
const COMMIT_ID_LENGTH: usize = 64;
const CHANGE_ID_LENGTH: usize = 16;

fn map_not_found_err(err: std::io::Error, id: &impl ObjectId) -> BackendError {
    if err.kind() == std::io::ErrorKind::NotFound {
        BackendError::ObjectNotFound {
            object_type: id.object_type(),
            hash: id.hex(),
            source: Box::new(err),
        }
    } else {
        BackendError::ReadObject {
            object_type: id.object_type(),
            hash: id.hex(),
            source: Box::new(err),
        }
    }
}

fn to_other_err(err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> BackendError {
    BackendError::Other(err.into())
}

#[derive(Debug)]
pub struct LocalBackend {
    path: PathBuf,
    root_commit_id: CommitId,
    root_change_id: ChangeId,
    empty_tree_id: TreeId,
}

impl LocalBackend {
    pub fn name() -> &'static str {
        "local"
    }

    pub fn init(store_path: &Path) -> Self {
        fs::create_dir(store_path.join("commits")).unwrap();
        fs::create_dir(store_path.join("trees")).unwrap();
        fs::create_dir(store_path.join("files")).unwrap();
        fs::create_dir(store_path.join("symlinks")).unwrap();
        fs::create_dir(store_path.join("conflicts")).unwrap();
        let backend = Self::load(store_path);
        let empty_tree_id = backend
            .write_tree(RepoPath::root(), &Tree::default())
            .unwrap();
        assert_eq!(empty_tree_id, backend.empty_tree_id);
        backend
    }

    pub fn load(store_path: &Path) -> Self {
        let root_commit_id = CommitId::from_bytes(&[0; COMMIT_ID_LENGTH]);
        let root_change_id = ChangeId::from_bytes(&[0; CHANGE_ID_LENGTH]);
        // The BLAKE2b-512 hash of the default (empty) `Tree`, as computed by
        // `blake2b_hash` in `write_tree`; `init` asserts that this constant
        // stays in sync with the serialization format.
        let empty_tree_id = TreeId::from_hex("482ae5a29fbe856c7272f2071b8b0f0359ee2d89ff392b8a900643fbd0836eccd067b8bf41909e206c90d45d6e7d8b6686b93ecaee5fe1a9060d87b672101310");
        LocalBackend {
            path: store_path.to_path_buf(),
            root_commit_id,
            root_change_id,
            empty_tree_id,
        }
    }

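    // On-disk layout: each object kind gets its own subdirectory of the
    // store path, and each object lives in a file named by the hex form of
    // its content hash, e.g.:
    //
    //   <store>/files/<file id hex>      (zstd-compressed file content)
    //   <store>/trees/<tree id hex>      (protobuf-encoded tree)
    //   <store>/commits/<commit id hex>  (protobuf-encoded commit)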
    fn file_path(&self, id: &FileId) -> PathBuf {
        self.path.join("files").join(id.hex())
    }

    fn symlink_path(&self, id: &SymlinkId) -> PathBuf {
        self.path.join("symlinks").join(id.hex())
    }

    fn tree_path(&self, id: &TreeId) -> PathBuf {
        self.path.join("trees").join(id.hex())
    }

    fn commit_path(&self, id: &CommitId) -> PathBuf {
        self.path.join("commits").join(id.hex())
    }

    fn conflict_path(&self, id: &ConflictId) -> PathBuf {
        self.path.join("conflicts").join(id.hex())
    }
}

// Why the read methods below are `async`: some backends are cloud-based;
// they read and write commits from/to a server, which stores them in a
// database, so latency is much higher than for disk-based backends. A local
// daemon process that caches and prefetches objects reduces the latency, but
// there are still many cases where it is high, such as when diffing two
// uncached commits. Making reads `async` lets jj's algorithms read many
// objects concurrently; for tree-diffing, one level (depth) of the tree can
// be fetched at a time. The alternatives would have been to use many threads
// or to add backend methods for batch reading. CPU parallelism is typically
// not needed, so it would be wasteful to run hundreds of threads just to
// fetch hundreds of objects in parallel (especially with a synchronous
// backend like the Git backend), and batching, while a good fit for
// tree-diffing, is not as composable as `async`: it is hard to see how
// batching could fetch some commits at the same time as a diff is being
// computed.
//
// The write methods are not `async` because, with a caching daemon in
// between, writes are already asynchronous: the daemon hashes the object,
// returns immediately, and sends the object to the server in the background.
// This may need to be reconsidered if jj gets used on a server with a custom
// backend that writes directly to a database (i.e. no async daemon in
// between).
//
// The measured performance impact of `async` is small. The largest
// difference observed was on `jj diff --ignore-working-copy -s --from v5.0
// --to v6.0` in the Linux repo, which increased from 749 ms to 773 ms
// (3.3%); in most cases there was no measurable difference, including when
// diffing from the root commit and under a commit-heavy load such as
// `jj --ignore-working-copy log --no-graph -r '::v3.0 & author(torvalds)'
// -T 'commit_id ++ "\n"'`.
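//
// A minimal sketch (not part of this backend) of the kind of fan-out that
// `async` reads make possible; `read_children` is a hypothetical helper:
//
//     use futures::future::try_join_all;
//
//     async fn read_children(
//         backend: &dyn Backend,
//         path: &RepoPath,
//         ids: &[TreeId],
//     ) -> BackendResult<Vec<Tree>> {
//         // All reads are issued up front, so a high-latency backend can
//         // serve them concurrently instead of one at a time.
//         try_join_all(ids.iter().map(|id| backend.read_tree(path, id))).await
//     }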
#[async_trait]
impl Backend for LocalBackend {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn name(&self) -> &str {
        Self::name()
    }

    fn commit_id_length(&self) -> usize {
        COMMIT_ID_LENGTH
    }

    fn change_id_length(&self) -> usize {
        CHANGE_ID_LENGTH
    }

    fn root_commit_id(&self) -> &CommitId {
        &self.root_commit_id
    }

    fn root_change_id(&self) -> &ChangeId {
        &self.root_change_id
    }

    fn empty_tree_id(&self) -> &TreeId {
        &self.empty_tree_id
    }

    fn concurrency(&self) -> usize {
        // Local disk reads are cheap, so there is no latency to hide by
        // issuing many read requests at once.
        1
    }

    async fn read_file(&self, _path: &RepoPath, id: &FileId) -> BackendResult<Box<dyn Read>> {
        let path = self.file_path(id);
        let file = File::open(path).map_err(|err| map_not_found_err(err, id))?;
        Ok(Box::new(zstd::Decoder::new(file).map_err(to_other_err)?))
    }

    fn write_file(&self, _path: &RepoPath, contents: &mut dyn Read) -> BackendResult<FileId> {
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;
        let mut encoder = zstd::Encoder::new(temp_file.as_file(), 0).map_err(to_other_err)?;
        let mut hasher = Blake2b512::new();
        // Stream the contents through the zstd encoder while hashing the
        // *uncompressed* bytes; the hash becomes the content address.
        let mut buff: Vec<u8> = vec![0; 1 << 14];
        loop {
            let bytes_read = contents.read(&mut buff).map_err(to_other_err)?;
            if bytes_read == 0 {
                break;
            }
            let bytes = &buff[..bytes_read];
            encoder.write_all(bytes).map_err(to_other_err)?;
            hasher.update(bytes);
        }
        encoder.finish().map_err(to_other_err)?;
        let id = FileId::new(hasher.finalize().to_vec());

        persist_content_addressed_temp_file(temp_file, self.file_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }

    async fn read_symlink(&self, _path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
        let path = self.symlink_path(id);
        let target = fs::read_to_string(path).map_err(|err| map_not_found_err(err, id))?;
        Ok(target)
    }

    fn write_symlink(&self, _path: &RepoPath, target: &str) -> BackendResult<SymlinkId> {
        let mut temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;
        temp_file
            .write_all(target.as_bytes())
            .map_err(to_other_err)?;
        let mut hasher = Blake2b512::new();
        hasher.update(target.as_bytes());
        let id = SymlinkId::new(hasher.finalize().to_vec());

        persist_content_addressed_temp_file(temp_file, self.symlink_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }

    async fn read_tree(&self, _path: &RepoPath, id: &TreeId) -> BackendResult<Tree> {
        let path = self.tree_path(id);
        let buf = fs::read(path).map_err(|err| map_not_found_err(err, id))?;

        let proto = crate::protos::local_store::Tree::decode(&*buf).map_err(to_other_err)?;
        Ok(tree_from_proto(proto))
    }

    fn write_tree(&self, _path: &RepoPath, tree: &Tree) -> BackendResult<TreeId> {
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;

        let proto = tree_to_proto(tree);
        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .map_err(to_other_err)?;

        // The ID is the BLAKE2b-512 hash of the tree's content, so the store
        // is content-addressed: writing an identical tree twice yields the
        // same ID and the same file.
        let id = TreeId::new(blake2b_hash(tree).to_vec());

        persist_content_addressed_temp_file(temp_file, self.tree_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }

    fn read_conflict(&self, _path: &RepoPath, id: &ConflictId) -> BackendResult<Conflict> {
        let path = self.conflict_path(id);
        let buf = fs::read(path).map_err(|err| map_not_found_err(err, id))?;

        let proto = crate::protos::local_store::Conflict::decode(&*buf).map_err(to_other_err)?;
        Ok(conflict_from_proto(proto))
    }

    fn write_conflict(&self, _path: &RepoPath, conflict: &Conflict) -> BackendResult<ConflictId> {
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;

        let proto = conflict_to_proto(conflict);
        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .map_err(to_other_err)?;

        let id = ConflictId::new(blake2b_hash(conflict).to_vec());

        persist_content_addressed_temp_file(temp_file, self.conflict_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }

    async fn read_commit(&self, id: &CommitId) -> BackendResult<Commit> {
        if *id == self.root_commit_id {
            return Ok(make_root_commit(
                self.root_change_id().clone(),
                self.empty_tree_id.clone(),
            ));
        }

        let path = self.commit_path(id);
        let buf = fs::read(path).map_err(|err| map_not_found_err(err, id))?;

        let proto = crate::protos::local_store::Commit::decode(&*buf).map_err(to_other_err)?;
        Ok(commit_from_proto(proto))
    }

    fn write_commit(
        &self,
        mut commit: Commit,
        sign_with: Option<&mut SigningFn>,
    ) -> BackendResult<(CommitId, Commit)> {
        assert!(commit.secure_sig.is_none(), "commit.secure_sig was set");

        if commit.parents.is_empty() {
            return Err(BackendError::Other(
                "Cannot write a commit with no parents".into(),
            ));
        }
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;

        let mut proto = commit_to_proto(&commit);
        if let Some(sign) = sign_with {
            // Sign the proto encoding *without* the signature field set, then
            // store the signature both in the proto (for persistence) and on
            // the returned commit (for the caller).
            let data = proto.encode_to_vec();
            let sig = sign(&data).map_err(to_other_err)?;
            proto.secure_sig = Some(sig.clone());
            commit.secure_sig = Some(SecureSig { data, sig });
        }

        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .map_err(to_other_err)?;

        let id = CommitId::new(blake2b_hash(&commit).to_vec());

        persist_content_addressed_temp_file(temp_file, self.commit_path(&id))
            .map_err(to_other_err)?;
        Ok((id, commit))
    }
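
    // A sketch of how a caller might supply `sign_with`; the `SigningFn`
    // type is defined in `crate::backend`, and the dummy signer below is
    // purely illustrative:
    //
    //     let mut sign = |data: &[u8]| Ok(b"dummy signature".to_vec());
    //     let (id, signed_commit) = backend.write_commit(commit, Some(&mut sign))?;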

    fn get_copy_records(
        &self,
        _paths: Option<&[RepoPathBuf]>,
        _root: &CommitId,
        _head: &CommitId,
    ) -> BackendResult<BoxStream<BackendResult<CopyRecord>>> {
        Err(BackendError::Unsupported("get_copy_records".into()))
    }

    fn gc(&self, _index: &dyn Index, _keep_newer: SystemTime) -> BackendResult<()> {
        Ok(())
    }
}

#[allow(unknown_lints)] // XXX FIXME (aseipp): nightly bogons; re-test this occasionally
#[allow(clippy::assigning_clones)]
pub fn commit_to_proto(commit: &Commit) -> crate::protos::local_store::Commit {
    let mut proto = crate::protos::local_store::Commit::default();
    for parent in &commit.parents {
        proto.parents.push(parent.to_bytes());
    }
    for predecessor in &commit.predecessors {
        proto.predecessors.push(predecessor.to_bytes());
    }
    match &commit.root_tree {
        MergedTreeId::Legacy(tree_id) => {
            proto.root_tree = vec![tree_id.to_bytes()];
        }
        MergedTreeId::Merge(tree_ids) => {
            proto.uses_tree_conflict_format = true;
            proto.root_tree = tree_ids.iter().map(|id| id.to_bytes()).collect();
        }
    }
    proto.change_id = commit.change_id.to_bytes();
    proto.description = commit.description.clone();
    proto.author = Some(signature_to_proto(&commit.author));
    proto.committer = Some(signature_to_proto(&commit.committer));
    proto
}

fn commit_from_proto(mut proto: crate::protos::local_store::Commit) -> Commit {
    // The signature (if any) covers the proto encoding with `secure_sig`
    // unset, so `.take()` both extracts the signature and clears the field
    // before we re-encode the data. It also has to happen first because
    // `proto` is partially moved below.
    let secure_sig = proto.secure_sig.take().map(|sig| SecureSig {
        data: proto.encode_to_vec(),
        sig,
    });

    let parents = proto.parents.into_iter().map(CommitId::new).collect();
    let predecessors = proto.predecessors.into_iter().map(CommitId::new).collect();
    let root_tree = if proto.uses_tree_conflict_format {
        let merge_builder: MergeBuilder<_> = proto.root_tree.into_iter().map(TreeId::new).collect();
        MergedTreeId::Merge(merge_builder.build())
    } else {
        assert_eq!(proto.root_tree.len(), 1);
        MergedTreeId::Legacy(TreeId::new(proto.root_tree[0].to_vec()))
    };
    let change_id = ChangeId::new(proto.change_id);
    Commit {
        parents,
        predecessors,
        root_tree,
        change_id,
        description: proto.description,
        author: signature_from_proto(proto.author.unwrap_or_default()),
        committer: signature_from_proto(proto.committer.unwrap_or_default()),
        secure_sig,
    }
}

fn tree_to_proto(tree: &Tree) -> crate::protos::local_store::Tree {
    let mut proto = crate::protos::local_store::Tree::default();
    for entry in tree.entries() {
        proto.entries.push(crate::protos::local_store::tree::Entry {
            name: entry.name().as_str().to_owned(),
            value: Some(tree_value_to_proto(entry.value())),
        });
    }
    proto
}

fn tree_from_proto(proto: crate::protos::local_store::Tree) -> Tree {
    let mut tree = Tree::default();
    for proto_entry in proto.entries {
        let value = tree_value_from_proto(proto_entry.value.unwrap());
        tree.set(RepoPathComponentBuf::from(proto_entry.name), value);
    }
    tree
}

fn tree_value_to_proto(value: &TreeValue) -> crate::protos::local_store::TreeValue {
    let mut proto = crate::protos::local_store::TreeValue::default();
    match value {
        TreeValue::File { id, executable } => {
            proto.value = Some(crate::protos::local_store::tree_value::Value::File(
                crate::protos::local_store::tree_value::File {
                    id: id.to_bytes(),
                    executable: *executable,
                },
            ));
        }
        TreeValue::Symlink(id) => {
            proto.value = Some(crate::protos::local_store::tree_value::Value::SymlinkId(
                id.to_bytes(),
            ));
        }
        TreeValue::GitSubmodule(_id) => {
            panic!("cannot store git submodules");
        }
        TreeValue::Tree(id) => {
            proto.value = Some(crate::protos::local_store::tree_value::Value::TreeId(
                id.to_bytes(),
            ));
        }
        TreeValue::Conflict(id) => {
            proto.value = Some(crate::protos::local_store::tree_value::Value::ConflictId(
                id.to_bytes(),
            ));
        }
    }
    proto
}

fn tree_value_from_proto(proto: crate::protos::local_store::TreeValue) -> TreeValue {
    match proto.value.unwrap() {
        crate::protos::local_store::tree_value::Value::TreeId(id) => {
            TreeValue::Tree(TreeId::new(id))
        }
        crate::protos::local_store::tree_value::Value::File(
            crate::protos::local_store::tree_value::File { id, executable, .. },
        ) => TreeValue::File {
            id: FileId::new(id),
            executable,
        },
        crate::protos::local_store::tree_value::Value::SymlinkId(id) => {
            TreeValue::Symlink(SymlinkId::new(id))
        }
        crate::protos::local_store::tree_value::Value::ConflictId(id) => {
            TreeValue::Conflict(ConflictId::new(id))
        }
    }
}

fn signature_to_proto(signature: &Signature) -> crate::protos::local_store::commit::Signature {
    crate::protos::local_store::commit::Signature {
        name: signature.name.clone(),
        email: signature.email.clone(),
        timestamp: Some(crate::protos::local_store::commit::Timestamp {
            millis_since_epoch: signature.timestamp.timestamp.0,
            tz_offset: signature.timestamp.tz_offset,
        }),
    }
}

fn signature_from_proto(proto: crate::protos::local_store::commit::Signature) -> Signature {
    let timestamp = proto.timestamp.unwrap_or_default();
    Signature {
        name: proto.name,
        email: proto.email,
        timestamp: Timestamp {
            timestamp: MillisSinceEpoch(timestamp.millis_since_epoch),
            tz_offset: timestamp.tz_offset,
        },
    }
}

fn conflict_to_proto(conflict: &Conflict) -> crate::protos::local_store::Conflict {
    let mut proto = crate::protos::local_store::Conflict::default();
    for term in &conflict.removes {
        proto.removes.push(conflict_term_to_proto(term));
    }
    for term in &conflict.adds {
        proto.adds.push(conflict_term_to_proto(term));
    }
    proto
}

fn conflict_from_proto(proto: crate::protos::local_store::Conflict) -> Conflict {
    let mut conflict = Conflict::default();
    for term in proto.removes {
        conflict.removes.push(conflict_term_from_proto(term))
    }
    for term in proto.adds {
        conflict.adds.push(conflict_term_from_proto(term))
    }
    conflict
}

fn conflict_term_from_proto(proto: crate::protos::local_store::conflict::Term) -> ConflictTerm {
    ConflictTerm {
        value: tree_value_from_proto(proto.content.unwrap()),
    }
}

fn conflict_term_to_proto(part: &ConflictTerm) -> crate::protos::local_store::conflict::Term {
    crate::protos::local_store::conflict::Term {
        content: Some(tree_value_to_proto(&part.value)),
    }
}

#[cfg(test)]
mod tests {
    use assert_matches::assert_matches;
    use pollster::FutureExt;

    use super::*;

    /// Test that parents get written correctly
    #[test]
    fn write_commit_parents() {
        let temp_dir = testutils::new_temp_dir();
        let store_path = temp_dir.path();

        let backend = LocalBackend::init(store_path);
        let mut commit = Commit {
            parents: vec![],
            predecessors: vec![],
            root_tree: MergedTreeId::resolved(backend.empty_tree_id().clone()),
            change_id: ChangeId::from_hex("abc123"),
            description: "".to_string(),
            author: create_signature(),
            committer: create_signature(),
            secure_sig: None,
        };

        // No parents
        commit.parents = vec![];
        assert_matches!(
            backend.write_commit(commit.clone(), None),
            Err(BackendError::Other(err)) if err.to_string().contains("no parents")
        );

        // Only root commit as parent
        commit.parents = vec![backend.root_commit_id().clone()];
        let first_id = backend.write_commit(commit.clone(), None).unwrap().0;
        let first_commit = backend.read_commit(&first_id).block_on().unwrap();
        assert_eq!(first_commit, commit);

        // Only non-root commit as parent
        commit.parents = vec![first_id.clone()];
        let second_id = backend.write_commit(commit.clone(), None).unwrap().0;
        let second_commit = backend.read_commit(&second_id).block_on().unwrap();
        assert_eq!(second_commit, commit);

        // Merge commit
        commit.parents = vec![first_id.clone(), second_id.clone()];
        let merge_id = backend.write_commit(commit.clone(), None).unwrap().0;
        let merge_commit = backend.read_commit(&merge_id).block_on().unwrap();
        assert_eq!(merge_commit, commit);

        // Merge commit with root as one parent
        commit.parents = vec![first_id, backend.root_commit_id().clone()];
        let root_merge_id = backend.write_commit(commit.clone(), None).unwrap().0;
        let root_merge_commit = backend.read_commit(&root_merge_id).block_on().unwrap();
        assert_eq!(root_merge_commit, commit);
    }
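
    /// A minimal round-trip sketch for the file and symlink stores (an
    /// illustrative addition, not from the original suite): write content,
    /// then read it back through the async read methods.
    #[test]
    fn write_read_file_and_symlink_roundtrip() {
        let temp_dir = testutils::new_temp_dir();
        let backend = LocalBackend::init(temp_dir.path());
        // The local backend ignores the repo path arguments, so the root
        // path is good enough here.
        let path = RepoPath::root();

        let mut contents: &[u8] = b"some file content";
        let file_id = backend.write_file(path, &mut contents).unwrap();
        let mut reader = backend.read_file(path, &file_id).block_on().unwrap();
        let mut buf = vec![];
        reader.read_to_end(&mut buf).unwrap();
        assert_eq!(buf, b"some file content");

        let symlink_id = backend.write_symlink(path, "target").unwrap();
        let target = backend.read_symlink(path, &symlink_id).block_on().unwrap();
        assert_eq!(target, "target");
    }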

    fn create_signature() -> Signature {
        Signature {
            name: "Someone".to_string(),
            email: "someone@example.com".to_string(),
            timestamp: Timestamp {
                timestamp: MillisSinceEpoch(0),
                tz_offset: 0,
            },
        }
    }
}