Support adding worktrees to project while host is offline

Antonio Scandurra 2022-12-20 17:42:08 +01:00
parent 52babc51a0
commit d31fd9bbf2
3 changed files with 96 additions and 59 deletions


@@ -1421,6 +1421,9 @@ impl Database {
             .exec(&*tx)
             .await?;

+        self.update_project_worktrees(project_id, &reshared_project.worktrees, &tx)
+            .await?;
+
         reshared_projects.push(ResharedProject {
             id: project_id,
             old_connection_id,
@@ -1970,35 +1973,7 @@ impl Database {
            .await?
            .ok_or_else(|| anyhow!("no such project"))?;

-        if !worktrees.is_empty() {
-            worktree::Entity::insert_many(worktrees.iter().map(|worktree| {
-                worktree::ActiveModel {
-                    id: ActiveValue::set(worktree.id as i64),
-                    project_id: ActiveValue::set(project.id),
-                    abs_path: ActiveValue::set(worktree.abs_path.clone()),
-                    root_name: ActiveValue::set(worktree.root_name.clone()),
-                    visible: ActiveValue::set(worktree.visible),
-                    scan_id: ActiveValue::set(0),
-                    is_complete: ActiveValue::set(false),
-                }
-            }))
-            .on_conflict(
-                OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id])
-                    .update_column(worktree::Column::RootName)
-                    .to_owned(),
-            )
-            .exec(&*tx)
-            .await?;
-        }
-
-        worktree::Entity::delete_many()
-            .filter(
-                worktree::Column::ProjectId.eq(project.id).and(
-                    worktree::Column::Id
-                        .is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)),
-                ),
-            )
-            .exec(&*tx)
-            .await?;
+        self.update_project_worktrees(project.id, worktrees, &tx)
+            .await?;

        let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?;
@@ -2008,6 +1983,41 @@ impl Database {
            .await
    }

+    async fn update_project_worktrees(
+        &self,
+        project_id: ProjectId,
+        worktrees: &[proto::WorktreeMetadata],
+        tx: &DatabaseTransaction,
+    ) -> Result<()> {
+        if !worktrees.is_empty() {
+            worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
+                id: ActiveValue::set(worktree.id as i64),
+                project_id: ActiveValue::set(project_id),
+                abs_path: ActiveValue::set(worktree.abs_path.clone()),
+                root_name: ActiveValue::set(worktree.root_name.clone()),
+                visible: ActiveValue::set(worktree.visible),
+                scan_id: ActiveValue::set(0),
+                is_complete: ActiveValue::set(false),
+            }))
+            .on_conflict(
+                OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id])
+                    .update_column(worktree::Column::RootName)
+                    .to_owned(),
+            )
+            .exec(&*tx)
+            .await?;
+        }
+
+        worktree::Entity::delete_many()
+            .filter(worktree::Column::ProjectId.eq(project_id).and(
+                worktree::Column::Id.is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)),
+            ))
+            .exec(&*tx)
+            .await?;
+
+        Ok(())
+    }
+
    pub async fn update_worktree(
        &self,
        update: &proto::UpdateWorktree,


@@ -1336,7 +1336,9 @@ async fn test_host_reconnect(
                }
            },
            "dir2": {
-                "x.txt": "x-contents",
+                "x": "x-contents",
+                "y": "y-contents",
+                "z": "z-contents",
            },
        }),
    )
@@ -1344,7 +1346,8 @@ async fn test_host_reconnect(

    let active_call_a = cx_a.read(ActiveCall::global);
    let (project_a, _) = client_a.build_local_project("/root/dir1", cx_a).await;
-    let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap());
+    let worktree_a1 =
+        project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap());
    let project_id = active_call_a
        .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx))
        .await
@@ -1353,7 +1356,7 @@ async fn test_host_reconnect(
    let project_b = client_b.build_remote_project(project_id, cx_b).await;
    deterministic.run_until_parked();

-    let worktree_id = worktree_a.read_with(cx_a, |worktree, _| {
+    let worktree1_id = worktree_a1.read_with(cx_a, |worktree, _| {
        assert!(worktree.as_local().unwrap().is_shared());
        worktree.id()
    });
@@ -1370,11 +1373,11 @@ async fn test_host_reconnect(
        assert!(!project.is_read_only());
        assert_eq!(project.collaborators().len(), 1);
    });
-    worktree_a.read_with(cx_a, |tree, _| {
+    worktree_a1.read_with(cx_a, |tree, _| {
        assert!(tree.as_local().unwrap().is_shared())
    });

-    // While disconnected, add and remove files from the client A's project.
+    // While disconnected, add/remove files and worktrees from client A's project.
    client_a
        .fs
        .insert_tree(
@@ -1398,6 +1401,20 @@ async fn test_host_reconnect(
    )
    .await
    .unwrap();
+    let (worktree_a2, _) = project_a
+        .update(cx_a, |p, cx| {
+            p.find_or_create_local_worktree("/root/dir2", true, cx)
+        })
+        .await
+        .unwrap();
+    worktree_a2
+        .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
+        .await;
+    let worktree2_id = worktree_a2.read_with(cx_a, |tree, _| {
+        assert!(tree.as_local().unwrap().is_shared());
+        tree.id()
+    });
+
    deterministic.run_until_parked();

    // Client A reconnects. Their project is re-shared, and client B re-joins it.
    server.allow_connections();
@@ -1409,7 +1426,7 @@ async fn test_host_reconnect(
    project_a.read_with(cx_a, |project, cx| {
        assert!(project.is_shared());
        assert_eq!(
-            worktree_a
+            worktree_a1
                .read(cx)
                .snapshot()
                .paths()
@@ -1429,12 +1446,22 @@ async fn test_host_reconnect(
                "subdir2/i.txt"
            ]
        );
+        assert_eq!(
+            worktree_a2
+                .read(cx)
+                .snapshot()
+                .paths()
+                .map(|p| p.to_str().unwrap())
+                .collect::<Vec<_>>(),
+            vec!["x", "y", "z"]
+        );
    });
    project_b.read_with(cx_b, |project, cx| {
        assert!(!project.is_read_only());
-        let worktree_b = project.worktree_for_id(worktree_id, cx).unwrap();
        assert_eq!(
-            worktree_b
+            project
+                .worktree_for_id(worktree1_id, cx)
+                .unwrap()
                .read(cx)
                .snapshot()
                .paths()
@@ -1454,6 +1481,17 @@ async fn test_host_reconnect(
                "subdir2/i.txt"
            ]
        );
+        assert_eq!(
+            project
+                .worktree_for_id(worktree2_id, cx)
+                .unwrap()
+                .read(cx)
+                .snapshot()
+                .paths()
+                .map(|p| p.to_str().unwrap())
+                .collect::<Vec<_>>(),
+            vec!["x", "y", "z"]
+        );
    });

}
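
The interesting assertion in this test is that `worktree_a2` already reports `is_shared()` while the host is still disconnected: the share is recorded locally and only reaches the server after reconnection, at which point client B can see `x`, `y`, and `z`. A self-contained sketch of that queue-then-flush idea, with invented names (`OfflineShares`, `send_share_message`) rather than Zed's actual types:

```rust
use std::collections::VecDeque;

// Illustrative only: record shares immediately, deliver them on reconnect.
#[derive(Default)]
struct OfflineShares {
    connected: bool,
    pending: VecDeque<u64>, // worktree ids awaiting a share message
}

impl OfflineShares {
    fn share(&mut self, worktree_id: u64) {
        if self.connected {
            send_share_message(worktree_id);
        } else {
            // Considered shared locally right away; the message goes out later.
            self.pending.push_back(worktree_id);
        }
    }

    fn reconnect(&mut self) {
        self.connected = true;
        while let Some(id) = self.pending.pop_front() {
            send_share_message(id);
        }
    }
}

// Stand-in for the RPC call to the collaboration server.
fn send_share_message(worktree_id: u64) {
    println!("sharing worktree {worktree_id}");
}
```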


@@ -1028,28 +1028,16 @@ impl Project {
        cx.emit(Event::RemoteIdChanged(Some(project_id)));
        cx.notify();

-        let mut status = self.client.status();
        let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded();
        self.client_state = Some(ProjectClientState::Local {
            remote_id: project_id,
            metadata_changed: metadata_changed_tx,
            _maintain_metadata: cx.spawn_weak(move |this, cx| async move {
                let mut txs = Vec::new();
-                loop {
-                    select_biased! {
-                        tx = metadata_changed_rx.next().fuse() => {
-                            let Some(tx) = tx else { break };
-                            txs.push(tx);
-                            while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() {
-                                txs.push(next_tx);
-                            }
-                        }
-                        status = status.next().fuse() => {
-                            let Some(status) = status else { break };
-                            if !status.is_connected() {
-                                continue
-                            }
-                        }
-                    }
+                while let Some(tx) = metadata_changed_rx.next().await {
+                    txs.push(tx);
+                    while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() {
+                        txs.push(next_tx);
+                    }

                    let Some(this) = this.upgrade(&cx) else { break };
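
The rewritten `_maintain_metadata` task no longer selects on connection status; it simply coalesces notifications: block on the first queued sender, then drain whatever else is pending so a burst of metadata changes produces a single update. A minimal sketch of that drain pattern using the same `futures` channel API the diff uses (the real metadata broadcast is replaced by a `println!`):

```rust
use futures::{channel::mpsc, StreamExt};

// Wait for one change notification, then drain the queue without blocking,
// so a burst of changes is flushed as a single batch.
async fn coalesce_metadata_changes(mut rx: mpsc::UnboundedReceiver<()>) {
    while let Some(first) = rx.next().await {
        let mut batch = vec![first];
        while let Ok(Some(next)) = rx.try_next() {
            batch.push(next);
        }
        // Stand-in for the actual metadata update.
        println!("flushing {} coalesced change(s)", batch.len());
    }
}
```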
@@ -4284,12 +4272,13 @@ impl Project {
                if let Some(project_id) =
                    project.read_with(&cx, |project, _| project.remote_id())
                {
-                    worktree
-                        .update(&mut cx, |worktree, cx| {
-                            worktree.as_local_mut().unwrap().share(project_id, cx)
-                        })
-                        .await
-                        .log_err();
+                    worktree.update(&mut cx, |worktree, cx| {
+                        worktree
+                            .as_local_mut()
+                            .unwrap()
+                            .share(project_id, cx)
+                            .detach_and_log_err(cx);
+                    });
                }

                Ok(worktree)
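
Awaiting `share` would wedge here while the host is offline, since that future can only resolve once the server acknowledges the share; detaching it lets `Ok(worktree)` return immediately, with any eventual error logged in the background. A rough standalone analogue of that fire-and-forget shape, using `tokio` and `log` rather than Zed's own `Task::detach_and_log_err`:

```rust
use std::future::Future;

// Spawn the fallible future in the background and log its error instead of
// making the caller wait for it to finish.
fn detach_and_log_err<F>(fut: F)
where
    F: Future<Output = anyhow::Result<()>> + Send + 'static,
{
    tokio::spawn(async move {
        if let Err(err) = fut.await {
            log::error!("detached task failed: {err:#}");
        }
    });
}
```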