More refactoring and a slightly better API

Kay Simmons 2022-11-02 18:09:35 -07:00 committed by Mikayla Maki
parent aa7b909b7b
commit eb0598dac2
7 changed files with 588 additions and 627 deletions

crates/db/src/db.rs

@@ -1,7 +1,5 @@
pub mod items;
pub mod kvp;
mod migrations;
pub mod pane;
pub mod workspace;
use std::fs;
@@ -11,10 +9,10 @@ use std::path::Path;
use anyhow::Result;
use indoc::indoc;
use kvp::KVP_MIGRATION;
use pane::PANE_MIGRATIONS;
use sqlez::connection::Connection;
use sqlez::thread_safe_connection::ThreadSafeConnection;
use workspace::pane::PANE_MIGRATIONS;
pub use workspace::*;
#[derive(Clone)]

crates/db/src/pane.rs

@@ -1,310 +0,0 @@
use gpui::Axis;
use indoc::indoc;
use sqlez::{
bindable::{Bind, Column},
migrations::Migration,
statement::Statement,
};
use util::{iife, ResultExt};
use crate::{items::ItemId, workspace::WorkspaceId, DockAnchor};
use super::Db;
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
"pane",
&[indoc! {"
CREATE TABLE pane_groups(
group_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
parent_group INTEGER, -- NULL indicates that this is a root node
axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE panes(
pane_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
group_id INTEGER, -- If null, this is a dock pane
idx INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
pane_id INTEGER NOT NULL,
workspace_id INTEGER NOT NULL,
kind TEXT NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
PRIMARY KEY(item_id, workspace_id)
) STRICT;
"}],
);
// We have a many-branched, unbalanced tree with three types:
// Pane Groups
// Panes
// Items
// The root is always a Pane Group
// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children
// Panes can have 0 or more items as children
// Panes can be their own root
// Items cannot have children
// References pointing down are hard (SQL doesn't like arrays)
// References pointing up are easy (a 1-1 item / parent relationship) but harder to query
//
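// As an illustrative sketch (not part of this commit), walking the tree
// downward with up-pointing references takes one query per level, e.g. for
// the panes directly under a group:
fn _child_panes(db: &Db, group_id: i64) -> Vec<i64> {
    iife!({
        db.prepare("SELECT pane_id FROM panes WHERE group_id = ? ORDER BY idx")?
            .with_bindings(group_id)?
            .rows::<i64>()
    })
    .log_err()
    .unwrap_or_default()
}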
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct PaneId {
workspace_id: WorkspaceId,
pane_id: usize,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct PaneGroupId {
workspace_id: WorkspaceId,
}
impl PaneGroupId {
pub fn root(workspace_id: WorkspaceId) -> Self {
Self {
workspace_id,
// group_id: 0,
}
}
}
#[derive(Debug, PartialEq, Eq, Default)]
pub struct SerializedPaneGroup {
axis: Axis,
children: Vec<PaneGroupChild>,
}
impl SerializedPaneGroup {
pub fn empty_root(_workspace_id: WorkspaceId) -> Self {
Self {
// group_id: PaneGroupId::root(workspace_id),
axis: Default::default(),
children: Default::default(),
}
}
}
struct _PaneGroupChildRow {
child_pane_id: Option<usize>,
child_group_id: Option<usize>,
index: usize,
}
#[derive(Debug, PartialEq, Eq)]
pub enum PaneGroupChild {
Pane(SerializedPane),
Group(SerializedPaneGroup),
}
#[derive(Debug, PartialEq, Eq)]
pub struct SerializedPane {
items: Vec<ItemId>,
}
//********* CURRENTLY IN USE TYPES: *********
#[derive(Default, Debug, PartialEq, Eq)]
pub struct SerializedDockPane {
pub anchor_position: DockAnchor,
pub visible: bool,
}
impl SerializedDockPane {
fn to_row(&self, workspace: &WorkspaceId) -> DockRow {
DockRow {
workspace_id: *workspace,
anchor_position: self.anchor_position,
visible: self.visible,
}
}
}
impl Column for SerializedDockPane {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<(DockAnchor, bool) as Column>::column(statement, start_index).map(
|((anchor_position, visible), next_index)| {
(
SerializedDockPane {
anchor_position,
visible,
},
next_index,
)
},
)
}
}
#[derive(Default, Debug, PartialEq, Eq)]
pub(crate) struct DockRow {
workspace_id: WorkspaceId,
anchor_position: DockAnchor,
visible: bool,
}
impl Bind for DockRow {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
statement.bind(
(self.workspace_id, self.anchor_position, self.visible),
start_index,
)
}
}
impl Column for DockRow {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map(
|((workspace_id, anchor_position, visible), next_index)| {
(
DockRow {
workspace_id,
anchor_position,
visible,
},
next_index,
)
},
)
}
}
impl Db {
pub fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup {
unimplemented!()
}
pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup {
unimplemented!()
// let axis = self.get_pane_group_axis(pane_group_id);
// let mut children: Vec<(usize, PaneGroupChild)> = Vec::new();
// for child_row in self.get_pane_group_children(pane_group_id) {
// if let Some(child_pane_id) = child_row.child_pane_id {
// children.push((
// child_row.index,
// PaneGroupChild::Pane(self.get_pane(PaneId {
// workspace_id: pane_group_id.workspace_id,
// pane_id: child_pane_id,
// })),
// ));
// } else if let Some(child_group_id) = child_row.child_group_id {
// children.push((
// child_row.index,
// PaneGroupChild::Group(self.get_pane_group(PaneGroupId {
// workspace_id: pane_group_id.workspace_id,
// group_id: child_group_id,
// })),
// ));
// }
// }
// children.sort_by_key(|(index, _)| *index);
// SerializedPaneGroup {
// group_id: pane_group_id,
// axis,
// children: children.into_iter().map(|(_, child)| child).collect(),
// }
}
fn _get_pane_group_children(
&self,
_pane_group_id: PaneGroupId,
) -> impl Iterator<Item = _PaneGroupChildRow> {
Vec::new().into_iter()
}
fn _get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis {
unimplemented!();
}
pub fn save_pane_splits(
&self,
_workspace: &WorkspaceId,
_center_pane_group: &SerializedPaneGroup,
) {
// Delete the center pane group for this workspace and any of its children
// Generate new pane group IDs as we go through
// insert them
}
pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane {
unimplemented!();
}
pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option<SerializedDockPane> {
iife!({
self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?
.with_bindings(workspace)?
.maybe_row::<SerializedDockPane>()
})
.log_err()
.flatten()
}
pub fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) {
iife!({
self.prepare(
"INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);",
)?
.with_bindings(dock_pane.to_row(workspace))?
.insert()
})
.log_err();
}
}
#[cfg(test)]
mod tests {
use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor};
use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup};
#[test]
fn test_basic_dock_pane() {
let db = Db::open_in_memory("basic_dock_pane");
let workspace = db.workspace_for_roots(&["/tmp"]);
let dock_pane = SerializedDockPane {
anchor_position: DockAnchor::Expanded,
visible: true,
};
db.save_dock_pane(&workspace.workspace_id, &dock_pane);
let new_workspace = db.workspace_for_roots(&["/tmp"]);
assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane);
}
#[test]
fn test_dock_simple_split() {
let db = Db::open_in_memory("simple_split");
let workspace = db.workspace_for_roots(&["/tmp"]);
// Pane group -> Pane -> 10 , 20
let center_pane = SerializedPaneGroup {
axis: gpui::Axis::Horizontal,
children: vec![PaneGroupChild::Pane(SerializedPane {
items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }],
})],
};
db.save_pane_splits(&workspace.workspace_id, &center_pane);
// let new_workspace = db.workspace_for_roots(&["/tmp"]);
// assert_eq!(new_workspace.center_group, center_pane);
}
}

crates/db/src/workspace.rs

@@ -1,23 +1,14 @@
use anyhow::{bail, Context, Result};
use util::{iife, ResultExt};
mod items;
pub mod model;
pub(crate) mod pane;
use std::{
fmt::Debug,
os::unix::prelude::OsStrExt,
path::{Path, PathBuf},
};
use anyhow::{Context, Result};
use util::ResultExt;
use indoc::indoc;
use sqlez::{
bindable::{Bind, Column},
connection::Connection,
migrations::Migration,
statement::Statement,
};
use std::path::{Path, PathBuf};
use crate::pane::{SerializedDockPane, SerializedPaneGroup};
use super::Db;
use indoc::{formatdoc, indoc};
use sqlez::{connection::Connection, migrations::Migration};
// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging.
// You might want to update some of the parsing code as well; I've left the variations in, but commented out.
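// Illustrative only (not part of this commit): the same root can be stored as
// exact bytes (BLOB) or, for debugging, as a lossy string (TEXT). Requires
// std::os::unix::prelude::OsStrExt for as_bytes().
fn _root_as_blob(root: &Path) -> &[u8] {
    // BLOB form: round-trips any path exactly, including non-UTF-8 bytes
    root.as_os_str().as_bytes()
}
fn _root_as_text(root: &Path) -> String {
    // TEXT form: readable in a database browser, but lossy for non-UTF-8 paths
    root.to_string_lossy().to_string()
}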
@@ -37,87 +28,34 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
workspace_id INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
PRIMARY KEY(worktree_root, workspace_id)
) STRICT;"}],
) STRICT;
"}],
);
#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
pub(crate) struct WorkspaceId(i64);
use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow};
impl Bind for WorkspaceId {
fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
self.0.bind(statement, start_index)
}
}
impl Column for WorkspaceId {
fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index))
}
}
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum DockAnchor {
#[default]
Bottom,
Right,
Expanded,
}
impl Bind for DockAnchor {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
match self {
DockAnchor::Bottom => "Bottom",
DockAnchor::Right => "Right",
DockAnchor::Expanded => "Expanded",
}
.bind(statement, start_index)
}
}
impl Column for DockAnchor {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
String::column(statement, start_index).and_then(|(anchor_text, next_index)| {
Ok((
match anchor_text.as_ref() {
"Bottom" => DockAnchor::Bottom,
"Right" => DockAnchor::Right,
"Expanded" => DockAnchor::Expanded,
_ => bail!("Stored dock anchor is incorrect"),
},
next_index,
))
})
}
}
type WorkspaceRow = (WorkspaceId, DockAnchor, bool);
#[derive(Default, Debug)]
pub struct SerializedWorkspace {
pub center_group: SerializedPaneGroup,
pub dock_anchor: DockAnchor,
pub dock_visible: bool,
pub dock_pane: SerializedDockPane,
}
use super::Db;
impl Db {
/// Finds or creates a workspace id for the given set of worktree roots. If the passed
/// set of worktree roots is empty, returns the most recently updated workspace.
pub fn workspace_for_roots<P>(&self, worktree_roots: &[P]) -> Option<SerializedWorkspace>
where
P: AsRef<Path> + Debug,
{
pub fn workspace_for_roots<P: AsRef<Path>>(
&self,
worktree_roots: &[P],
) -> Option<SerializedWorkspace> {
// Find the workspace id which is uniquely identified by this set of paths
// return it if found
let mut workspace_row = get_workspace(worktree_roots, &self)
.log_err()
.unwrap_or_default();
if workspace_row.is_none() && worktree_roots.len() == 0 {
// Return last workspace if no roots passed
workspace_row = self.prepare(
"SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1"
).and_then(|mut stmt| stmt.maybe_row::<WorkspaceRow>())
.log_err()
.flatten()
.flatten();
}
workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| {
@@ -130,66 +68,56 @@ impl Db {
})
}
/// TODO: Change to be 'update workspace' and to serialize the whole workspace in one go.
///
/// Updates the open paths for the given workspace id. Will garbage collect items from
/// any workspace ids which are now replaced by the new workspace id. Updates the timestamps
/// in the workspace id table
pub fn update_worktrees<P>(&self, workspace_id: &WorkspaceId, worktree_roots: &[P])
where
P: AsRef<Path> + Debug,
{
/// Saves a workspace using the worktree roots. Will garbage collect any workspaces
/// that previously used these roots
pub fn save_workspace<P: AsRef<Path>>(
&self,
worktree_roots: &[P],
workspace: SerializedWorkspace,
) {
self.with_savepoint("update_worktrees", |conn| {
// Look up any old WorkspaceIds which have the same set of roots, and delete them.
let preexisting_workspace = get_workspace(worktree_roots, &conn)?;
if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace {
if preexisting_workspace_id != *workspace_id {
// Cascading deletes should also remove rows in other tables
conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")?
.with_bindings(preexisting_workspace_id)?
.exec()?;
}
if let Some((id_to_delete, _, _)) = get_workspace(worktree_roots, &conn)? {
// Cascading deletes should also remove rows in other tables; a new entry is
// inserted below
conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")?
.with_bindings(id_to_delete)?
.exec()?;
}
conn.prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")?
.with_bindings(workspace_id.0)?
.exec()?;
// Insert new workspace into workspaces table if none were found
let workspace_id = WorkspaceId(
conn.prepare("INSERT INTO workspaces(dock_anchor, dock_visible) VALUES (?, ?)")?
.with_bindings((workspace.dock_anchor, workspace.dock_visible))?
.insert()?,
);
// Write worktree_roots with new workspace_id
for root in worktree_roots {
let path = root.as_ref().as_os_str().as_bytes();
// If you need to debug this, here's the string parsing:
// let path = root.as_ref().to_string_lossy().to_string();
conn.prepare(
"INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
)?
.with_bindings((workspace_id.0, path))?
.with_bindings((workspace_id, root.as_ref()))?
.exec()?;
}
conn.prepare(
"UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?",
)?
.with_bindings(workspace_id.0)?
.exec()?;
Ok(())
})
.context("Update workspace {workspace_id:?} with roots {worktree_roots:?}")
.context("Update workspace with roots {worktree_roots:?}")
.log_err();
}
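// A hypothetical call site for the API above (not part of this commit):
//
//     let workspace = SerializedWorkspace {
//         dock_anchor: DockAnchor::Expanded,
//         dock_visible: true,
//         ..Default::default()
//     };
//     db.save_workspace(&["/tmp", "/tmp2"], workspace);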
/// Returns the previous workspace ids, sorted by last modified, along with their opened worktree roots
pub fn recent_workspaces(&self, limit: usize) -> Vec<Vec<PathBuf>> {
self.with_savepoint("recent_workspaces", |conn| {
let mut stmt =
let mut roots_by_id =
conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?;
conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?
.with_bindings(limit)?
.rows::<WorkspaceId>()?
.iter()
.map(|workspace_id| stmt.with_bindings(workspace_id.0)?.rows::<PathBuf>())
.map(|workspace_id| roots_by_id.with_bindings(workspace_id.0)?.rows::<PathBuf>())
.collect::<Result<_>>()
})
.log_err()
@@ -197,25 +125,15 @@ impl Db {
}
}
fn get_workspace<P>(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceRow>>
where
P: AsRef<Path> + Debug,
{
fn get_workspace<P: AsRef<Path>>(
worktree_roots: &[P],
connection: &Connection,
) -> Result<Option<WorkspaceRow>> {
// Short circuit if we can
if worktree_roots.len() == 0 {
return Ok(None);
}
// Prepare the array binding string. SQL doesn't have syntax for this, so
// we have to do it ourselves.
let array_binding_stmt = format!(
"({})",
(0..worktree_roots.len())
.map(|index| format!("?{}", index + 1))
.collect::<Vec<_>>()
.join(", ")
);
// Any workspace can have multiple independent paths, and these paths
// can overlap in the database. Take this test data for example:
//
@@ -266,230 +184,225 @@ where
// Note: due to limitations in SQLite's query binding, we have to generate the prepared
// statement with string substitution (the {array_bind}) below, and then bind the
// parameters by number.
let query = format!(
r#"
SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible
FROM (SELECT workspace_id
FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
WHERE worktree_root in {array_bind} AND workspace_id NOT IN
(SELECT wt1.workspace_id FROM worktree_roots as wt1
JOIN worktree_roots as wt2
ON wt1.workspace_id = wt2.workspace_id
WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind})
GROUP BY workspace_id)
WHERE num_matching = ?) as matching_workspace
JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id
"#,
array_bind = array_binding_stmt
);
// This will only be called on startup and when root workspaces change, so there's
// no need to waste memory caching it.
let mut stmt = connection.prepare(&query)?;
// Make sure we bound the parameters correctly
debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count());
let root_bytes: Vec<&[u8]> = worktree_roots
.iter()
.map(|root| root.as_ref().as_os_str().as_bytes())
.collect();
let num_of_roots = root_bytes.len();
stmt.with_bindings((root_bytes, num_of_roots))?
connection
.prepare(formatdoc! {"
SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible
FROM (SELECT workspace_id
FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
WHERE worktree_root in ({roots}) AND workspace_id NOT IN
(SELECT wt1.workspace_id FROM worktree_roots as wt1
JOIN worktree_roots as wt2
ON wt1.workspace_id = wt2.workspace_id
WHERE wt1.worktree_root NOT in ({roots}) AND wt2.worktree_root in ({roots}))
GROUP BY workspace_id)
WHERE num_matching = ?) as matching_workspace
JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id",
roots =
// Prepare the array binding string. SQL doesn't have syntax for this, so
// we have to do it ourselves.
(0..worktree_roots.len())
.map(|index| format!("?{}", index + 1))
.collect::<Vec<_>>()
.join(", ")
})?
.with_bindings((
worktree_roots
.into_iter()
.map(|p| p.as_ref())
.collect::<Vec<&Path>>(),
worktree_roots.len(),
))?
.maybe_row::<WorkspaceRow>()
}
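// For example, with worktree_roots = ["/tmp", "/tmp2"] the generated binding
// string is "?1, ?2", each of the three IN clauses becomes "in (?1, ?2)", and
// the trailing bare "?" for num_matching binds next in sequence (as ?3) with
// the value 2.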
#[cfg(test)]
mod tests {
use std::{path::PathBuf, thread::sleep, time::Duration};
// use std::{path::PathBuf, thread::sleep, time::Duration};
use crate::Db;
// use crate::Db;
use super::WorkspaceId;
// use super::WorkspaceId;
#[test]
fn test_new_worktrees_for_roots() {
env_logger::init();
let db = Db::open_in_memory("test_new_worktrees_for_roots");
// #[test]
// fn test_workspace_saving() {
// env_logger::init();
// let db = Db::open_in_memory("test_new_worktrees_for_roots");
// Test creation in 0 case
let workspace_1 = db.workspace_for_roots::<String>(&[]);
assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
// // Test nothing returned with no roots at first
// assert_eq!(db.workspace_for_roots::<String>(&[]), None);
// Test pulling from recent workspaces
let workspace_1 = db.workspace_for_roots::<String>(&[]);
assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
// // Test creation
// let workspace_1 = db.workspace_for_roots::<String>(&[]);
// assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
// Ensure the timestamps are different
sleep(Duration::from_secs(1));
db.make_new_workspace::<String>(&[]);
// // Ensure the timestamps are different
// sleep(Duration::from_secs(1));
// db.make_new_workspace::<String>(&[]);
// Test pulling another value from recent workspaces
let workspace_2 = db.workspace_for_roots::<String>(&[]);
assert_eq!(workspace_2.workspace_id, WorkspaceId(2));
// // Test pulling another value from recent workspaces
// let workspace_2 = db.workspace_for_roots::<String>(&[]);
// assert_eq!(workspace_2.workspace_id, WorkspaceId(2));
// Ensure the timestamps are different
sleep(Duration::from_secs(1));
// // Ensure the timestamps are different
// sleep(Duration::from_secs(1));
// Test creating a new workspace that doesn't exist already
let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// // Test creating a new workspace that doesn't exist already
// let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
// assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// Make sure it's in the recent workspaces....
let workspace_3 = db.workspace_for_roots::<String>(&[]);
assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// // Make sure it's in the recent workspaces....
// let workspace_3 = db.workspace_for_roots::<String>(&[]);
// assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// And that it can be pulled out again
let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
}
// // And that it can be pulled out again
// let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
// assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// }
#[test]
fn test_empty_worktrees() {
let db = Db::open_in_memory("test_empty_worktrees");
// #[test]
// fn test_empty_worktrees() {
// let db = Db::open_in_memory("test_empty_worktrees");
assert_eq!(None, db.workspace::<String>(&[]));
// assert_eq!(None, db.workspace::<String>(&[]));
db.make_new_workspace::<String>(&[]); //ID 1
db.make_new_workspace::<String>(&[]); //ID 2
db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
// db.make_new_workspace::<String>(&[]); //ID 1
// db.make_new_workspace::<String>(&[]); //ID 2
// db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
// Sanity check
assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1));
// // Sanity check
// assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1));
db.update_worktrees::<String>(&WorkspaceId(1), &[]);
// db.update_worktrees::<String>(&WorkspaceId(1), &[]);
// Make sure 'no worktrees' fails correctly. Returning [1, 2] from this
// call would be semantically correct (as those are the workspaces that
// don't have roots), but I'd prefer that this API return exactly one
// workspace, and None otherwise
assert_eq!(db.workspace::<String>(&[]), None,);
// // Make sure 'no worktrees' fails correctly. Returning [1, 2] from this
// // call would be semantically correct (as those are the workspaces that
// // don't have roots), but I'd prefer that this API return exactly one
// // workspace, and None otherwise
// assert_eq!(db.workspace::<String>(&[]), None,);
assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1));
// assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1));
assert_eq!(
db.recent_workspaces(2),
vec![Vec::<PathBuf>::new(), Vec::<PathBuf>::new()],
)
}
// assert_eq!(
// db.recent_workspaces(2),
// vec![Vec::<PathBuf>::new(), Vec::<PathBuf>::new()],
// )
// }
#[test]
fn test_more_workspace_ids() {
let data = &[
(WorkspaceId(1), vec!["/tmp1"]),
(WorkspaceId(2), vec!["/tmp1", "/tmp2"]),
(WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]),
(WorkspaceId(4), vec!["/tmp2", "/tmp3"]),
(WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]),
(WorkspaceId(6), vec!["/tmp2", "/tmp4"]),
(WorkspaceId(7), vec!["/tmp2"]),
];
// #[test]
// fn test_more_workspace_ids() {
// let data = &[
// (WorkspaceId(1), vec!["/tmp1"]),
// (WorkspaceId(2), vec!["/tmp1", "/tmp2"]),
// (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]),
// (WorkspaceId(4), vec!["/tmp2", "/tmp3"]),
// (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]),
// (WorkspaceId(6), vec!["/tmp2", "/tmp4"]),
// (WorkspaceId(7), vec!["/tmp2"]),
// ];
let db = Db::open_in_memory("test_more_workspace_ids");
// let db = Db::open_in_memory("test_more_workspace_ids");
for (workspace_id, entries) in data {
db.make_new_workspace::<String>(&[]);
db.update_worktrees(workspace_id, entries);
}
// for (workspace_id, entries) in data {
// db.make_new_workspace::<String>(&[]);
// db.update_worktrees(workspace_id, entries);
// }
assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0);
assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2));
assert_eq!(
db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0,
WorkspaceId(3)
);
assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4));
assert_eq!(
db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0,
WorkspaceId(5)
);
assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6));
assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7));
// assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0);
// assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2));
// assert_eq!(
// db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0,
// WorkspaceId(3)
// );
// assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4));
// assert_eq!(
// db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0,
// WorkspaceId(5)
// );
// assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6));
// assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7));
assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None);
assert_eq!(db.workspace(&["/tmp5"]), None);
assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None);
}
// assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None);
// assert_eq!(db.workspace(&["/tmp5"]), None);
// assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None);
// }
#[test]
fn test_detect_workspace_id() {
let data = &[
(WorkspaceId(1), vec!["/tmp"]),
(WorkspaceId(2), vec!["/tmp", "/tmp2"]),
(WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]),
];
// #[test]
// fn test_detect_workspace_id() {
// let data = &[
// (WorkspaceId(1), vec!["/tmp"]),
// (WorkspaceId(2), vec!["/tmp", "/tmp2"]),
// (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]),
// ];
let db = Db::open_in_memory("test_detect_workspace_id");
// let db = Db::open_in_memory("test_detect_workspace_id");
for (workspace_id, entries) in data {
db.make_new_workspace::<String>(&[]);
db.update_worktrees(workspace_id, entries);
}
// for (workspace_id, entries) in data {
// db.make_new_workspace::<String>(&[]);
// db.update_worktrees(workspace_id, entries);
// }
assert_eq!(db.workspace(&["/tmp2"]), None);
assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None);
assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2));
assert_eq!(
db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0,
WorkspaceId(3)
);
}
// assert_eq!(db.workspace(&["/tmp2"]), None);
// assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None);
// assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
// assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2));
// assert_eq!(
// db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0,
// WorkspaceId(3)
// );
// }
#[test]
fn test_tricky_overlapping_updates() {
// DB state:
// (/tree) -> ID: 1
// (/tree, /tree2) -> ID: 2
// (/tree2, /tree3) -> ID: 3
// #[test]
// fn test_tricky_overlapping_updates() {
// // DB state:
// // (/tree) -> ID: 1
// // (/tree, /tree2) -> ID: 2
// // (/tree2, /tree3) -> ID: 3
// -> User updates 2 to: (/tree2, /tree3)
// // -> User updates 2 to: (/tree2, /tree3)
// DB state:
// (/tree) -> ID: 1
// (/tree2, /tree3) -> ID: 2
// Get rid of 3 for garbage collection
// // DB state:
// // (/tree) -> ID: 1
// // (/tree2, /tree3) -> ID: 2
// // Get rid of 3 for garbage collection
let data = &[
(WorkspaceId(1), vec!["/tmp"]),
(WorkspaceId(2), vec!["/tmp", "/tmp2"]),
(WorkspaceId(3), vec!["/tmp2", "/tmp3"]),
];
// let data = &[
// (WorkspaceId(1), vec!["/tmp"]),
// (WorkspaceId(2), vec!["/tmp", "/tmp2"]),
// (WorkspaceId(3), vec!["/tmp2", "/tmp3"]),
// ];
let db = Db::open_in_memory("test_tricky_overlapping_update");
// let db = Db::open_in_memory("test_tricky_overlapping_update");
// Load in the test data
for (workspace_id, entries) in data {
db.make_new_workspace::<String>(&[]);
db.update_worktrees(workspace_id, entries);
}
// // Load in the test data
// for (workspace_id, entries) in data {
// db.make_new_workspace::<String>(&[]);
// db.update_worktrees(workspace_id, entries);
// }
sleep(Duration::from_secs(1));
// Execute the update
db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
// sleep(Duration::from_secs(1));
// // Execute the update
// db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
// Make sure that workspace 3 doesn't exist
assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2));
// // Make sure that workspace 3 doesn't exist
// assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2));
// And that workspace 1 was untouched
assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
// // And that workspace 1 was untouched
// assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
// And that workspace 2 is no longer registered under these roots
assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None);
// // And that workspace 2 is no longer registered under these roots
// assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None);
assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2));
// assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2));
let recent_workspaces = db.recent_workspaces(10);
assert_eq!(
recent_workspaces.get(0).unwrap(),
&vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")]
);
assert_eq!(
recent_workspaces.get(1).unwrap(),
&vec![PathBuf::from("/tmp")]
);
}
// let recent_workspaces = db.recent_workspaces(10);
// assert_eq!(
// recent_workspaces.get(0).unwrap(),
// &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")]
// );
// assert_eq!(
// recent_workspaces.get(1).unwrap(),
// &vec![PathBuf::from("/tmp")]
// );
// }
}

crates/db/src/items.rs

@@ -65,11 +65,6 @@
// ) STRICT;
// ";
#[derive(Debug, PartialEq, Eq)]
pub struct ItemId {
pub item_id: usize,
}
// enum SerializedItemKind {
// Editor,
// Diagnostics,

crates/db/src/workspace/model.rs

@@ -0,0 +1,173 @@
use anyhow::{bail, Result};
use gpui::Axis;
use sqlez::{
bindable::{Bind, Column},
statement::Statement,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
pub(crate) struct WorkspaceId(pub(crate) i64);
impl Bind for WorkspaceId {
fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
self.0.bind(statement, start_index)
}
}
impl Column for WorkspaceId {
fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index))
}
}
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum DockAnchor {
#[default]
Bottom,
Right,
Expanded,
}
impl Bind for DockAnchor {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
match self {
DockAnchor::Bottom => "Bottom",
DockAnchor::Right => "Right",
DockAnchor::Expanded => "Expanded",
}
.bind(statement, start_index)
}
}
impl Column for DockAnchor {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
String::column(statement, start_index).and_then(|(anchor_text, next_index)| {
Ok((
match anchor_text.as_ref() {
"Bottom" => DockAnchor::Bottom,
"Right" => DockAnchor::Right,
"Expanded" => DockAnchor::Expanded,
_ => bail!("Stored dock anchor is incorrect"),
},
next_index,
))
})
}
}
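// A hedged round-trip sketch for the TEXT-backed enum above (the docks table
// and `connection` are hypothetical; prepare/with_bindings/maybe_row are used
// as elsewhere in this crate):
//
//     connection.prepare("INSERT INTO docks(anchor) VALUES (?)")?
//         .with_bindings(DockAnchor::Right)?
//         .exec()?;
//     let anchor = connection.prepare("SELECT anchor FROM docks LIMIT 1")?
//         .maybe_row::<DockAnchor>()?; // Some(DockAnchor::Right)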
pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool);
#[derive(Default, Debug)]
pub struct SerializedWorkspace {
pub center_group: SerializedPaneGroup,
pub dock_anchor: DockAnchor,
pub dock_visible: bool,
pub dock_pane: SerializedDockPane,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct PaneId {
workspace_id: WorkspaceId,
pane_id: usize,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct PaneGroupId {
workspace_id: WorkspaceId,
}
impl PaneGroupId {
pub fn root(workspace_id: WorkspaceId) -> Self {
Self {
workspace_id,
// group_id: 0,
}
}
}
#[derive(Debug, PartialEq, Eq, Default)]
pub struct SerializedPaneGroup {
axis: Axis,
children: Vec<PaneGroupChild>,
}
impl SerializedPaneGroup {
pub(crate) fn empty_root(_workspace_id: WorkspaceId) -> Self {
Self {
// group_id: PaneGroupId::root(workspace_id),
axis: Default::default(),
children: Default::default(),
}
}
}
#[derive(Default, Debug, PartialEq, Eq)]
pub struct SerializedDockPane {
pub anchor_position: DockAnchor,
pub visible: bool,
}
impl SerializedDockPane {
fn to_row(&self, workspace: &WorkspaceId) -> DockRow {
DockRow {
workspace_id: *workspace,
anchor_position: self.anchor_position,
visible: self.visible,
}
}
}
impl Column for SerializedDockPane {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<(DockAnchor, bool) as Column>::column(statement, start_index).map(
|((anchor_position, visible), next_index)| {
(
SerializedDockPane {
anchor_position,
visible,
},
next_index,
)
},
)
}
}
#[derive(Default, Debug, PartialEq, Eq)]
pub(crate) struct DockRow {
workspace_id: WorkspaceId,
anchor_position: DockAnchor,
visible: bool,
}
impl Bind for DockRow {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
statement.bind(
(self.workspace_id, self.anchor_position, self.visible),
start_index,
)
}
}
impl Column for DockRow {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map(
|((workspace_id, anchor_position, visible), next_index)| {
(
DockRow {
workspace_id,
anchor_position,
visible,
},
next_index,
)
},
)
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct ItemId {
pub item_id: usize,
}

crates/db/src/workspace/pane.rs

@@ -0,0 +1,169 @@
use gpui::Axis;
use indoc::indoc;
use sqlez::migrations::Migration;
use util::{iife, ResultExt};
use super::{
model::{PaneGroupId, PaneId, SerializedDockPane, SerializedPaneGroup, WorkspaceId},
Db,
};
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
"pane",
&[indoc! {"
CREATE TABLE pane_groups(
group_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
parent_group INTEGER, -- NULL indicates that this is a root node
axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE panes(
pane_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
group_id INTEGER, -- If null, this is a dock pane
idx INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
pane_id INTEGER NOT NULL,
workspace_id INTEGER NOT NULL,
kind TEXT NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
PRIMARY KEY(item_id, workspace_id)
) STRICT;
"}],
);
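// A minimal sketch of applying this migration, assuming Migration exposes a
// `run` method taking a sqlez Connection (as with the other *_MIGRATIONS
// constants wired up in the crate root):
//
//     PANE_MIGRATIONS.run(&connection)?;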
impl Db {
pub(crate) fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup {
unimplemented!()
}
pub(crate) fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup {
unimplemented!()
// let axis = self.get_pane_group_axis(pane_group_id);
// let mut children: Vec<(usize, PaneGroupChild)> = Vec::new();
// for child_row in self.get_pane_group_children(pane_group_id) {
// if let Some(child_pane_id) = child_row.child_pane_id {
// children.push((
// child_row.index,
// PaneGroupChild::Pane(self.get_pane(PaneId {
// workspace_id: pane_group_id.workspace_id,
// pane_id: child_pane_id,
// })),
// ));
// } else if let Some(child_group_id) = child_row.child_group_id {
// children.push((
// child_row.index,
// PaneGroupChild::Group(self.get_pane_group(PaneGroupId {
// workspace_id: pane_group_id.workspace_id,
// group_id: child_group_id,
// })),
// ));
// }
// }
// children.sort_by_key(|(index, _)| *index);
// SerializedPaneGroup {
// group_id: pane_group_id,
// axis,
// children: children.into_iter().map(|(_, child)| child).collect(),
// }
}
// fn _get_pane_group_children(
// &self,
// _pane_group_id: PaneGroupId,
// ) -> impl Iterator<Item = PaneGroupChildRow> {
// Vec::new().into_iter()
// }
pub(crate) fn save_pane_splits(
&self,
_workspace: &WorkspaceId,
_center_pane_group: &SerializedPaneGroup,
) {
// Delete the center pane group for this workspace and any of its children
// Generate new pane group IDs as we go through
// insert them
}
pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane {
unimplemented!();
}
pub(crate) fn get_dock_pane(&self, workspace: WorkspaceId) -> Option<SerializedDockPane> {
iife!({
self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?
.with_bindings(workspace)?
.maybe_row::<SerializedDockPane>()
})
.log_err()
.flatten()
}
pub(crate) fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) {
// iife!({
// self.prepare(
// "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);",
// )?
// .with_bindings(dock_pane.to_row(workspace))?
// .insert()
// })
// .log_err();
}
}
#[cfg(test)]
mod tests {
// use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor};
// use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup};
// #[test]
// fn test_basic_dock_pane() {
// let db = Db::open_in_memory("basic_dock_pane");
// let workspace = db.workspace_for_roots(&["/tmp"]);
// let dock_pane = SerializedDockPane {
// anchor_position: DockAnchor::Expanded,
// visible: true,
// };
// db.save_dock_pane(&workspace.workspace_id, &dock_pane);
// let new_workspace = db.workspace_for_roots(&["/tmp"]);
// assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane);
// }
// #[test]
// fn test_dock_simple_split() {
// let db = Db::open_in_memory("simple_split");
// let workspace = db.workspace_for_roots(&["/tmp"]);
// // Pane group -> Pane -> 10 , 20
// let center_pane = SerializedPaneGroup {
// axis: gpui::Axis::Horizontal,
// children: vec![PaneGroupChild::Pane(SerializedPane {
// items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }],
// })],
// };
// db.save_pane_splits(&workspace.workspace_id, &center_pane);
// // let new_workspace = db.workspace_for_roots(&["/tmp"]);
// // assert_eq!(new_workspace.center_group, center_pane);
// }
}

crates/sqlez/src/bindable.rs

@@ -1,3 +1,9 @@
use std::{
ffi::{CString, OsStr},
os::unix::prelude::OsStrExt,
path::{Path, PathBuf},
};
use anyhow::Result;
use crate::statement::{SqlType, Statement};
@@ -241,3 +247,20 @@ impl<T: Bind> Bind for &[T] {
Ok(current_index)
}
}
impl Bind for &Path {
fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
self.as_os_str().as_bytes().bind(statement, start_index)
}
}
impl Column for PathBuf {
fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
let blob = statement.column_blob(start_index)?;
Ok((
PathBuf::from(OsStr::from_bytes(blob).to_owned()),
start_index + 1,
))
}
}
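// A hedged round-trip sketch for the new Path impls (the paths table and
// `connection` are hypothetical):
//
//     connection.prepare("CREATE TABLE paths(path BLOB)")?.exec()?;
//     connection.prepare("INSERT INTO paths(path) VALUES (?)")?
//         .with_bindings(Path::new("/tmp"))?
//         .exec()?;
//     let roots = connection.prepare("SELECT path FROM paths")?
//         .rows::<PathBuf>()?; // vec![PathBuf::from("/tmp")]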