From 1cdf5dff90db1c88acff95f0ed5993d9a8700794 Mon Sep 17 00:00:00 2001 From: Claude Date: Sat, 25 Apr 2026 07:25:36 +0000 Subject: [PATCH 1/4] perf(sync): hash-set membership check in get_sync_status The deletion-detection loop in `get_sync_status` scanned `local_files` linearly for every tracked path in `sync_state.files`, making the cost quadratic in the file count. The earlier "pending change" loop just above already does the inverse direction via `sync_state.files.get` (O(1)). Build a `HashSet<&str>` of local paths once and check it the same way to make the function O(n). This is called by the GUI status indicator, so the win shows up as soon as a workspace tracks more than a handful of files. --- crates/onyx-core/src/sync.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/onyx-core/src/sync.rs b/crates/onyx-core/src/sync.rs index a21cf9d..c97f0c6 100644 --- a/crates/onyx-core/src/sync.rs +++ b/crates/onyx-core/src/sync.rs @@ -914,9 +914,15 @@ pub fn get_sync_status(workspace_path: &Path) -> Result<SyncStatus> { } } - // Count files in base that are now missing locally (deleted) + // Count files in base that are now missing locally (deleted). + // Build a set of local paths once so the membership check is O(1) per + // tracked file instead of scanning local_files linearly each time. + let local_paths: std::collections::HashSet<&str> = local_files + .iter() + .map(|f| f.path.as_str()) + .collect(); for path in sync_state.files.keys() { - if !local_files.iter().any(|f| f.path == *path) { + if !local_paths.contains(path.as_str()) { pending_changes += 1; } } From 069afe8d5e7d9344cb9d0f71d99ad7ca25f5c9a9 Mon Sep 17 00:00:00 2001 From: Claude Date: Sat, 25 Apr 2026 07:26:56 +0000 Subject: [PATCH 2/4] perf(tauri): build child index once for cascade delete `delete_task`'s descendant walk re-scanned the full task list on every frontier pop, so the cost was O(n * depth) where n is the list size. 
For a list of a few hundred tasks with even moderate nesting that's already noticeable. Index `parent_id -> [child_id]` once up-front; the BFS then visits each descendant in O(1) amortised, dropping the total to O(n). --- apps/tauri/src-tauri/src/lib.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/apps/tauri/src-tauri/src/lib.rs b/apps/tauri/src-tauri/src/lib.rs index d71e981..eae1c25 100644 --- a/apps/tauri/src-tauri/src/lib.rs +++ b/apps/tauri/src-tauri/src/lib.rs @@ -455,12 +455,23 @@ fn delete_task( // so deleting a parent can't leave grandchildren orphaned with a // parent_id pointing at a deleted task. let all_tasks = repo.list_tasks(lid).map_err(|e| e.to_string())?; + // Build a parent -> children index in one pass so the BFS below is O(n) + // instead of O(n * depth) scanning all tasks for each frontier pop. + let mut children_by_parent: std::collections::HashMap<i64, Vec<i64>> = + std::collections::HashMap::new(); + for t in &all_tasks { + if let Some(pid) = t.parent_id { + children_by_parent.entry(pid).or_default().push(t.id); + } + } let mut to_delete: std::collections::HashSet<i64> = std::collections::HashSet::new(); let mut frontier: Vec<i64> = vec![tid]; while let Some(parent) = frontier.pop() { - for t in &all_tasks { - if t.parent_id == Some(parent) && to_delete.insert(t.id) { - frontier.push(t.id); + if let Some(children) = children_by_parent.get(&parent) { + for &child_id in children { + if to_delete.insert(child_id) { + frontier.push(child_id); + } } } } From 8c8735b2b4b172fc285284575abfbf248a8501e9 Mon Sep 17 00:00:00 2001 From: Claude Date: Sat, 25 Apr 2026 07:27:25 +0000 Subject: [PATCH 3/4] refactor(config): reuse storage::atomic_write for save_to_file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `AppConfig::save_to_file` had its own copy of the temp-file + rename + cleanup-on-failure dance. 
`storage::atomic_write` is already `pub(crate)` and does exactly that — `google_tasks.rs` was migrated to use it earlier. Drop the duplicate so there's one canonical atomic write path in the crate. --- crates/onyx-core/src/config.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/onyx-core/src/config.rs b/crates/onyx-core/src/config.rs index 1ad7769..e5af3fc 100644 --- a/crates/onyx-core/src/config.rs +++ b/crates/onyx-core/src/config.rs @@ -116,13 +116,7 @@ impl AppConfig { std::fs::create_dir_all(parent)?; } let content = serde_json::to_string_pretty(&self)?; - // Atomic write: write to temp file then rename to prevent corruption on crash - let temp = path.with_extension("tmp"); - std::fs::write(&temp, &content)?; - if let Err(e) = std::fs::rename(&temp, path) { - let _ = std::fs::remove_file(&temp); - return Err(e.into()); - } + crate::storage::atomic_write(path, content.as_bytes())?; Ok(()) } From e1c4fd7dfb3d929cc20791be690361396e720f21 Mon Sep 17 00:00:00 2001 From: Claude Date: Sat, 25 Apr 2026 07:28:33 +0000 Subject: [PATCH 4/4] docs(audit): log 2026-04-25 findings --- Audit.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Audit.md b/Audit.md index 977a2c7..03eba91 100644 --- a/Audit.md +++ b/Audit.md @@ -1,5 +1,13 @@ # Audit Log +## 2026-04-25 + +Found and fixed 3 issues: + +1. **Perf: O(n²) deletion-detection in `get_sync_status`** (sync.rs:918) — for every path tracked in `sync_state.files`, the loop scanned `local_files` linearly via `.any(|f| f.path == *path)` to decide whether to count it as a deleted-locally pending change. The earlier "modified or new" loop already used the inverse direction with `sync_state.files.get(...)` (O(1)), so the second loop was the inconsistent one. Built a `HashSet<&str>` of local paths once and used `contains` for the membership check. +2. 
**Perf: cascade delete walks all_tasks per frontier pop** (tauri/lib.rs:460) — `delete_task`'s descendant BFS scanned the full task list on every parent popped from the frontier, making the work O(n × depth). Built a `parent_id -> [child_id]` `HashMap` once, then the BFS visits each descendant in O(1) amortised, dropping total cost to O(n). +3. **Code quality: duplicate atomic-write in `AppConfig::save_to_file`** (config.rs:114) — the function had its own copy of the temp-file + rename + cleanup-on-failure dance even though `storage::atomic_write` is `pub(crate)` and was already shared by `google_tasks.rs`. Replaced the inline implementation with a call to `crate::storage::atomic_write` so the crate has one canonical atomic write path. + ## 2026-04-24 Found and fixed 3 issues: