Merge pull request #62 from SteelDynamite/claude/serene-ride-JTRND
audit(2026-04-25): O(n²) sync-status + cascade-delete + atomic-write dedup
commit 0506d44989

Audit.md | 8 ++++++++
Audit.md
@@ -1,5 +1,13 @@
 # Audit Log
 
+## 2026-04-25
+
+Found and fixed 3 issues:
+
+1. **Perf: O(n²) deletion-detection in `get_sync_status`** (sync.rs:918) — for every path tracked in `sync_state.files`, the loop scanned `local_files` linearly via `.any(|f| f.path == *path)` to decide whether to count it as a deleted-locally pending change. The earlier "modified or new" loop already used the inverse direction with `sync_state.files.get(...)` (O(1)), so the second loop was the inconsistent one. Built a `HashSet<&str>` of local paths once and used `contains` for the membership check.
+2. **Perf: cascade delete walks all_tasks per frontier pop** (tauri/lib.rs:460) — `delete_task`'s descendant BFS scanned the full task list on every parent popped from the frontier, making the work O(n × depth). Built a `parent_id -> [child_id]` `HashMap` once, then the BFS visits each descendant in O(1) amortised, dropping total cost to O(n).
+3. **Code quality: duplicate atomic-write in `AppConfig::save_to_file`** (config.rs:114) — the function had its own copy of the temp-file + rename + cleanup-on-failure dance even though `storage::atomic_write` is `pub(crate)` and was already shared by `google_tasks.rs`. Replaced the inline implementation with a call to `crate::storage::atomic_write` so the crate has one canonical atomic write path.
+
 ## 2026-04-24
 
 Found and fixed 3 issues:
tauri/lib.rs
@@ -455,12 +455,23 @@ fn delete_task(
     // so deleting a parent can't leave grandchildren orphaned with a
     // parent_id pointing at a deleted task.
     let all_tasks = repo.list_tasks(lid).map_err(|e| e.to_string())?;
+    // Build a parent -> children index in one pass so the BFS below is O(n)
+    // instead of O(n * depth) scanning all tasks for each frontier pop.
+    let mut children_by_parent: std::collections::HashMap<Uuid, Vec<Uuid>> =
+        std::collections::HashMap::new();
+    for t in &all_tasks {
+        if let Some(pid) = t.parent_id {
+            children_by_parent.entry(pid).or_default().push(t.id);
+        }
+    }
     let mut to_delete: std::collections::HashSet<Uuid> = std::collections::HashSet::new();
     let mut frontier: Vec<Uuid> = vec![tid];
     while let Some(parent) = frontier.pop() {
-        for t in &all_tasks {
-            if t.parent_id == Some(parent) && to_delete.insert(t.id) {
-                frontier.push(t.id);
+        if let Some(children) = children_by_parent.get(&parent) {
+            for &child_id in children {
+                if to_delete.insert(child_id) {
+                    frontier.push(child_id);
+                }
             }
         }
     }
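For readers skimming the audit rather than the diff, here is the same indexing pattern as a self-contained sketch. `Task`, `collect_descendants`, and the `u64` ids are hypothetical stand-ins for the real task type and `Uuid` ids in `tauri/lib.rs`:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical stand-in for the task rows in tauri/lib.rs; the real
// code works over `Uuid` ids returned by the repo's `list_tasks`.
struct Task {
    id: u64,
    parent_id: Option<u64>,
}

// Return every descendant of `root` in O(n): one pass builds the
// parent -> children index, then the BFS visits each task at most once.
fn collect_descendants(all_tasks: &[Task], root: u64) -> HashSet<u64> {
    let mut children_by_parent: HashMap<u64, Vec<u64>> = HashMap::new();
    for t in all_tasks {
        if let Some(pid) = t.parent_id {
            children_by_parent.entry(pid).or_default().push(t.id);
        }
    }
    let mut found = HashSet::new();
    let mut frontier = vec![root];
    while let Some(parent) = frontier.pop() {
        if let Some(children) = children_by_parent.get(&parent) {
            for &child_id in children {
                // `insert` returns false for already-seen ids, so even
                // corrupted data with a parent_id cycle cannot loop forever.
                if found.insert(child_id) {
                    frontier.push(child_id);
                }
            }
        }
    }
    found
}
```

Gating `frontier.push` on `insert`'s return value both dedups work and doubles as cycle protection, which the plain "scan all tasks per pop" version only got by accident.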
config.rs
@@ -116,13 +116,7 @@ impl AppConfig {
             std::fs::create_dir_all(parent)?;
         }
         let content = serde_json::to_string_pretty(&self)?;
-        // Atomic write: write to temp file then rename to prevent corruption on crash
-        let temp = path.with_extension("tmp");
-        std::fs::write(&temp, &content)?;
-        if let Err(e) = std::fs::rename(&temp, path) {
-            let _ = std::fs::remove_file(&temp);
-            return Err(e.into());
-        }
+        crate::storage::atomic_write(path, content.as_bytes())?;
 
         Ok(())
     }
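The shared `storage::atomic_write` helper itself is not part of this diff, so the sketch below is an assumption about its shape, reconstructed from the inline code this commit removes:

```rust
use std::io;
use std::path::Path;

// Sketch only: the real `storage::atomic_write` is not shown in this
// commit, so the name and signature here are assumptions. The body
// mirrors the inline code removed from `AppConfig::save_to_file`.
pub(crate) fn atomic_write(path: &Path, bytes: &[u8]) -> io::Result<()> {
    let temp = path.with_extension("tmp");
    std::fs::write(&temp, bytes)?;
    // Rename is atomic on POSIX filesystems, so readers see either the
    // old file or the complete new one, never a partial write.
    if let Err(e) = std::fs::rename(&temp, path) {
        // Best-effort cleanup so a failed rename doesn't strand the temp file.
        let _ = std::fs::remove_file(&temp);
        return Err(e);
    }
    Ok(())
}
```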
sync.rs
@@ -914,9 +914,15 @@ pub fn get_sync_status(workspace_path: &Path) -> Result<SyncStatusInfo> {
         }
     }
 
-    // Count files in base that are now missing locally (deleted)
+    // Count files in base that are now missing locally (deleted).
+    // Build a set of local paths once so the membership check is O(1) per
+    // tracked file instead of scanning local_files linearly each time.
+    let local_paths: std::collections::HashSet<&str> = local_files
+        .iter()
+        .map(|f| f.path.as_str())
+        .collect();
     for path in sync_state.files.keys() {
-        if !local_files.iter().any(|f| f.path == *path) {
+        if !local_paths.contains(path.as_str()) {
            pending_changes += 1;
         }
     }
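A self-contained sketch of the membership-check change, with `LocalFile` and `count_deleted` as hypothetical stand-ins for the entry type and surrounding loop in sync.rs:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for the local file entries in sync.rs.
struct LocalFile {
    path: String,
}

// Count tracked paths that no longer exist locally. Building the set
// is O(m) and each `contains` is O(1), so the whole pass is O(n + m)
// instead of the old O(n * m) nested scan.
fn count_deleted(tracked_paths: &[String], local_files: &[LocalFile]) -> usize {
    let local_paths: HashSet<&str> =
        local_files.iter().map(|f| f.path.as_str()).collect();
    tracked_paths
        .iter()
        .filter(|p| !local_paths.contains(p.as_str()))
        .count()
}
```

Borrowing `&str` keys out of `local_files` keeps the set allocation to one `Vec`-sized table with no string copies, which is why the fix is effectively free even for small workspaces.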