Compare commits

claude/dre...main · 9 commits
| Author | SHA1 | Date |
|---|---|---|
|  | c5a3840aea |  |
|  | c29f715c9e |  |
|  | 6f4d00b912 |  |
|  | 39718ef700 |  |
|  | c57ffd3f55 |  |
|  | 12adfdc532 |  |
|  | 6e161ba819 |  |
|  | e8a69a3222 |  |
|  | 0506d44989 |  |
Audit.md (8 changes)
```diff
@@ -1,5 +1,13 @@
 # Audit Log
 
+## 2026-04-27
+
+Found and fixed 3 issues:
+
+1. **Perf: needless clone of upload payload** (sync.rs:733) — the `SyncAction::Upload` arm read the file into `data`, computed `compute_checksum(&data)`, then called `client.put_file(path, data.clone())`. The clone existed only because the next statement needed `data.len()` for the sync-state record. Captured `data.len() as u64` into `len` first, moved `data` into `put_file`, and used `len` afterwards — one full byte copy avoided per uploaded file.
+2. **Bug: Google Tasks sync silently drops metadata-write failures** (google_tasks.rs:361, 377) — both `.listdata.json` and `.onyx-workspace.json` were written via `if let Ok(meta_content) = serde_json::to_string_pretty(...) { let _ = atomic_write(...); }`, so a serialization or atomic-write error returned `Ok(GoogleSyncResult { downloaded: N, errors: [] })` even though list/workspace ordering was never persisted. Both writes now push their errors into the `errors` vec already returned in `GoogleSyncResult`.
+3. **Code quality: unreachable dead-error path in storage dedup** (storage.rs:447) — the dedup loop computed `Option<Task>` from each `by_id` group and then `ok_or_else(|| Error::InvalidData("Empty dedup entries for task"))?`. `by_id` is only populated by `entry(uuid).or_default().push(entry)`, so every group has ≥1 element and the `None` branch is unreachable. Replaced the `Option`+`?` with direct `expect` calls (one per branch) that document the non-empty invariant; the loop now yields `Task` directly.
+
 ## 2026-04-25
 
 Found and fixed 3 issues:
```
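Audit item 1 turns on an ownership detail that the sync.rs hunk below shows in context: the client's `put_file` takes its payload by value, so any later use of `data` forces a clone unless the needed value (here, the byte length) is captured first. A minimal, self-contained sketch of the pattern; `put_file` and `record` below are hypothetical stand-ins, not the crate's API:

```rust
// Hypothetical stand-ins: a by-value upload and a sync-state record.
fn put_file(_path: &str, _data: Vec<u8>) {}
fn record(_path: &str, _len: u64) {}

fn upload(path: &str, data: Vec<u8>) {
    // Capture everything needed after the upload before `data` is moved.
    let len = data.len() as u64;

    // `data` moves into the call; no clone needed.
    put_file(path, data);

    // `data.len()` here would not compile (use after move), which is why
    // the original code cloned. `len` carries the fact across the move.
    record(path, len);
}

fn main() {
    upload("notes/todo.md", b"task list".to_vec());
}
```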
google_tasks.rs

```diff
@@ -358,8 +358,15 @@ pub async fn sync_google_tasks(
         list_meta.task_order = task_order;
         list_meta.updated_at = Utc::now();
 
-        if let Ok(meta_content) = serde_json::to_string_pretty(&list_meta) {
-            let _ = atomic_write(&listdata_path, meta_content.as_bytes());
+        match serde_json::to_string_pretty(&list_meta) {
+            Ok(meta_content) => {
+                if let Err(e) = atomic_write(&listdata_path, meta_content.as_bytes()) {
+                    errors.push(format!("Failed to write metadata for list '{}': {}", gt_list.title, e));
+                }
+            }
+            Err(e) => {
+                errors.push(format!("Failed to serialize metadata for list '{}': {}", gt_list.title, e));
+            }
         }
     }
 
```
```diff
@@ -374,8 +381,15 @@ pub async fn sync_google_tasks(
         RootMetadata::default()
     };
     root_meta.list_order = new_list_order;
-    if let Ok(meta_content) = serde_json::to_string_pretty(&root_meta) {
-        let _ = atomic_write(&root_meta_path, meta_content.as_bytes());
+    match serde_json::to_string_pretty(&root_meta) {
+        Ok(meta_content) => {
+            if let Err(e) = atomic_write(&root_meta_path, meta_content.as_bytes()) {
+                errors.push(format!("Failed to write workspace metadata: {}", e));
+            }
+        }
+        Err(e) => {
+            errors.push(format!("Failed to serialize workspace metadata: {}", e));
+        }
     }
 
     Ok(GoogleSyncResult { downloaded, errors })
```
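Both hunks replace the same anti-pattern: `if let Ok(x) = f() { let _ = g(x); }` has two silent failure points, the skipped `Err` arm and the discarded `Result` from `g`. A distilled, runnable illustration of why the old code could report success while persisting nothing; `serialize` and `write` are hypothetical stand-ins for `serde_json::to_string_pretty` and `atomic_write`:

```rust
// Hypothetical stand-ins that always fail, to expose both failure points.
fn serialize() -> Result<String, String> {
    Err("serialization failed".into())
}
fn write(_content: &str) -> Result<(), String> {
    Err("disk full".into())
}

fn main() {
    let mut errors: Vec<String> = Vec::new();

    // Before: both failures vanish. `if let Ok` skips the Err arm, and
    // `let _ =` discards the write's Result.
    if let Ok(content) = serialize() {
        let _ = write(&content);
    }
    assert!(errors.is_empty()); // caller would see a clean result

    // After: every failure lands in `errors`, which the sync result returns.
    match serialize() {
        Ok(content) => {
            if let Err(e) = write(&content) {
                errors.push(format!("Failed to write metadata: {}", e));
            }
        }
        Err(e) => errors.push(format!("Failed to serialize metadata: {}", e)),
    }
    assert_eq!(errors.len(), 1);
}
```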
storage.rs

```diff
@@ -454,7 +454,9 @@ impl Storage for FileSystemStorage {
 
         let mut tasks = Vec::new();
         for (_id, entries) in by_id {
-            let winner = if entries.len() > 1 {
+            // `by_id` only inserts non-empty groups, so each `entries` has at
+            // least one element.
+            let task = if entries.len() > 1 {
                 // Read mtime once per file so sort_by doesn't hit the filesystem
                 // O(n log n) times and can't produce inconsistent orderings if a
                 // file is touched mid-sort.
```
```diff
@@ -479,12 +481,14 @@ impl Storage for FileSystemStorage {
                         eprintln!("Warning: failed to remove stale duplicate task file {:?}: {}", stale_path, e);
                     }
                 }
-                with_mtime.into_iter().next().map(|(_, t, _)| t)
+                let (_, t, _) = with_mtime.into_iter().next()
+                    .expect("dedup group is non-empty after drain(1..)");
+                t
             } else {
-                entries.into_iter().next().map(|(_, t)| t)
+                let (_, t) = entries.into_iter().next()
+                    .expect("dedup group is non-empty");
+                t
             };
-            let task = winner
-                .ok_or_else(|| Error::InvalidData("Empty dedup entries for task".to_string()))?;
             tasks.push(task);
         }
 
```
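Audit item 3's `expect` calls lean on an invariant established at insertion time rather than checked at use time: every group in `by_id` is created by pushing an element in the same statement. A minimal sketch of that shape, using a hypothetical grouping over strings rather than the crate's task types:

```rust
use std::collections::HashMap;

fn main() {
    // Groups are only ever created by pushing an element in the same
    // statement, so every Vec in the map holds at least one entry.
    let mut by_id: HashMap<u32, Vec<&str>> = HashMap::new();
    for (id, name) in [(1, "a"), (2, "b"), (1, "a-dup")] {
        by_id.entry(id).or_default().push(name);
    }

    for (_id, entries) in by_id {
        // The None branch is unreachable by construction; `expect`
        // documents the invariant instead of routing it through an
        // error type that can never fire.
        let first = entries.into_iter().next()
            .expect("dedup group is non-empty");
        println!("{}", first);
    }
}
```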
sync.rs

```diff
@@ -724,19 +724,20 @@ async fn execute_action(
                 Err(e) => return Err(e.into()),
             };
             let checksum = compute_checksum(&data);
+            let len = data.len() as u64;
 
             if let Some(parent) = path_parent(path) {
                 client.ensure_dir(parent).await?;
             }
 
             report(&format!(" ^ Uploading {}", path));
-            client.put_file(path, data.clone()).await?;
+            client.put_file(path, data).await?;
 
             // Record in sync state using local file metadata
             let modified = std::fs::metadata(&local_path).ok()
                 .and_then(|m| m.modified().ok())
                 .map(|t| { let dt: DateTime<Utc> = t.into(); dt.to_rfc3339() });
-            sync_state.record_file(path, &checksum, modified.as_deref(), data.len() as u64);
+            sync_state.record_file(path, &checksum, modified.as_deref(), len);
         }
 
         SyncAction::Conflict { path } => {
```
Documentation

```diff
@@ -523,9 +523,9 @@ Key test areas:
 
 ## Thread Safety
 
-The `Storage` trait requires `Send + Sync`, and `TaskRepository` wraps `Box<dyn Storage + Send + Sync>`, so repository instances can be shared across threads behind a `Mutex`. The Tauri GUI uses `Mutex<AppState>` for this purpose.
+`TaskRepository` holds its storage as `Box<dyn Storage + Send + Sync>`, so any concrete storage implementation passed in must be `Send + Sync`. Repository instances can be shared across threads behind a `Mutex` — the Tauri GUI uses `Mutex<AppState>` for this purpose.
 
 For concurrent access:
 
 1. Wrap `TaskRepository` in `Mutex` or `RwLock` (the Tauri app does this)
-2. Or create separate repository instances per thread (file system handles locking)
+2. Or create separate repository instances per thread. Note that `FileSystemStorage` does not coordinate writes between processes — concurrent multi-process writes to the same workspace are not supported outside the WebDAV sync flow, which uses a `.sync.lock` file.
```
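Option 1 from that list, sketched end to end. `TaskRepository` here is a hypothetical stand-in with the same `Send + Sync` shape, not the crate's real type; only the `Arc<Mutex<...>>` wiring is the point:

```rust
use std::sync::{Arc, Mutex};
use std::thread;

// Hypothetical stand-in for TaskRepository; the real one holds a
// Box<dyn Storage + Send + Sync>, which keeps it Send + Sync itself.
struct TaskRepository {
    tasks: Vec<String>,
}

impl TaskRepository {
    fn add_task(&mut self, title: &str) {
        self.tasks.push(title.to_string());
    }
}

fn main() {
    // One shared repository behind a Mutex, mirroring the Tauri GUI's
    // Mutex<AppState>.
    let repo = Arc::new(Mutex::new(TaskRepository { tasks: Vec::new() }));

    let handles: Vec<_> = (0..4)
        .map(|i| {
            let repo = Arc::clone(&repo);
            thread::spawn(move || {
                // Lock per operation; the guard serializes all writes.
                repo.lock().unwrap().add_task(&format!("task {}", i));
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(repo.lock().unwrap().tasks.len(), 4);
}
```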