Compare commits

..

1 commit

Author: Claude
SHA1: e3a40c46f1
Date: 2026-04-27 15:25:08 +00:00

chore(settings): allow nx71726.your-storageshare.de for Nextcloud Talk

Adds the Nextcloud server to the network allowlist so curl can reach it
for sending chat notifications.

https://claude.ai/code/session_01C7jV6wrzJVhHRKWsq87XwB
6 changed files with 24 additions and 40 deletions
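
The commit message says the allowlist exists so curl can post chat notifications to this Nextcloud Talk instance. As an illustration only, a hypothetical sketch of such a call, shelling out to curl from Rust; the OCS chat endpoint follows Nextcloud Talk's public API, while the room token and credentials are placeholders rather than values from this repository:

    use std::process::Command;

    /// Posts `message` to a Nextcloud Talk conversation by shelling out to curl.
    /// ROOM_TOKEN and the bot credentials are placeholders.
    fn send_talk_notification(message: &str) -> std::io::Result<bool> {
        let body = format!("message={message}");
        let status = Command::new("curl")
            .args([
                "--fail",
                "--silent",
                "-u", "bot-user:app-password",   // placeholder credentials
                "-H", "OCS-APIRequest: true",    // header required by OCS endpoints
                "--data-urlencode", body.as_str(),
                "https://nx71726.your-storageshare.de/ocs/v2.php/apps/spreed/api/v1/chat/ROOM_TOKEN",
            ])
            .status()?;
        Ok(status.success())
    }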

View file

@@ -1,4 +1,15 @@
 {
+  "permissions": {
+    "allow": [
+      "WebFetch(domain:nx71726.your-storageshare.de)",
+      "Bash(curl * nx71726.your-storageshare.de*)"
+    ]
+  },
+  "sandbox": {
+    "network": {
+      "allowedDomains": ["nx71726.your-storageshare.de"]
+    }
+  },
   "hooks": {
     "PreToolUse": [
       {

View file

@@ -1,13 +1,5 @@
 # Audit Log
-## 2026-04-27
-Found and fixed 3 issues:
-1. **Perf: needless clone of upload payload** (sync.rs:733) — the `SyncAction::Upload` arm read the file into `data`, computed `compute_checksum(&data)`, then called `client.put_file(path, data.clone())`. The clone existed only because the next statement needed `data.len()` for the sync-state record. Captured `data.len() as u64` into `len` first, moved `data` into `put_file`, and used `len` afterwards — one full byte copy avoided per uploaded file.
-2. **Bug: Google Tasks sync silently drops metadata-write failures** (google_tasks.rs:361, 377) — both `.listdata.json` and `.onyx-workspace.json` were written via `if let Ok(meta_content) = serde_json::to_string_pretty(...) { let _ = atomic_write(...); }`, so a serialization or atomic-write error returned `Ok(GoogleSyncResult { downloaded: N, errors: [] })` even though list/workspace ordering was never persisted. Both writes now push their errors into the `errors` vec already returned in `GoogleSyncResult`.
-3. **Code quality: unreachable dead-error path in storage dedup** (storage.rs:447) — the dedup loop computed `Option<Task>` from each `by_id` group and then `ok_or_else(|| Error::InvalidData("Empty dedup entries for task"))?`. `by_id` is only populated by `entry(uuid).or_default().push(entry)`, so every group has ≥1 element and the `None` branch is unreachable. Replaced the `Option`+`?` with direct `expect` calls (one per branch) that document the non-empty invariant; the loop now yields `Task` directly.
 ## 2026-04-25
 Found and fixed 3 issues:
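
Item 1 in the audit entry above boils down to ordering: read everything you still need from a buffer before moving it into the call that consumes it. A distilled sketch of that pattern with made-up names (the `upload` function that takes `Vec<u8>` by value stands in for `client.put_file`):

    /// Stand-in for an upload API that consumes the payload.
    fn upload(_path: &str, _data: Vec<u8>) { /* ... */ }

    fn sync_one_file(path: &str) -> std::io::Result<()> {
        let data = std::fs::read(path)?;
        // Capture the metadata we still need *before* giving the buffer away...
        let len = data.len() as u64;
        // ...so the upload can take ownership instead of forcing a clone.
        upload(path, data);
        println!("recorded {path}: {len} bytes");
        Ok(())
    }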

View file

@@ -358,15 +358,8 @@ pub async fn sync_google_tasks(
        list_meta.task_order = task_order;
        list_meta.updated_at = Utc::now();
-        match serde_json::to_string_pretty(&list_meta) {
-            Ok(meta_content) => {
-                if let Err(e) = atomic_write(&listdata_path, meta_content.as_bytes()) {
-                    errors.push(format!("Failed to write metadata for list '{}': {}", gt_list.title, e));
-                }
-            }
-            Err(e) => {
-                errors.push(format!("Failed to serialize metadata for list '{}': {}", gt_list.title, e));
-            }
-        }
+        if let Ok(meta_content) = serde_json::to_string_pretty(&list_meta) {
+            let _ = atomic_write(&listdata_path, meta_content.as_bytes());
+        }
    }
@@ -381,15 +374,8 @@ pub async fn sync_google_tasks(
        RootMetadata::default()
    };
    root_meta.list_order = new_list_order;
-    match serde_json::to_string_pretty(&root_meta) {
-        Ok(meta_content) => {
-            if let Err(e) = atomic_write(&root_meta_path, meta_content.as_bytes()) {
-                errors.push(format!("Failed to write workspace metadata: {}", e));
-            }
-        }
-        Err(e) => {
-            errors.push(format!("Failed to serialize workspace metadata: {}", e));
-        }
-    }
+    if let Ok(meta_content) = serde_json::to_string_pretty(&root_meta) {
+        let _ = atomic_write(&root_meta_path, meta_content.as_bytes());
+    }
    Ok(GoogleSyncResult { downloaded, errors })

View file

@@ -454,9 +454,7 @@ impl Storage for FileSystemStorage {
        let mut tasks = Vec::new();
        for (_id, entries) in by_id {
-            // `by_id` only inserts non-empty groups, so each `entries` has at
-            // least one element.
-            let task = if entries.len() > 1 {
+            let winner = if entries.len() > 1 {
                // Read mtime once per file so sort_by doesn't hit the filesystem
                // O(n log n) times and can't produce inconsistent orderings if a
                // file is touched mid-sort.
@@ -481,14 +479,12 @@ impl Storage for FileSystemStorage {
                        eprintln!("Warning: failed to remove stale duplicate task file {:?}: {}", stale_path, e);
                    }
                }
-                let (_, t, _) = with_mtime.into_iter().next()
-                    .expect("dedup group is non-empty after drain(1..)");
-                t
+                with_mtime.into_iter().next().map(|(_, t, _)| t)
            } else {
-                let (_, t) = entries.into_iter().next()
-                    .expect("dedup group is non-empty");
-                t
+                entries.into_iter().next().map(|(_, t)| t)
            };
+            let task = winner
+                .ok_or_else(|| Error::InvalidData("Empty dedup entries for task".to_string()))?;
            tasks.push(task);
        }

View file

@@ -724,20 +724,19 @@ async fn execute_action(
                Err(e) => return Err(e.into()),
            };
            let checksum = compute_checksum(&data);
-            let len = data.len() as u64;
            if let Some(parent) = path_parent(path) {
                client.ensure_dir(parent).await?;
            }
            report(&format!(" ^ Uploading {}", path));
-            client.put_file(path, data).await?;
+            client.put_file(path, data.clone()).await?;
            // Record in sync state using local file metadata
            let modified = std::fs::metadata(&local_path).ok()
                .and_then(|m| m.modified().ok())
                .map(|t| { let dt: DateTime<Utc> = t.into(); dt.to_rfc3339() });
-            sync_state.record_file(path, &checksum, modified.as_deref(), len);
+            sync_state.record_file(path, &checksum, modified.as_deref(), data.len() as u64);
        }
        SyncAction::Conflict { path } => {

View file

@@ -523,9 +523,9 @@ Key test areas:
 ## Thread Safety
-`TaskRepository` holds its storage as `Box<dyn Storage + Send + Sync>`, so any concrete storage implementation passed in must be `Send + Sync`. Repository instances can be shared across threads behind a `Mutex` — the Tauri GUI uses `Mutex<AppState>` for this purpose.
+The `Storage` trait requires `Send + Sync`, and `TaskRepository` wraps `Box<dyn Storage + Send + Sync>`, so repository instances can be shared across threads behind a `Mutex`. The Tauri GUI uses `Mutex<AppState>` for this purpose.
 For concurrent access:
 1. Wrap `TaskRepository` in `Mutex` or `RwLock` (the Tauri app does this)
-2. Or create separate repository instances per thread. Note that `FileSystemStorage` does not coordinate writes between processes — concurrent multi-process writes to the same workspace are not supported outside the WebDAV sync flow, which uses a `.sync.lock` file.
+2. Or create separate repository instances per thread (file system handles locking)
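
A minimal sketch of option 1 from that list, sharing one repository behind `Arc<Mutex<...>>`; the `TaskRepository` struct below is a stand-in defined only so the example compiles on its own, not the crate's real type:

    use std::sync::{Arc, Mutex};
    use std::thread;

    // Stand-in for the crate's TaskRepository.
    struct TaskRepository {
        tasks: Vec<String>,
    }

    impl TaskRepository {
        fn add_task(&mut self, title: String) {
            self.tasks.push(title);
        }
    }

    fn main() {
        // One repository, shared across threads behind Arc<Mutex<...>>.
        let repo = Arc::new(Mutex::new(TaskRepository { tasks: Vec::new() }));

        let handles: Vec<_> = (0..4)
            .map(|i| {
                let repo = Arc::clone(&repo);
                thread::spawn(move || {
                    // Hold the lock only for the duration of one call.
                    repo.lock().unwrap().add_task(format!("task from thread {i}"));
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }
    }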