fix(sync): use atomic_write for all payload file writes during sync
Sync's conflict-resolution and download paths wrote the local file with a plain `fs::write`. A crash or I/O error mid-write could leave a truncated .md or .listdata.json that would then fail YAML/JSON parsing on the next list_tasks. All other callers in this crate use atomic_write; route the four sync call sites through it for consistency and crash safety.
This commit is contained in:
parent
c134624839
commit
b437b0b7b2
|
|
@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
|
||||||
use sha2::{Sha256, Digest};
|
use sha2::{Sha256, Digest};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
use crate::error::{Error, Result};
|
use crate::error::{Error, Result};
|
||||||
use crate::storage::{ListMetadata, TaskFrontmatter};
|
use crate::storage::{atomic_write, ListMetadata, TaskFrontmatter};
|
||||||
use crate::webdav::WebDavClient;
|
use crate::webdav::WebDavClient;
|
||||||
|
|
||||||
/// File-based lock to prevent concurrent sync operations on the same workspace.
|
/// File-based lock to prevent concurrent sync operations on the same workspace.
|
||||||
|
|
@ -743,8 +743,9 @@ async fn execute_action(
|
||||||
} else {
|
} else {
|
||||||
report(&format!(" ! Conflict: remote wins for {}, recovering local as duplicate", path));
|
report(&format!(" ! Conflict: remote wins for {}, recovering local as duplicate", path));
|
||||||
|
|
||||||
// Remote wins: overwrite local with remote content
|
// Remote wins: overwrite local with remote content. Atomic write
|
||||||
std::fs::write(&local_path, &remote_data)?;
|
// so a crash mid-sync cannot leave a truncated file behind.
|
||||||
|
atomic_write(&local_path, &remote_data)?;
|
||||||
let modified = std::fs::metadata(&local_path).ok()
|
let modified = std::fs::metadata(&local_path).ok()
|
||||||
.and_then(|m| m.modified().ok())
|
.and_then(|m| m.modified().ok())
|
||||||
.map(|t| { let dt: DateTime<Utc> = t.into(); dt.to_rfc3339() });
|
.map(|t| { let dt: DateTime<Utc> = t.into(); dt.to_rfc3339() });
|
||||||
|
|
@ -775,7 +776,7 @@ async fn execute_action(
|
||||||
let list_dir = workspace_path.join(parts[0]);
|
let list_dir = workspace_path.join(parts[0]);
|
||||||
let dup_filename = format!("{}.md", new_id);
|
let dup_filename = format!("{}.md", new_id);
|
||||||
let dup_path = list_dir.join(&dup_filename);
|
let dup_path = list_dir.join(&dup_filename);
|
||||||
std::fs::write(&dup_path, &new_content)?;
|
atomic_write(&dup_path, new_content.as_bytes())?;
|
||||||
|
|
||||||
// Insert new task adjacent to original in .listdata.json.
|
// Insert new task adjacent to original in .listdata.json.
|
||||||
// If metadata update fails, remove the duplicate file to
|
// If metadata update fails, remove the duplicate file to
|
||||||
|
|
@ -791,7 +792,7 @@ async fn execute_action(
|
||||||
.unwrap_or(metadata.task_order.len());
|
.unwrap_or(metadata.task_order.len());
|
||||||
metadata.task_order.insert(insert_pos, new_id);
|
metadata.task_order.insert(insert_pos, new_id);
|
||||||
let json = serde_json::to_string_pretty(&metadata)?;
|
let json = serde_json::to_string_pretty(&metadata)?;
|
||||||
std::fs::write(&listdata_path, json)?;
|
atomic_write(&listdata_path, json.as_bytes())?;
|
||||||
Ok(())
|
Ok(())
|
||||||
})();
|
})();
|
||||||
if let Err(e) = metadata_updated {
|
if let Err(e) = metadata_updated {
|
||||||
|
|
@ -816,7 +817,7 @@ async fn execute_action(
|
||||||
if let Some(parent) = local_path.parent() {
|
if let Some(parent) = local_path.parent() {
|
||||||
std::fs::create_dir_all(parent)?;
|
std::fs::create_dir_all(parent)?;
|
||||||
}
|
}
|
||||||
std::fs::write(&local_path, &data)?;
|
atomic_write(&local_path, &data)?;
|
||||||
|
|
||||||
// Record remote's last_modified so next diff won't see a timestamp mismatch
|
// Record remote's last_modified so next diff won't see a timestamp mismatch
|
||||||
let modified = remote_meta.get(path.as_str()).and_then(|r| r.last_modified.clone());
|
let modified = remote_meta.get(path.as_str()).and_then(|r| r.last_modified.clone());
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue