Harden codebase: fix security, quality, and maintainability issues

- Replace dangerous unwrap() with proper error handling (storage.rs, webdav.rs)
- Add atomic writes (temp + rename) for config, sync state, and metadata files
- Add path traversal validation in sync executor
- Add workspace path validation in Tauri commands
- Add input size limits for task titles, descriptions, and list names
- Add file download size limit (10MB) to WebDAV get_file
- Fix move_task rollback to log failures instead of silently ignoring
- Fix JSON serialization unwrap in Tauri create_remote_workspace
- Fix swallowed errors in sync queue backup, metadata writes, sync state load
- Extract hardcoded strings into named constants (filenames, extensions, limits)
- Use REQUEST_TIMEOUT/CONNECT_TIMEOUT constants in WebDAV client builder
- Fix frontend: clear taskStack when viewed task is deleted or list is switched
- Fix frontend: surface credential loading and focus listener errors

https://claude.ai/code/session_01F67yfLLmSaBtT7aKKNus1M
This commit is contained in:
Claude 2026-04-06 10:17:30 +00:00
parent b45f39c96c
commit a12deb5182
No known key found for this signature in database
9 changed files with 177 additions and 36 deletions

View file

@ -41,6 +41,33 @@ fn lock_state(state: &Mutex<AppState>) -> Result<std::sync::MutexGuard<'_, AppSt
state.lock().map_err(|e| format!("State lock poisoned: {}", e)) state.lock().map_err(|e| format!("State lock poisoned: {}", e))
} }
/// Validate that a workspace path is a reasonable directory and not a system path.
///
/// Rejects empty paths, Unix system directories (including the filesystem
/// root), and Windows drive roots. Returns `Err` with a user-facing message
/// when the path is refused; performs no filesystem access.
fn validate_workspace_path(path: &str) -> Result<(), String> {
    let p = PathBuf::from(path);
    let normalized = p.to_string_lossy();
    if normalized.is_empty() {
        return Err("Workspace path cannot be empty".into());
    }
    // Reject paths that are system root directories
    #[cfg(unix)]
    {
        let forbidden = ["/", "/etc", "/usr", "/bin", "/sbin", "/var", "/proc", "/sys", "/dev"];
        // Trimming trailing slashes turns "/" (and "///") into "", which would
        // slip past the forbidden-list check — treat empty-after-trim as root.
        let canonical = normalized.trim_end_matches('/');
        if canonical.is_empty() || forbidden.contains(&canonical) {
            return Err(format!("Cannot use system directory as workspace: {}", path));
        }
    }
    #[cfg(windows)]
    {
        let upper = normalized.to_uppercase();
        // Parentheses are required: `&&` binds tighter than `||`, so without
        // them any path ending in ':' would be rejected regardless of length.
        if upper.len() <= 3 && (upper.ends_with(":\\") || upper.ends_with(':')) {
            return Err(format!("Cannot use drive root as workspace: {}", path));
        }
    }
    Ok(())
}
/// Serializable sync result for the frontend. /// Serializable sync result for the frontend.
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
struct SyncResult { struct SyncResult {
@ -120,6 +147,7 @@ fn add_workspace(
path: String, path: String,
state: State<'_, Mutex<AppState>>, state: State<'_, Mutex<AppState>>,
) -> Result<(), String> { ) -> Result<(), String> {
validate_workspace_path(&path)?;
let mut s = lock_state(&state)?; let mut s = lock_state(&state)?;
let ws = WorkspaceConfig::new(name, PathBuf::from(&path)); let ws = WorkspaceConfig::new(name, PathBuf::from(&path));
let id = s.config.add_workspace(ws); let id = s.config.add_workspace(ws);
@ -243,6 +271,7 @@ async fn rename_workspace(
#[tauri::command] #[tauri::command]
fn init_workspace(path: String) -> Result<(), String> { fn init_workspace(path: String) -> Result<(), String> {
validate_workspace_path(&path)?;
TaskRepository::init(PathBuf::from(path)) TaskRepository::init(PathBuf::from(path))
.map(|_| ()) .map(|_| ())
.map_err(|e| e.to_string()) .map_err(|e| e.to_string())
@ -625,7 +654,7 @@ async fn create_remote_workspace(
} else { } else {
format!("{}/{}", path.trim_end_matches('/'), ".onyx-workspace.json") format!("{}/{}", path.trim_end_matches('/'), ".onyx-workspace.json")
}; };
client.put_file(&file_path, serde_json::to_string_pretty(&metadata).unwrap().into_bytes()) client.put_file(&file_path, serde_json::to_string_pretty(&metadata).map_err(|e| e.to_string())?.into_bytes())
.await .await
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
Ok(()) Ok(())

View file

@ -26,7 +26,9 @@
invoke<[string, string]>("load_credentials", { domain }).then(([u, p]) => { invoke<[string, string]>("load_credentials", { domain }).then(([u, p]) => {
webdavUser = u; webdavUser = u;
webdavPass = p; webdavPass = p;
}).catch(() => {}); }).catch((e) => {
console.warn("Failed to load credentials:", e);
});
} catch {} } catch {}
}); });

View file

@ -18,6 +18,13 @@
let parentTask = $derived(taskStack.length >= 1 ? app.tasks.find(t => t.id === taskStack[0]) ?? null : null); let parentTask = $derived(taskStack.length >= 1 ? app.tasks.find(t => t.id === taskStack[0]) ?? null : null);
let subtaskDetail = $derived(taskStack.length >= 2 ? app.tasks.find(t => t.id === taskStack[1]) ?? null : null); let subtaskDetail = $derived(taskStack.length >= 2 ? app.tasks.find(t => t.id === taskStack[1]) ?? null : null);
// Clear taskStack when the viewed task no longer exists (e.g. deleted or list switched)
$effect(() => {
if (taskStack.length > 0 && !parentTask) {
taskStack = [];
}
});
function openTask(task: Task) { function openTask(task: Task) {
taskStack = [task.id]; taskStack = [task.id];
} }
@ -282,7 +289,7 @@
<div class="flex-1 overflow-y-auto py-2"> <div class="flex-1 overflow-y-auto py-2">
{#each app.lists as list (list.id)} {#each app.lists as list (list.id)}
<button <button
onclick={() => { app.selectList(list.id); closeDrawer(); }} onclick={() => { app.selectList(list.id); taskStack = []; closeDrawer(); }}
class="group flex w-full items-center gap-2 px-5 py-2.5 text-left text-sm hover:bg-black/5 dark:hover:bg-white/10 {list.id === app.activeListId ? 'font-bold' : ''}" class="group flex w-full items-center gap-2 px-5 py-2.5 text-left text-sm hover:bg-black/5 dark:hover:bg-white/10 {list.id === app.activeListId ? 'font-bold' : ''}"
> >
{#if list.id === app.activeListId} {#if list.id === app.activeListId}

View file

@ -369,7 +369,9 @@ function startAutoSync() {
_syncInterval = setInterval(triggerSync, syncIntervalSecs * 1000); _syncInterval = setInterval(triggerSync, syncIntervalSecs * 1000);
getCurrentWindow().onFocusChanged(({ payload: focused }) => { getCurrentWindow().onFocusChanged(({ payload: focused }) => {
if (focused && Date.now() - lastSyncTime > SYNC_FOCUS_THRESHOLD_MS) triggerSync(); if (focused && Date.now() - lastSyncTime > SYNC_FOCUS_THRESHOLD_MS) triggerSync();
}).then((unlisten) => { _focusUnlisten = unlisten; }); }).then((unlisten) => { _focusUnlisten = unlisten; }).catch((e) => {
console.warn("Failed to set up focus listener:", e);
});
} }
function stopAutoSync() { function stopAutoSync() {

View file

@ -115,7 +115,10 @@ impl AppConfig {
std::fs::create_dir_all(parent)?; std::fs::create_dir_all(parent)?;
} }
let content = serde_json::to_string_pretty(&self)?; let content = serde_json::to_string_pretty(&self)?;
std::fs::write(path, content)?; // Atomic write: write to temp file then rename to prevent corruption on crash
let temp = path.with_extension("tmp");
std::fs::write(&temp, &content)?;
std::fs::rename(&temp, path)?;
Ok(()) Ok(())
} }

View file

@ -78,7 +78,9 @@ impl TaskRepository {
self.storage.write_task(to_list_id, &task)?; self.storage.write_task(to_list_id, &task)?;
// If delete from source fails, roll back by removing the copy from destination // If delete from source fails, roll back by removing the copy from destination
if let Err(e) = self.storage.delete_task(from_list_id, task_id) { if let Err(e) = self.storage.delete_task(from_list_id, task_id) {
let _ = self.storage.delete_task(to_list_id, task_id); if let Err(rollback_err) = self.storage.delete_task(to_list_id, task_id) {
eprintln!("Warning: move_task rollback failed: {}", rollback_err);
}
return Err(e); return Err(e);
} }
Ok(()) Ok(())

View file

@ -7,6 +7,30 @@ use uuid::Uuid;
use crate::error::{Error, Result}; use crate::error::{Error, Result};
use crate::models::{Task, TaskList, TaskStatus}; use crate::models::{Task, TaskList, TaskStatus};
/// Maximum allowed length for task titles.
const MAX_TITLE_LENGTH: usize = 500;
/// Maximum allowed length for task descriptions.
const MAX_DESCRIPTION_LENGTH: usize = 1_000_000; // 1 MB
/// Maximum allowed length for list names.
const MAX_LIST_NAME_LENGTH: usize = 255;
/// Workspace root metadata filename.
const WORKSPACE_METADATA_FILE: &str = ".onyx-workspace.json";
/// Per-list metadata filename.
const LIST_METADATA_FILE: &str = ".listdata.json";
/// Task file extension.
const TASK_FILE_EXT: &str = "md";
/// Default version for tasks without a version field (legacy files).
const DEFAULT_TASK_VERSION: u64 = 1;
/// Write data to a temporary file then atomically rename to the target path.
/// Prevents corruption from partial writes on crash.
///
/// The temp file is named `<filename>.tmp` by *appending* the suffix:
/// `Path::with_extension("tmp")` would replace the existing extension, so
/// sibling files "a.md" and "a.json" would collide on the same "a.tmp".
/// If the rename fails, the temp file is removed (best-effort) before the
/// error is propagated so no stray `.tmp` files accumulate.
fn atomic_write(path: &Path, content: &[u8]) -> std::io::Result<()> {
    let mut temp_name = path
        .file_name()
        .map(|n| n.to_os_string())
        .unwrap_or_default();
    temp_name.push(".tmp");
    let temp = path.with_file_name(temp_name);
    fs::write(&temp, content)?;
    if let Err(e) = fs::rename(&temp, path) {
        // Best-effort cleanup; the rename error is the one worth reporting.
        let _ = fs::remove_file(&temp);
        return Err(e);
    }
    Ok(())
}
/// Metadata stored in root .onyx-workspace.json /// Metadata stored in root .onyx-workspace.json
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RootMetadata { pub struct RootMetadata {
@ -50,7 +74,7 @@ impl ListMetadata {
} }
fn is_false(v: &bool) -> bool { !v } fn is_false(v: &bool) -> bool { !v }
fn default_version() -> u64 { 1 } fn default_version() -> u64 { DEFAULT_TASK_VERSION }
/// Frontmatter for task markdown files /// Frontmatter for task markdown files
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@ -126,7 +150,7 @@ impl FileSystemStorage {
} }
fn metadata_path(&self) -> PathBuf { fn metadata_path(&self) -> PathBuf {
self.root_path.join(".onyx-workspace.json") self.root_path.join(WORKSPACE_METADATA_FILE)
} }
fn list_dir_path(&self, list_id: Uuid) -> Result<PathBuf> { fn list_dir_path(&self, list_id: Uuid) -> Result<PathBuf> {
@ -137,7 +161,7 @@ impl FileSystemStorage {
let entry = entry?; let entry = entry?;
let path = entry.path(); let path = entry.path();
if path.is_dir() { if path.is_dir() {
let listdata_path = path.join(".listdata.json"); let listdata_path = path.join(LIST_METADATA_FILE);
if listdata_path.exists() { if listdata_path.exists() {
let content = fs::read_to_string(&listdata_path)?; let content = fs::read_to_string(&listdata_path)?;
let list_metadata: ListMetadata = serde_json::from_str(&content)?; let list_metadata: ListMetadata = serde_json::from_str(&content)?;
@ -191,7 +215,7 @@ impl FileSystemStorage {
} else { } else {
safe_title safe_title
}; };
list_dir.join(format!("{}.md", filename)) list_dir.join(format!("{}.{}", filename, TASK_FILE_EXT))
} }
fn parse_markdown_with_frontmatter(&self, content: &str) -> Result<(TaskFrontmatter, String)> { fn parse_markdown_with_frontmatter(&self, content: &str) -> Result<(TaskFrontmatter, String)> {
@ -247,7 +271,7 @@ impl FileSystemStorage {
fn write_root_metadata_internal(&self, metadata: &RootMetadata) -> Result<()> { fn write_root_metadata_internal(&self, metadata: &RootMetadata) -> Result<()> {
let path = self.metadata_path(); let path = self.metadata_path();
let content = serde_json::to_string_pretty(&metadata)?; let content = serde_json::to_string_pretty(&metadata)?;
fs::write(&path, content)?; atomic_write(&path, content.as_bytes())?;
Ok(()) Ok(())
} }
} }
@ -263,7 +287,7 @@ impl Storage for FileSystemStorage {
let entry = entry?; let entry = entry?;
let path = entry.path(); let path = entry.path();
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some("md") { if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some(TASK_FILE_EXT) {
let content = fs::read_to_string(&path)?; let content = fs::read_to_string(&path)?;
let (frontmatter, description) = self.parse_markdown_with_frontmatter(&content)?; let (frontmatter, description) = self.parse_markdown_with_frontmatter(&content)?;
@ -291,6 +315,13 @@ impl Storage for FileSystemStorage {
} }
fn write_task(&mut self, list_id: Uuid, task: &Task) -> Result<()> { fn write_task(&mut self, list_id: Uuid, task: &Task) -> Result<()> {
if task.title.len() > MAX_TITLE_LENGTH {
return Err(Error::InvalidData(format!("Task title too long ({} chars, max {})", task.title.len(), MAX_TITLE_LENGTH)));
}
if task.description.len() > MAX_DESCRIPTION_LENGTH {
return Err(Error::InvalidData(format!("Task description too long ({} bytes, max {})", task.description.len(), MAX_DESCRIPTION_LENGTH)));
}
let list_dir = self.list_dir_path(list_id)?; let list_dir = self.list_dir_path(list_id)?;
let task_path = self.task_file_path(&list_dir, task); let task_path = self.task_file_path(&list_dir, task);
@ -299,7 +330,7 @@ impl Storage for FileSystemStorage {
let entry = entry?; let entry = entry?;
let path = entry.path(); let path = entry.path();
if path == task_path { continue; } if path == task_path { continue; }
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some("md") { if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some(TASK_FILE_EXT) {
if let Ok(content) = fs::read_to_string(&path) { if let Ok(content) = fs::read_to_string(&path) {
if let Ok((fm, _)) = self.parse_markdown_with_frontmatter(&content) { if let Ok((fm, _)) = self.parse_markdown_with_frontmatter(&content) {
if fm.id == task.id { if fm.id == task.id {
@ -352,7 +383,7 @@ impl Storage for FileSystemStorage {
let entry = entry?; let entry = entry?;
let path = entry.path(); let path = entry.path();
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some("md") { if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some(TASK_FILE_EXT) {
let content = fs::read_to_string(&path)?; let content = fs::read_to_string(&path)?;
let (frontmatter, description) = self.parse_markdown_with_frontmatter(&content)?; let (frontmatter, description) = self.parse_markdown_with_frontmatter(&content)?;
@ -387,10 +418,13 @@ impl Storage for FileSystemStorage {
if entries.len() > 1 { if entries.len() > 1 {
entries.sort_by(|a, b| b.1.version.cmp(&a.1.version)); entries.sort_by(|a, b| b.1.version.cmp(&a.1.version));
for (stale_path, _) in entries.drain(1..) { for (stale_path, _) in entries.drain(1..) {
let _ = fs::remove_file(&stale_path); if let Err(e) = fs::remove_file(&stale_path) {
eprintln!("Warning: failed to remove stale duplicate task file {:?}: {}", stale_path, e);
} }
} }
let (_, task) = entries.into_iter().next().unwrap(); }
let (_, task) = entries.into_iter().next()
.ok_or_else(|| Error::InvalidData("Empty dedup entries for task".to_string()))?;
tasks.push(task); tasks.push(task);
} }
@ -407,6 +441,12 @@ impl Storage for FileSystemStorage {
} }
fn create_list(&mut self, name: String) -> Result<TaskList> { fn create_list(&mut self, name: String) -> Result<TaskList> {
if name.trim().is_empty() {
return Err(Error::InvalidData("List name cannot be empty".to_string()));
}
if name.len() > MAX_LIST_NAME_LENGTH {
return Err(Error::InvalidData(format!("List name too long ({} chars, max {})", name.len(), MAX_LIST_NAME_LENGTH)));
}
let list_dir = self.list_dir_path_by_name(&name)?; let list_dir = self.list_dir_path_by_name(&name)?;
if list_dir.exists() { if list_dir.exists() {
@ -420,7 +460,7 @@ impl Storage for FileSystemStorage {
let metadata_path = list_dir.join(".listdata.json"); let metadata_path = list_dir.join(".listdata.json");
let content = serde_json::to_string_pretty(&list_metadata)?; let content = serde_json::to_string_pretty(&list_metadata)?;
fs::write(&metadata_path, content)?; atomic_write(&metadata_path, content.as_bytes())?;
// Add to root metadata // Add to root metadata
let mut root_metadata = self.read_root_metadata_internal()?; let mut root_metadata = self.read_root_metadata_internal()?;
@ -453,7 +493,7 @@ impl Storage for FileSystemStorage {
let path = entry.path(); let path = entry.path();
if path.is_dir() { if path.is_dir() {
let listdata_path = path.join(".listdata.json"); let listdata_path = path.join(LIST_METADATA_FILE);
if listdata_path.exists() { if listdata_path.exists() {
let content = fs::read_to_string(&listdata_path)?; let content = fs::read_to_string(&listdata_path)?;
let list_metadata: ListMetadata = serde_json::from_str(&content)?; let list_metadata: ListMetadata = serde_json::from_str(&content)?;
@ -508,6 +548,12 @@ impl Storage for FileSystemStorage {
} }
fn rename_list(&mut self, list_id: Uuid, new_name: String) -> Result<()> { fn rename_list(&mut self, list_id: Uuid, new_name: String) -> Result<()> {
if new_name.trim().is_empty() {
return Err(Error::InvalidData("List name cannot be empty".to_string()));
}
if new_name.len() > MAX_LIST_NAME_LENGTH {
return Err(Error::InvalidData(format!("List name too long ({} chars, max {})", new_name.len(), MAX_LIST_NAME_LENGTH)));
}
let old_dir = self.list_dir_path(list_id)?; let old_dir = self.list_dir_path(list_id)?;
let new_dir = self.list_dir_path_by_name(&new_name)?; let new_dir = self.list_dir_path_by_name(&new_name)?;
@ -523,7 +569,7 @@ impl Storage for FileSystemStorage {
let mut metadata: ListMetadata = serde_json::from_str(&content)?; let mut metadata: ListMetadata = serde_json::from_str(&content)?;
metadata.updated_at = Utc::now(); metadata.updated_at = Utc::now();
let json = serde_json::to_string_pretty(&metadata)?; let json = serde_json::to_string_pretty(&metadata)?;
fs::write(&metadata_path, json)?; atomic_write(&metadata_path, json.as_bytes())?;
Ok(()) Ok(())
} }
@ -554,7 +600,7 @@ impl Storage for FileSystemStorage {
let metadata_path = list_dir.join(".listdata.json"); let metadata_path = list_dir.join(".listdata.json");
let content = serde_json::to_string_pretty(&metadata)?; let content = serde_json::to_string_pretty(&metadata)?;
fs::write(&metadata_path, content)?; atomic_write(&metadata_path, content.as_bytes())?;
Ok(()) Ok(())
} }
} }

View file

@ -265,7 +265,9 @@ impl OfflineQueue {
Err(e) => { Err(e) => {
eprintln!("Warning: corrupt sync queue, backing up and resetting: {}", e); eprintln!("Warning: corrupt sync queue, backing up and resetting: {}", e);
let backup = workspace_path.join(".syncqueue.json.bak"); let backup = workspace_path.join(".syncqueue.json.bak");
let _ = std::fs::copy(&queue_path, &backup); if let Err(backup_err) = std::fs::copy(&queue_path, &backup) {
eprintln!("Warning: failed to backup corrupt sync queue: {}", backup_err);
}
Self::default() Self::default()
} }
}, },
@ -281,12 +283,17 @@ impl OfflineQueue {
if self.operations.is_empty() { if self.operations.is_empty() {
// Clean up empty queue file // Clean up empty queue file
if queue_path.exists() { if queue_path.exists() {
let _ = std::fs::remove_file(&queue_path); if let Err(e) = std::fs::remove_file(&queue_path) {
eprintln!("Warning: failed to remove empty sync queue file: {}", e);
}
} }
return Ok(()); return Ok(());
} }
let content = serde_json::to_string_pretty(self)?; let content = serde_json::to_string_pretty(self)?;
std::fs::write(&queue_path, content)?; // Atomic write: write to temp then rename
let temp_path = workspace_path.join(".syncqueue.json.tmp");
std::fs::write(&temp_path, &content)?;
std::fs::rename(&temp_path, &queue_path)?;
Ok(()) Ok(())
} }
@ -349,16 +356,21 @@ pub fn compute_checksum(data: &[u8]) -> String {
format!("{:x}", hasher.finalize()) format!("{:x}", hasher.finalize())
} }
/// Workspace root metadata filename.
const WORKSPACE_METADATA_FILE: &str = ".onyx-workspace.json";
/// Per-list metadata filename.
const LIST_METADATA_FILE: &str = ".listdata.json";
/// Check if a file is syncable: *.md files and metadata files at expected depths. /// Check if a file is syncable: *.md files and metadata files at expected depths.
fn is_syncable(path: &str) -> bool { fn is_syncable(path: &str) -> bool {
let parts: Vec<&str> = path.split('/').collect(); let parts: Vec<&str> = path.split('/').collect();
let filename = parts.last().copied().unwrap_or(path); let filename = parts.last().copied().unwrap_or(path);
// .onyx-workspace.json only at workspace root (depth 1) // .onyx-workspace.json only at workspace root (depth 1)
if filename == ".onyx-workspace.json" { if filename == WORKSPACE_METADATA_FILE {
return parts.len() == 1; return parts.len() == 1;
} }
// .listdata.json only inside a list directory (depth 2) // .listdata.json only inside a list directory (depth 2)
if filename == ".listdata.json" { if filename == LIST_METADATA_FILE {
return parts.len() == 2; return parts.len() == 2;
} }
// .md files inside a list directory (depth 2) // .md files inside a list directory (depth 2)
@ -451,15 +463,27 @@ impl SyncState {
return Self::default(); return Self::default();
} }
match std::fs::read_to_string(&state_path) { match std::fs::read_to_string(&state_path) {
Ok(content) => serde_json::from_str(&content).unwrap_or_default(), Ok(content) => match serde_json::from_str(&content) {
Err(_) => Self::default(), Ok(state) => state,
Err(e) => {
eprintln!("Warning: corrupt sync state file, resetting: {}", e);
Self::default()
}
},
Err(e) => {
eprintln!("Warning: failed to read sync state file: {}", e);
Self::default()
}
} }
} }
pub fn save(&self, workspace_path: &Path) -> Result<()> { pub fn save(&self, workspace_path: &Path) -> Result<()> {
let state_path = workspace_path.join(".syncstate.json"); let state_path = workspace_path.join(".syncstate.json");
let content = serde_json::to_string_pretty(self)?; let content = serde_json::to_string_pretty(self)?;
std::fs::write(&state_path, content)?; // Atomic write: write to temp file then rename to prevent corruption on crash
let temp_path = workspace_path.join(".syncstate.json.tmp");
std::fs::write(&temp_path, &content)?;
std::fs::rename(&temp_path, &state_path)?;
Ok(()) Ok(())
} }
@ -589,6 +613,16 @@ async fn sync_workspace_inner(
Ok(result) Ok(result)
} }
/// Validate that a sync path doesn't escape the workspace via path traversal.
///
/// Sync paths are '/'-separated, workspace-relative paths received from the
/// sync planner. This rejects:
/// - empty paths and absolute paths: a leading '/' would make the later
///   `workspace_path.join(..)` discard the workspace prefix entirely,
/// - `..` components (classic directory traversal),
/// - components containing '\' (would act as separators on Windows).
fn validate_sync_path(path: &str) -> Result<()> {
    if path.is_empty() || path.starts_with('/') {
        return Err(Error::Sync(format!("Path traversal not allowed: {}", path)));
    }
    for component in path.split('/') {
        if component == ".." || component.contains('\\') {
            return Err(Error::Sync(format!("Path traversal not allowed: {}", path)));
        }
    }
    Ok(())
}
/// Execute a single sync action. /// Execute a single sync action.
async fn execute_action( async fn execute_action(
client: &WebDavClient, client: &WebDavClient,
@ -598,6 +632,9 @@ async fn execute_action(
remote_meta: &HashMap<&str, &RemoteFileSnapshot>, remote_meta: &HashMap<&str, &RemoteFileSnapshot>,
report: &(dyn Fn(&str) + Send + Sync), report: &(dyn Fn(&str) + Send + Sync),
) -> Result<()> { ) -> Result<()> {
// Validate path before any file system operation
validate_sync_path(action.path())?;
match action { match action {
SyncAction::Upload { path } => { SyncAction::Upload { path } => {
let local_path = workspace_path.join(path.replace('/', std::path::MAIN_SEPARATOR_STR)); let local_path = workspace_path.join(path.replace('/', std::path::MAIN_SEPARATOR_STR));
@ -681,7 +718,9 @@ async fn execute_action(
.unwrap_or(metadata.task_order.len()); .unwrap_or(metadata.task_order.len());
metadata.task_order.insert(insert_pos, new_id); metadata.task_order.insert(insert_pos, new_id);
if let Ok(json) = serde_json::to_string_pretty(&metadata) { if let Ok(json) = serde_json::to_string_pretty(&metadata) {
let _ = std::fs::write(&listdata_path, json); if let Err(e) = std::fs::write(&listdata_path, json) {
eprintln!("Warning: failed to update listdata after conflict recovery: {}", e);
}
} }
} }
} }

View file

@ -5,6 +5,9 @@ use crate::error::{Error, Result};
/// Hard timeout for any WebDAV network operation. /// Hard timeout for any WebDAV network operation.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
const CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
/// Maximum size for file downloads (10 MB).
const MAX_FILE_BYTES: u64 = 10 * 1024 * 1024;
/// Information about a file on the remote WebDAV server. /// Information about a file on the remote WebDAV server.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -36,8 +39,8 @@ impl WebDavClient {
let base_url = base_url.trim_end_matches('/').to_string(); let base_url = base_url.trim_end_matches('/').to_string();
Self { Self {
_client: Client::builder() _client: Client::builder()
.timeout(Duration::from_secs(30)) .timeout(REQUEST_TIMEOUT)
.connect_timeout(Duration::from_secs(10)) .connect_timeout(CONNECT_TIMEOUT)
.build() .build()
.unwrap_or_else(|_| Client::new()), .unwrap_or_else(|_| Client::new()),
_base_url: base_url, _base_url: base_url,
@ -64,7 +67,7 @@ impl WebDavClient {
/// Test connection by issuing a PROPFIND depth 0 on the root. /// Test connection by issuing a PROPFIND depth 0 on the root.
pub async fn test_connection(&self) -> Result<()> { pub async fn test_connection(&self) -> Result<()> {
let resp = self._client let resp = self._client
.request(reqwest::Method::from_bytes(b"PROPFIND").unwrap(), &self._base_url) .request(reqwest::Method::from_bytes(b"PROPFIND").expect("PROPFIND is a valid HTTP method"), &self._base_url)
.basic_auth(self._username.as_str(), Some(self._password.as_str())) .basic_auth(self._username.as_str(), Some(self._password.as_str()))
.header("Depth", "0") .header("Depth", "0")
.header("Content-Type", "application/xml") .header("Content-Type", "application/xml")
@ -86,7 +89,7 @@ impl WebDavClient {
pub async fn list_files(&self, path: &str) -> Result<Vec<RemoteFileInfo>> { pub async fn list_files(&self, path: &str) -> Result<Vec<RemoteFileInfo>> {
let url = self.full_url(path); let url = self.full_url(path);
let resp = self._client let resp = self._client
.request(reqwest::Method::from_bytes(b"PROPFIND").unwrap(), &url) .request(reqwest::Method::from_bytes(b"PROPFIND").expect("PROPFIND is a valid HTTP method"), &url)
.basic_auth(self._username.as_str(), Some(self._password.as_str())) .basic_auth(self._username.as_str(), Some(self._password.as_str()))
.header("Depth", "1") .header("Depth", "1")
.header("Content-Type", "application/xml") .header("Content-Type", "application/xml")
@ -129,7 +132,15 @@ impl WebDavClient {
return Err(Error::WebDav(format!("GET failed with status {}", status))); return Err(Error::WebDav(format!("GET failed with status {}", status)));
} }
Ok(resp.bytes().await?.to_vec()) if resp.content_length().unwrap_or(0) > MAX_FILE_BYTES {
return Err(Error::WebDav(format!("File too large (>{}MB)", MAX_FILE_BYTES / (1024 * 1024))));
}
let bytes = resp.bytes().await?;
if bytes.len() as u64 > MAX_FILE_BYTES {
return Err(Error::WebDav(format!("File too large (>{}MB)", MAX_FILE_BYTES / (1024 * 1024))));
}
Ok(bytes.to_vec())
} }
/// Upload a file. /// Upload a file.
@ -172,7 +183,7 @@ impl WebDavClient {
pub async fn create_dir(&self, path: &str) -> Result<()> { pub async fn create_dir(&self, path: &str) -> Result<()> {
let url = self.full_url(path); let url = self.full_url(path);
let resp = self._client let resp = self._client
.request(reqwest::Method::from_bytes(b"MKCOL").unwrap(), &url) .request(reqwest::Method::from_bytes(b"MKCOL").expect("MKCOL is a valid HTTP method"), &url)
.basic_auth(self._username.as_str(), Some(self._password.as_str())) .basic_auth(self._username.as_str(), Some(self._password.as_str()))
.send() .send()
.await?; .await?;
@ -192,7 +203,7 @@ impl WebDavClient {
let from_url = self.full_url(from); let from_url = self.full_url(from);
let to_url = self.full_url(to); let to_url = self.full_url(to);
let resp = self._client let resp = self._client
.request(reqwest::Method::from_bytes(b"MOVE").unwrap(), &from_url) .request(reqwest::Method::from_bytes(b"MOVE").expect("MOVE is a valid HTTP method"), &from_url)
.basic_auth(self._username.as_str(), Some(self._password.as_str())) .basic_auth(self._username.as_str(), Some(self._password.as_str()))
.header("Destination", &to_url) .header("Destination", &to_url)
.header("Overwrite", "F") .header("Overwrite", "F")