Harden codebase: fix security, quality, and maintainability issues

- Replace dangerous unwrap() with proper error handling (storage.rs, webdav.rs)
- Add atomic writes (temp + rename) for config, sync state, and metadata files
- Add path traversal validation in sync executor
- Add workspace path validation in Tauri commands
- Add input size limits for task titles, descriptions, and list names
- Add file download size limit (10MB) to WebDAV get_file
- Fix move_task rollback to log failures instead of silently ignoring
- Fix JSON serialization unwrap in Tauri create_remote_workspace
- Fix swallowed errors in sync queue backup, metadata writes, sync state load
- Extract hardcoded strings into named constants (filenames, extensions, limits)
- Use REQUEST_TIMEOUT/CONNECT_TIMEOUT constants in WebDAV client builder
- Fix frontend: clear taskStack when viewed task is deleted or list is switched
- Fix frontend: surface credential loading and focus listener errors

https://claude.ai/code/session_01F67yfLLmSaBtT7aKKNus1M
This commit is contained in:
parent
b45f39c96c
commit
a12deb5182
|
|
@ -41,6 +41,33 @@ fn lock_state(state: &Mutex<AppState>) -> Result<std::sync::MutexGuard<'_, AppSt
|
|||
state.lock().map_err(|e| format!("State lock poisoned: {}", e))
|
||||
}
|
||||
|
||||
/// Validate that a workspace path is a reasonable directory and not a system path.
///
/// Rejects empty paths, Unix system directories (including the filesystem
/// root itself), and Windows drive roots, returning a user-facing error
/// message on rejection.
///
/// NOTE(review): this is a best-effort blocklist; the path is not
/// canonicalized, so symlinks and relative paths are not resolved here.
fn validate_workspace_path(path: &str) -> Result<(), String> {
    let p = PathBuf::from(path);
    let normalized = p.to_string_lossy();
    if normalized.is_empty() {
        return Err("Workspace path cannot be empty".into());
    }
    // Reject paths that are system root directories
    #[cfg(unix)]
    {
        let forbidden = ["/etc", "/usr", "/bin", "/sbin", "/var", "/proc", "/sys", "/dev"];
        // Strip trailing slashes so "/etc/" matches "/etc". A path made up
        // entirely of slashes trims to "" — that is the filesystem root,
        // which must be rejected explicitly: a "/" entry in `forbidden`
        // could never match after trimming (the previous version had this
        // bug and silently allowed "/" as a workspace).
        let canonical = normalized.trim_end_matches('/');
        if canonical.is_empty() || forbidden.contains(&canonical) {
            return Err(format!("Cannot use system directory as workspace: {}", path));
        }
    }
    #[cfg(windows)]
    {
        let upper = normalized.to_uppercase();
        // Drive roots like "C:" or "C:\". The alternatives are parenthesized
        // explicitly: `&&` binds tighter than `||`, and the ungrouped form
        // rejected ANY path ending in ':' regardless of length.
        if upper.len() <= 3 && (upper.ends_with(":\\") || upper.ends_with(':')) {
            return Err(format!("Cannot use drive root as workspace: {}", path));
        }
    }
    Ok(())
}
|
||||
|
||||
/// Serializable sync result for the frontend.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
struct SyncResult {
|
||||
|
|
@ -120,6 +147,7 @@ fn add_workspace(
|
|||
path: String,
|
||||
state: State<'_, Mutex<AppState>>,
|
||||
) -> Result<(), String> {
|
||||
validate_workspace_path(&path)?;
|
||||
let mut s = lock_state(&state)?;
|
||||
let ws = WorkspaceConfig::new(name, PathBuf::from(&path));
|
||||
let id = s.config.add_workspace(ws);
|
||||
|
|
@ -243,6 +271,7 @@ async fn rename_workspace(
|
|||
|
||||
#[tauri::command]
|
||||
fn init_workspace(path: String) -> Result<(), String> {
|
||||
validate_workspace_path(&path)?;
|
||||
TaskRepository::init(PathBuf::from(path))
|
||||
.map(|_| ())
|
||||
.map_err(|e| e.to_string())
|
||||
|
|
@ -625,7 +654,7 @@ async fn create_remote_workspace(
|
|||
} else {
|
||||
format!("{}/{}", path.trim_end_matches('/'), ".onyx-workspace.json")
|
||||
};
|
||||
client.put_file(&file_path, serde_json::to_string_pretty(&metadata).unwrap().into_bytes())
|
||||
client.put_file(&file_path, serde_json::to_string_pretty(&metadata).map_err(|e| e.to_string())?.into_bytes())
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
Ok(())
|
||||
|
|
|
|||
|
|
@ -26,7 +26,9 @@
|
|||
invoke<[string, string]>("load_credentials", { domain }).then(([u, p]) => {
|
||||
webdavUser = u;
|
||||
webdavPass = p;
|
||||
}).catch(() => {});
|
||||
}).catch((e) => {
|
||||
console.warn("Failed to load credentials:", e);
|
||||
});
|
||||
} catch {}
|
||||
});
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,13 @@
|
|||
let parentTask = $derived(taskStack.length >= 1 ? app.tasks.find(t => t.id === taskStack[0]) ?? null : null);
|
||||
let subtaskDetail = $derived(taskStack.length >= 2 ? app.tasks.find(t => t.id === taskStack[1]) ?? null : null);
|
||||
|
||||
// Clear taskStack when the viewed task no longer exists (e.g. deleted or list switched)
$effect(() => {
    // Nothing to clear when no task detail is open.
    if (taskStack.length === 0) return;
    // The task at the root of the stack vanished (deleted, or the active
    // list changed): drop the whole stack so the detail view closes.
    if (!parentTask) taskStack = [];
});
|
||||
|
||||
function openTask(task: Task) {
|
||||
taskStack = [task.id];
|
||||
}
|
||||
|
|
@ -282,7 +289,7 @@
|
|||
<div class="flex-1 overflow-y-auto py-2">
|
||||
{#each app.lists as list (list.id)}
|
||||
<button
|
||||
onclick={() => { app.selectList(list.id); closeDrawer(); }}
|
||||
onclick={() => { app.selectList(list.id); taskStack = []; closeDrawer(); }}
|
||||
class="group flex w-full items-center gap-2 px-5 py-2.5 text-left text-sm hover:bg-black/5 dark:hover:bg-white/10 {list.id === app.activeListId ? 'font-bold' : ''}"
|
||||
>
|
||||
{#if list.id === app.activeListId}
|
||||
|
|
|
|||
|
|
@ -369,7 +369,9 @@ function startAutoSync() {
|
|||
_syncInterval = setInterval(triggerSync, syncIntervalSecs * 1000);
|
||||
getCurrentWindow().onFocusChanged(({ payload: focused }) => {
|
||||
if (focused && Date.now() - lastSyncTime > SYNC_FOCUS_THRESHOLD_MS) triggerSync();
|
||||
}).then((unlisten) => { _focusUnlisten = unlisten; });
|
||||
}).then((unlisten) => { _focusUnlisten = unlisten; }).catch((e) => {
|
||||
console.warn("Failed to set up focus listener:", e);
|
||||
});
|
||||
}
|
||||
|
||||
function stopAutoSync() {
|
||||
|
|
|
|||
|
|
@ -115,7 +115,10 @@ impl AppConfig {
|
|||
std::fs::create_dir_all(parent)?;
|
||||
}
|
||||
let content = serde_json::to_string_pretty(&self)?;
|
||||
std::fs::write(path, content)?;
|
||||
// Atomic write: write to temp file then rename to prevent corruption on crash
|
||||
let temp = path.with_extension("tmp");
|
||||
std::fs::write(&temp, &content)?;
|
||||
std::fs::rename(&temp, path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -78,7 +78,9 @@ impl TaskRepository {
|
|||
self.storage.write_task(to_list_id, &task)?;
|
||||
// If delete from source fails, roll back by removing the copy from destination
|
||||
if let Err(e) = self.storage.delete_task(from_list_id, task_id) {
|
||||
let _ = self.storage.delete_task(to_list_id, task_id);
|
||||
if let Err(rollback_err) = self.storage.delete_task(to_list_id, task_id) {
|
||||
eprintln!("Warning: move_task rollback failed: {}", rollback_err);
|
||||
}
|
||||
return Err(e);
|
||||
}
|
||||
Ok(())
|
||||
|
|
|
|||
|
|
@ -7,6 +7,30 @@ use uuid::Uuid;
|
|||
use crate::error::{Error, Result};
|
||||
use crate::models::{Task, TaskList, TaskStatus};
|
||||
|
||||
/// Maximum allowed length for task titles.
|
||||
const MAX_TITLE_LENGTH: usize = 500;
|
||||
/// Maximum allowed length for task descriptions.
|
||||
const MAX_DESCRIPTION_LENGTH: usize = 1_000_000; // 1 MB
|
||||
/// Maximum allowed length for list names.
|
||||
const MAX_LIST_NAME_LENGTH: usize = 255;
|
||||
/// Workspace root metadata filename.
|
||||
const WORKSPACE_METADATA_FILE: &str = ".onyx-workspace.json";
|
||||
/// Per-list metadata filename.
|
||||
const LIST_METADATA_FILE: &str = ".listdata.json";
|
||||
/// Task file extension.
|
||||
const TASK_FILE_EXT: &str = "md";
|
||||
/// Default version for tasks without a version field (legacy files).
|
||||
const DEFAULT_TASK_VERSION: u64 = 1;
|
||||
|
||||
/// Write data to a temporary file then atomically rename to the target path.
/// Prevents corruption from partial writes on crash.
///
/// The temp file name is the target file name with ".tmp" APPENDED rather
/// than substituted for the existing extension: `with_extension("tmp")`
/// would map both "a.json" and "a.md" in the same directory onto the same
/// temp path "a.tmp" (a collision under concurrent writes), and could
/// clobber a legitimate file whose name is exactly "a.tmp".
fn atomic_write(path: &Path, content: &[u8]) -> std::io::Result<()> {
    let mut temp_name = path.as_os_str().to_owned();
    temp_name.push(".tmp");
    let temp = PathBuf::from(temp_name);
    fs::write(&temp, content)?;
    // rename() atomically replaces the target on POSIX filesystems when
    // source and destination are on the same filesystem, which holds here
    // (same directory).
    fs::rename(&temp, path)?;
    Ok(())
}
|
||||
|
||||
/// Metadata stored in root .onyx-workspace.json
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RootMetadata {
|
||||
|
|
@ -50,7 +74,7 @@ impl ListMetadata {
|
|||
}
|
||||
|
||||
fn is_false(v: &bool) -> bool { !v }
|
||||
fn default_version() -> u64 { 1 }
|
||||
fn default_version() -> u64 { DEFAULT_TASK_VERSION }
|
||||
|
||||
/// Frontmatter for task markdown files
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
|
|
@ -126,7 +150,7 @@ impl FileSystemStorage {
|
|||
}
|
||||
|
||||
fn metadata_path(&self) -> PathBuf {
|
||||
self.root_path.join(".onyx-workspace.json")
|
||||
self.root_path.join(WORKSPACE_METADATA_FILE)
|
||||
}
|
||||
|
||||
fn list_dir_path(&self, list_id: Uuid) -> Result<PathBuf> {
|
||||
|
|
@ -137,7 +161,7 @@ impl FileSystemStorage {
|
|||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
let listdata_path = path.join(".listdata.json");
|
||||
let listdata_path = path.join(LIST_METADATA_FILE);
|
||||
if listdata_path.exists() {
|
||||
let content = fs::read_to_string(&listdata_path)?;
|
||||
let list_metadata: ListMetadata = serde_json::from_str(&content)?;
|
||||
|
|
@ -191,7 +215,7 @@ impl FileSystemStorage {
|
|||
} else {
|
||||
safe_title
|
||||
};
|
||||
list_dir.join(format!("{}.md", filename))
|
||||
list_dir.join(format!("{}.{}", filename, TASK_FILE_EXT))
|
||||
}
|
||||
|
||||
fn parse_markdown_with_frontmatter(&self, content: &str) -> Result<(TaskFrontmatter, String)> {
|
||||
|
|
@ -247,7 +271,7 @@ impl FileSystemStorage {
|
|||
fn write_root_metadata_internal(&self, metadata: &RootMetadata) -> Result<()> {
|
||||
let path = self.metadata_path();
|
||||
let content = serde_json::to_string_pretty(&metadata)?;
|
||||
fs::write(&path, content)?;
|
||||
atomic_write(&path, content.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -263,7 +287,7 @@ impl Storage for FileSystemStorage {
|
|||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some("md") {
|
||||
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some(TASK_FILE_EXT) {
|
||||
let content = fs::read_to_string(&path)?;
|
||||
let (frontmatter, description) = self.parse_markdown_with_frontmatter(&content)?;
|
||||
|
||||
|
|
@ -291,6 +315,13 @@ impl Storage for FileSystemStorage {
|
|||
}
|
||||
|
||||
fn write_task(&mut self, list_id: Uuid, task: &Task) -> Result<()> {
|
||||
if task.title.len() > MAX_TITLE_LENGTH {
|
||||
return Err(Error::InvalidData(format!("Task title too long ({} chars, max {})", task.title.len(), MAX_TITLE_LENGTH)));
|
||||
}
|
||||
if task.description.len() > MAX_DESCRIPTION_LENGTH {
|
||||
return Err(Error::InvalidData(format!("Task description too long ({} bytes, max {})", task.description.len(), MAX_DESCRIPTION_LENGTH)));
|
||||
}
|
||||
|
||||
let list_dir = self.list_dir_path(list_id)?;
|
||||
let task_path = self.task_file_path(&list_dir, task);
|
||||
|
||||
|
|
@ -299,7 +330,7 @@ impl Storage for FileSystemStorage {
|
|||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
if path == task_path { continue; }
|
||||
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some("md") {
|
||||
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some(TASK_FILE_EXT) {
|
||||
if let Ok(content) = fs::read_to_string(&path) {
|
||||
if let Ok((fm, _)) = self.parse_markdown_with_frontmatter(&content) {
|
||||
if fm.id == task.id {
|
||||
|
|
@ -352,7 +383,7 @@ impl Storage for FileSystemStorage {
|
|||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some("md") {
|
||||
if path.is_file() && path.extension().and_then(|s| s.to_str()) == Some(TASK_FILE_EXT) {
|
||||
let content = fs::read_to_string(&path)?;
|
||||
let (frontmatter, description) = self.parse_markdown_with_frontmatter(&content)?;
|
||||
|
||||
|
|
@ -387,10 +418,13 @@ impl Storage for FileSystemStorage {
|
|||
if entries.len() > 1 {
|
||||
entries.sort_by(|a, b| b.1.version.cmp(&a.1.version));
|
||||
for (stale_path, _) in entries.drain(1..) {
|
||||
let _ = fs::remove_file(&stale_path);
|
||||
if let Err(e) = fs::remove_file(&stale_path) {
|
||||
eprintln!("Warning: failed to remove stale duplicate task file {:?}: {}", stale_path, e);
|
||||
}
|
||||
}
|
||||
let (_, task) = entries.into_iter().next().unwrap();
|
||||
}
|
||||
let (_, task) = entries.into_iter().next()
|
||||
.ok_or_else(|| Error::InvalidData("Empty dedup entries for task".to_string()))?;
|
||||
tasks.push(task);
|
||||
}
|
||||
|
||||
|
|
@ -407,6 +441,12 @@ impl Storage for FileSystemStorage {
|
|||
}
|
||||
|
||||
fn create_list(&mut self, name: String) -> Result<TaskList> {
|
||||
if name.trim().is_empty() {
|
||||
return Err(Error::InvalidData("List name cannot be empty".to_string()));
|
||||
}
|
||||
if name.len() > MAX_LIST_NAME_LENGTH {
|
||||
return Err(Error::InvalidData(format!("List name too long ({} chars, max {})", name.len(), MAX_LIST_NAME_LENGTH)));
|
||||
}
|
||||
let list_dir = self.list_dir_path_by_name(&name)?;
|
||||
|
||||
if list_dir.exists() {
|
||||
|
|
@ -420,7 +460,7 @@ impl Storage for FileSystemStorage {
|
|||
|
||||
let metadata_path = list_dir.join(".listdata.json");
|
||||
let content = serde_json::to_string_pretty(&list_metadata)?;
|
||||
fs::write(&metadata_path, content)?;
|
||||
atomic_write(&metadata_path, content.as_bytes())?;
|
||||
|
||||
// Add to root metadata
|
||||
let mut root_metadata = self.read_root_metadata_internal()?;
|
||||
|
|
@ -453,7 +493,7 @@ impl Storage for FileSystemStorage {
|
|||
let path = entry.path();
|
||||
|
||||
if path.is_dir() {
|
||||
let listdata_path = path.join(".listdata.json");
|
||||
let listdata_path = path.join(LIST_METADATA_FILE);
|
||||
if listdata_path.exists() {
|
||||
let content = fs::read_to_string(&listdata_path)?;
|
||||
let list_metadata: ListMetadata = serde_json::from_str(&content)?;
|
||||
|
|
@ -508,6 +548,12 @@ impl Storage for FileSystemStorage {
|
|||
}
|
||||
|
||||
fn rename_list(&mut self, list_id: Uuid, new_name: String) -> Result<()> {
|
||||
if new_name.trim().is_empty() {
|
||||
return Err(Error::InvalidData("List name cannot be empty".to_string()));
|
||||
}
|
||||
if new_name.len() > MAX_LIST_NAME_LENGTH {
|
||||
return Err(Error::InvalidData(format!("List name too long ({} chars, max {})", new_name.len(), MAX_LIST_NAME_LENGTH)));
|
||||
}
|
||||
let old_dir = self.list_dir_path(list_id)?;
|
||||
let new_dir = self.list_dir_path_by_name(&new_name)?;
|
||||
|
||||
|
|
@ -523,7 +569,7 @@ impl Storage for FileSystemStorage {
|
|||
let mut metadata: ListMetadata = serde_json::from_str(&content)?;
|
||||
metadata.updated_at = Utc::now();
|
||||
let json = serde_json::to_string_pretty(&metadata)?;
|
||||
fs::write(&metadata_path, json)?;
|
||||
atomic_write(&metadata_path, json.as_bytes())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -554,7 +600,7 @@ impl Storage for FileSystemStorage {
|
|||
let metadata_path = list_dir.join(".listdata.json");
|
||||
|
||||
let content = serde_json::to_string_pretty(&metadata)?;
|
||||
fs::write(&metadata_path, content)?;
|
||||
atomic_write(&metadata_path, content.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -265,7 +265,9 @@ impl OfflineQueue {
|
|||
Err(e) => {
|
||||
eprintln!("Warning: corrupt sync queue, backing up and resetting: {}", e);
|
||||
let backup = workspace_path.join(".syncqueue.json.bak");
|
||||
let _ = std::fs::copy(&queue_path, &backup);
|
||||
if let Err(backup_err) = std::fs::copy(&queue_path, &backup) {
|
||||
eprintln!("Warning: failed to backup corrupt sync queue: {}", backup_err);
|
||||
}
|
||||
Self::default()
|
||||
}
|
||||
},
|
||||
|
|
@ -281,12 +283,17 @@ impl OfflineQueue {
|
|||
if self.operations.is_empty() {
|
||||
// Clean up empty queue file
|
||||
if queue_path.exists() {
|
||||
let _ = std::fs::remove_file(&queue_path);
|
||||
if let Err(e) = std::fs::remove_file(&queue_path) {
|
||||
eprintln!("Warning: failed to remove empty sync queue file: {}", e);
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
let content = serde_json::to_string_pretty(self)?;
|
||||
std::fs::write(&queue_path, content)?;
|
||||
// Atomic write: write to temp then rename
|
||||
let temp_path = workspace_path.join(".syncqueue.json.tmp");
|
||||
std::fs::write(&temp_path, &content)?;
|
||||
std::fs::rename(&temp_path, &queue_path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@ -349,16 +356,21 @@ pub fn compute_checksum(data: &[u8]) -> String {
|
|||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
/// Workspace root metadata filename.
|
||||
const WORKSPACE_METADATA_FILE: &str = ".onyx-workspace.json";
|
||||
/// Per-list metadata filename.
|
||||
const LIST_METADATA_FILE: &str = ".listdata.json";
|
||||
|
||||
/// Check if a file is syncable: *.md files and metadata files at expected depths.
|
||||
fn is_syncable(path: &str) -> bool {
|
||||
let parts: Vec<&str> = path.split('/').collect();
|
||||
let filename = parts.last().copied().unwrap_or(path);
|
||||
// .onyx-workspace.json only at workspace root (depth 1)
|
||||
if filename == ".onyx-workspace.json" {
|
||||
if filename == WORKSPACE_METADATA_FILE {
|
||||
return parts.len() == 1;
|
||||
}
|
||||
// .listdata.json only inside a list directory (depth 2)
|
||||
if filename == ".listdata.json" {
|
||||
if filename == LIST_METADATA_FILE {
|
||||
return parts.len() == 2;
|
||||
}
|
||||
// .md files inside a list directory (depth 2)
|
||||
|
|
@ -451,15 +463,27 @@ impl SyncState {
|
|||
return Self::default();
|
||||
}
|
||||
match std::fs::read_to_string(&state_path) {
|
||||
Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
|
||||
Err(_) => Self::default(),
|
||||
Ok(content) => match serde_json::from_str(&content) {
|
||||
Ok(state) => state,
|
||||
Err(e) => {
|
||||
eprintln!("Warning: corrupt sync state file, resetting: {}", e);
|
||||
Self::default()
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
eprintln!("Warning: failed to read sync state file: {}", e);
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save(&self, workspace_path: &Path) -> Result<()> {
|
||||
let state_path = workspace_path.join(".syncstate.json");
|
||||
let content = serde_json::to_string_pretty(self)?;
|
||||
std::fs::write(&state_path, content)?;
|
||||
// Atomic write: write to temp file then rename to prevent corruption on crash
|
||||
let temp_path = workspace_path.join(".syncstate.json.tmp");
|
||||
std::fs::write(&temp_path, &content)?;
|
||||
std::fs::rename(&temp_path, &state_path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@ -589,6 +613,16 @@ async fn sync_workspace_inner(
|
|||
Ok(result)
|
||||
}
|
||||
|
||||
/// Validate that a sync path doesn't escape the workspace via path traversal.
|
||||
fn validate_sync_path(path: &str) -> Result<()> {
|
||||
for component in path.split('/') {
|
||||
if component == ".." || component.contains('\\') {
|
||||
return Err(Error::Sync(format!("Path traversal not allowed: {}", path)));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Execute a single sync action.
|
||||
async fn execute_action(
|
||||
client: &WebDavClient,
|
||||
|
|
@ -598,6 +632,9 @@ async fn execute_action(
|
|||
remote_meta: &HashMap<&str, &RemoteFileSnapshot>,
|
||||
report: &(dyn Fn(&str) + Send + Sync),
|
||||
) -> Result<()> {
|
||||
// Validate path before any file system operation
|
||||
validate_sync_path(action.path())?;
|
||||
|
||||
match action {
|
||||
SyncAction::Upload { path } => {
|
||||
let local_path = workspace_path.join(path.replace('/', std::path::MAIN_SEPARATOR_STR));
|
||||
|
|
@ -681,7 +718,9 @@ async fn execute_action(
|
|||
.unwrap_or(metadata.task_order.len());
|
||||
metadata.task_order.insert(insert_pos, new_id);
|
||||
if let Ok(json) = serde_json::to_string_pretty(&metadata) {
|
||||
let _ = std::fs::write(&listdata_path, json);
|
||||
if let Err(e) = std::fs::write(&listdata_path, json) {
|
||||
eprintln!("Warning: failed to update listdata after conflict recovery: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,6 +5,9 @@ use crate::error::{Error, Result};
|
|||
|
||||
/// Hard timeout for any WebDAV network operation.
|
||||
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
|
||||
const CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
/// Maximum size for file downloads (10 MB).
|
||||
const MAX_FILE_BYTES: u64 = 10 * 1024 * 1024;
|
||||
|
||||
/// Information about a file on the remote WebDAV server.
|
||||
#[derive(Debug, Clone)]
|
||||
|
|
@ -36,8 +39,8 @@ impl WebDavClient {
|
|||
let base_url = base_url.trim_end_matches('/').to_string();
|
||||
Self {
|
||||
_client: Client::builder()
|
||||
.timeout(Duration::from_secs(30))
|
||||
.connect_timeout(Duration::from_secs(10))
|
||||
.timeout(REQUEST_TIMEOUT)
|
||||
.connect_timeout(CONNECT_TIMEOUT)
|
||||
.build()
|
||||
.unwrap_or_else(|_| Client::new()),
|
||||
_base_url: base_url,
|
||||
|
|
@ -64,7 +67,7 @@ impl WebDavClient {
|
|||
/// Test connection by issuing a PROPFIND depth 0 on the root.
|
||||
pub async fn test_connection(&self) -> Result<()> {
|
||||
let resp = self._client
|
||||
.request(reqwest::Method::from_bytes(b"PROPFIND").unwrap(), &self._base_url)
|
||||
.request(reqwest::Method::from_bytes(b"PROPFIND").expect("PROPFIND is a valid HTTP method"), &self._base_url)
|
||||
.basic_auth(self._username.as_str(), Some(self._password.as_str()))
|
||||
.header("Depth", "0")
|
||||
.header("Content-Type", "application/xml")
|
||||
|
|
@ -86,7 +89,7 @@ impl WebDavClient {
|
|||
pub async fn list_files(&self, path: &str) -> Result<Vec<RemoteFileInfo>> {
|
||||
let url = self.full_url(path);
|
||||
let resp = self._client
|
||||
.request(reqwest::Method::from_bytes(b"PROPFIND").unwrap(), &url)
|
||||
.request(reqwest::Method::from_bytes(b"PROPFIND").expect("PROPFIND is a valid HTTP method"), &url)
|
||||
.basic_auth(self._username.as_str(), Some(self._password.as_str()))
|
||||
.header("Depth", "1")
|
||||
.header("Content-Type", "application/xml")
|
||||
|
|
@ -129,7 +132,15 @@ impl WebDavClient {
|
|||
return Err(Error::WebDav(format!("GET failed with status {}", status)));
|
||||
}
|
||||
|
||||
Ok(resp.bytes().await?.to_vec())
|
||||
if resp.content_length().unwrap_or(0) > MAX_FILE_BYTES {
|
||||
return Err(Error::WebDav(format!("File too large (>{}MB)", MAX_FILE_BYTES / (1024 * 1024))));
|
||||
}
|
||||
let bytes = resp.bytes().await?;
|
||||
if bytes.len() as u64 > MAX_FILE_BYTES {
|
||||
return Err(Error::WebDav(format!("File too large (>{}MB)", MAX_FILE_BYTES / (1024 * 1024))));
|
||||
}
|
||||
|
||||
Ok(bytes.to_vec())
|
||||
}
|
||||
|
||||
/// Upload a file.
|
||||
|
|
@ -172,7 +183,7 @@ impl WebDavClient {
|
|||
pub async fn create_dir(&self, path: &str) -> Result<()> {
|
||||
let url = self.full_url(path);
|
||||
let resp = self._client
|
||||
.request(reqwest::Method::from_bytes(b"MKCOL").unwrap(), &url)
|
||||
.request(reqwest::Method::from_bytes(b"MKCOL").expect("MKCOL is a valid HTTP method"), &url)
|
||||
.basic_auth(self._username.as_str(), Some(self._password.as_str()))
|
||||
.send()
|
||||
.await?;
|
||||
|
|
@ -192,7 +203,7 @@ impl WebDavClient {
|
|||
let from_url = self.full_url(from);
|
||||
let to_url = self.full_url(to);
|
||||
let resp = self._client
|
||||
.request(reqwest::Method::from_bytes(b"MOVE").unwrap(), &from_url)
|
||||
.request(reqwest::Method::from_bytes(b"MOVE").expect("MOVE is a valid HTTP method"), &from_url)
|
||||
.basic_auth(self._username.as_str(), Some(self._password.as_str()))
|
||||
.header("Destination", &to_url)
|
||||
.header("Overwrite", "F")
|
||||
|
|
|
|||
Loading…
Reference in a new issue