From 76dc7cf1fa4912132f47e8b70b0699342fe9210b Mon Sep 17 00:00:00 2001 From: Chip Senkbeil Date: Wed, 31 May 2023 02:38:16 -0500 Subject: [PATCH] Refactor into protocol crate & change capabilities -> version (#189) --- CHANGELOG.md | 12 + Cargo.lock | 57 +- Cargo.toml | 4 +- README.md | 6 +- distant-core/Cargo.toml | 9 +- distant-core/README.md | 11 +- distant-core/src/api.rs | 14 +- distant-core/src/api/local.rs | 95 +- distant-core/src/api/local/state/watcher.rs | 20 +- distant-core/src/client/ext.rs | 30 +- distant-core/src/client/watcher.rs | 18 +- distant-core/src/lib.rs | 7 +- distant-core/src/protocol.rs | 572 ---- distant-core/src/protocol/capabilities.rs | 207 -- distant-core/src/protocol/change.rs | 516 --- distant-core/src/protocol/cmd.rs | 53 - distant-core/src/protocol/filesystem.rs | 59 - distant-core/src/protocol/metadata.rs | 404 --- distant-core/src/protocol/permissions.rs | 294 -- distant-core/src/protocol/pty.rs | 140 - distant-core/src/protocol/search.rs | 425 --- distant-core/src/protocol/system.rs | 59 - distant-core/tests/stress/distant/watch.rs | 4 +- distant-net/Cargo.toml | 3 - distant-net/README.md | 11 +- distant-net/src/common/map.rs | 8 - distant-net/src/common/packet/request.rs | 8 - distant-net/src/common/packet/response.rs | 8 - distant-net/src/manager/data/capabilities.rs | 23 - distant-net/src/manager/data/request.rs | 4 - distant-protocol/Cargo.toml | 29 + distant-protocol/src/common.rs | 29 + distant-protocol/src/common/capabilities.rs | 380 +++ distant-protocol/src/common/change.rs | 380 +++ distant-protocol/src/common/cmd.rs | 89 + .../src/common}/error.rs | 206 +- distant-protocol/src/common/filesystem.rs | 173 + distant-protocol/src/common/metadata.rs | 1044 ++++++ distant-protocol/src/common/permissions.rs | 658 ++++ distant-protocol/src/common/pty.rs | 241 ++ distant-protocol/src/common/search.rs | 1829 ++++++++++ distant-protocol/src/common/system.rs | 142 + distant-protocol/src/common/version.rs | 130 + 
distant-protocol/src/lib.rs | 17 + distant-protocol/src/msg.rs | 192 ++ distant-protocol/src/request.rs | 2981 +++++++++++++++++ distant-protocol/src/response.rs | 1973 +++++++++++ .../src}/utils.rs | 22 +- distant-ssh2/src/api.rs | 38 +- distant-ssh2/src/process.rs | 4 +- src/cli/commands/client.rs | 181 +- src/cli/commands/common/format.rs | 24 +- src/cli/commands/generate.rs | 31 - src/options.rs | 77 +- tests/cli/api/mod.rs | 2 +- tests/cli/api/search.rs | 10 +- tests/cli/api/{capabilities.rs => version.rs} | 14 +- tests/cli/client/capabilities.rs | 68 - tests/cli/client/mod.rs | 2 +- tests/cli/client/version.rs | 34 + tests/cli/utils.rs | 7 +- tests/cli/utils/predicates.rs | 50 + 62 files changed, 10904 insertions(+), 3234 deletions(-) delete mode 100644 distant-core/src/protocol.rs delete mode 100644 distant-core/src/protocol/capabilities.rs delete mode 100644 distant-core/src/protocol/change.rs delete mode 100644 distant-core/src/protocol/cmd.rs delete mode 100644 distant-core/src/protocol/filesystem.rs delete mode 100644 distant-core/src/protocol/metadata.rs delete mode 100644 distant-core/src/protocol/permissions.rs delete mode 100644 distant-core/src/protocol/pty.rs delete mode 100644 distant-core/src/protocol/search.rs delete mode 100644 distant-core/src/protocol/system.rs create mode 100644 distant-protocol/Cargo.toml create mode 100644 distant-protocol/src/common.rs create mode 100644 distant-protocol/src/common/capabilities.rs create mode 100644 distant-protocol/src/common/change.rs create mode 100644 distant-protocol/src/common/cmd.rs rename {distant-core/src/protocol => distant-protocol/src/common}/error.rs (59%) create mode 100644 distant-protocol/src/common/filesystem.rs create mode 100644 distant-protocol/src/common/metadata.rs create mode 100644 distant-protocol/src/common/permissions.rs create mode 100644 distant-protocol/src/common/pty.rs create mode 100644 distant-protocol/src/common/search.rs create mode 100644 
distant-protocol/src/common/system.rs create mode 100644 distant-protocol/src/common/version.rs create mode 100644 distant-protocol/src/lib.rs create mode 100644 distant-protocol/src/msg.rs create mode 100644 distant-protocol/src/request.rs create mode 100644 distant-protocol/src/response.rs rename {distant-core/src/protocol => distant-protocol/src}/utils.rs (52%) rename tests/cli/api/{capabilities.rs => version.rs} (65%) delete mode 100644 tests/cli/client/capabilities.rs create mode 100644 tests/cli/client/version.rs create mode 100644 tests/cli/utils/predicates.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fe1ab2..afedef0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - New `set_permissions` method available `DistantApi` and implemented by local server (ssh unavailable due to https://github.com/wez/wezterm/issues/3784) - Implementation of `DistantChannelExt::set_permissions` +- `distant version` to display information about connected server ### Changed @@ -22,6 +23,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `distant_net::common::Keychain` - Moved `distant_net::common::transport::framed::codec::encryption::SecretKey` and similar to `distant_net::common::SecretKey` +- Search matches reported with `match` key are now inlined as either a byte + array or a string and no longer an object with a `type` and `value` field +- Unset options and values are not now returned in `JSON` serialization versus + the explicit `null` value provided +- `Capabilities` message type has been changed to `Version` with new struct to + report the version information that includes a server version string, + protocol version tuple, and capabilities + +### Removed + +- `distant capabilities` has been removed in favor of `distant version` ## [0.20.0-alpha.6] diff --git a/Cargo.lock b/Cargo.lock index 919ddb1..a23b8a8 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -872,6 +872,7 @@ dependencies = [ "bytes", "derive_more", "distant-net", + "distant-protocol", "env_logger", "futures", "grep", @@ -887,7 +888,6 @@ dependencies = [ "rand", "regex", "rstest", - "schemars", "serde", "serde_bytes", "serde_json", @@ -920,7 +920,6 @@ dependencies = [ "paste", "rand", "rmp-serde", - "schemars", "serde", "serde_bytes", "serde_json", @@ -931,6 +930,21 @@ dependencies = [ "tokio", ] +[[package]] +name = "distant-protocol" +version = "0.20.0-alpha.7" +dependencies = [ + "bitflags 2.3.1", + "derive_more", + "regex", + "rmp", + "rmp-serde", + "serde", + "serde_bytes", + "serde_json", + "strum", +] + [[package]] name = "distant-ssh2" version = "0.20.0-alpha.7" @@ -1934,9 +1948,9 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "notify" -version = "5.2.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729f63e1ca555a43fe3efa4f3efdf4801c479da85b432242a7b726f353c88486" +checksum = "4d9ba6c734de18ca27c8cef5cd7058aa4ac9f63596131e4c7e41e579319032a2" dependencies = [ "bitflags 1.3.2", "crossbeam-channel", @@ -2693,30 +2707,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schemars" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f" -dependencies = [ - "dyn-clone", - "schemars_derive", - "serde", - "serde_json", -] - -[[package]] -name = "schemars_derive" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c" -dependencies = [ - "proc-macro2", - "quote", - "serde_derive_internals", - "syn 1.0.109", -] - [[package]] name = "scopeguard" version = "1.1.0" @@ -2790,17 +2780,6 @@ dependencies = [ "syn 2.0.16", ] -[[package]] -name = "serde_derive_internals" -version = "0.26.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_json" version = "1.0.96" diff --git a/Cargo.toml b/Cargo.toml index 8d30b04..45ea843 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ readme = "README.md" license = "MIT OR Apache-2.0" [workspace] -members = ["distant-auth", "distant-core", "distant-net", "distant-ssh2"] +members = ["distant-auth", "distant-core", "distant-net", "distant-protocol", "distant-ssh2"] [profile.release] opt-level = 'z' @@ -32,7 +32,7 @@ clap_complete = "4.2.0" config = { version = "0.13.3", default-features = false, features = ["toml"] } derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] } dialoguer = { version = "0.10.3", default-features = false } -distant-core = { version = "=0.20.0-alpha.7", path = "distant-core", features = ["schemars"] } +distant-core = { version = "=0.20.0-alpha.7", path = "distant-core" } directories = "5.0.0" file-mode = "0.1.2" flexi_logger = "0.25.3" diff --git a/README.md b/README.md index 826e61e..853322b 100644 --- a/README.md +++ b/README.md @@ -64,13 +64,13 @@ the available features and which backend supports each feature: | Feature | distant | ssh | | --------------------- | --------| ----| -| Capabilities | ✅ | ✅ | | Filesystem I/O | ✅ | ✅ | | Filesystem Watching | ✅ | ✅ | | Process Execution | ✅ | ✅ | | Reconnect | ✅ | ❌ | | Search | ✅ | ❌ | -| System Information | ✅ | ⚠ | +| System Information | ✅ | ⚠ | +| Version | ✅ | ✅ | * ✅ means full support * ⚠ means partial support @@ -78,7 +78,6 @@ the available features and which backend supports each feature: ### Feature Details -* `Capabilities` - able to report back what it is capable of performing * `Filesystem I/O` - able to read from and write to the filesystem * `Filesystem Watching` - able to receive 
notifications when changes to the filesystem occur @@ -86,6 +85,7 @@ the available features and which backend supports each feature: * `Reconnect` - able to reconnect after network outages * `Search` - able to search the filesystem * `System Information` - able to retrieve information about the system +* `Version` - able to report back version information ## Example diff --git a/distant-core/Cargo.toml b/distant-core/Cargo.toml index a62fe58..bc9725a 100644 --- a/distant-core/Cargo.toml +++ b/distant-core/Cargo.toml @@ -11,21 +11,19 @@ repository = "https://github.com/chipsenkbeil/distant" readme = "README.md" license = "MIT OR Apache-2.0" -[features] -schemars = ["dep:schemars", "distant-net/schemars"] - [dependencies] async-trait = "0.1.68" bitflags = "2.0.2" bytes = "1.4.0" derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] } distant-net = { version = "=0.20.0-alpha.7", path = "../distant-net" } +distant-protocol = { version = "=0.20.0-alpha.7", path = "../distant-protocol" } futures = "0.3.28" grep = "0.2.11" hex = "0.4.3" ignore = "0.4.20" log = "0.4.17" -notify = { version = "5.1.0", features = ["serde"] } +notify = { version = "6.0.0", features = ["serde"] } num_cpus = "1.15.0" once_cell = "1.17.1" portable-pty = "0.8.1" @@ -42,9 +40,6 @@ walkdir = "2.3.3" whoami = "1.4.0" winsplit = "0.1.0" -# Optional dependencies based on features -schemars = { version = "0.8.12", optional = true } - [dev-dependencies] assert_fs = "1.0.12" env_logger = "0.10.0" diff --git a/distant-core/README.md b/distant-core/README.md index 16e8d23..a679ca1 100644 --- a/distant-core/README.md +++ b/distant-core/README.md @@ -28,18 +28,9 @@ You can import the dependency by adding the following to your `Cargo.toml`: ```toml [dependencies] -distant-core = "0.19" +distant-core = "0.20" ``` -## Features - -Currently, the library supports the 
following features: - -- `schemars`: derives the `schemars::JsonSchema` interface on - `DistantMsg`, `DistantRequestData`, and `DistantResponseData` data types - -By default, no features are enabled on the library. - ## Examples Below is an example of connecting to a distant server over TCP without any diff --git a/distant-core/src/api.rs b/distant-core/src/api.rs index 3c9e25b..39c1faf 100644 --- a/distant-core/src/api.rs +++ b/distant-core/src/api.rs @@ -8,8 +8,8 @@ use distant_net::server::{ConnectionCtx, Reply, ServerCtx, ServerHandler}; use log::*; use crate::protocol::{ - self, Capabilities, ChangeKind, DirEntry, Environment, Error, Metadata, Permissions, ProcessId, - PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, + self, ChangeKind, DirEntry, Environment, Error, Metadata, Permissions, ProcessId, PtySize, + SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version, }; mod local; @@ -76,8 +76,8 @@ pub trait DistantApi { /// /// *Override this, otherwise it will return "unsupported" as an error.* #[allow(unused_variables)] - async fn capabilities(&self, ctx: DistantCtx) -> io::Result { - unsupported("capabilities") + async fn version(&self, ctx: DistantCtx) -> io::Result { + unsupported("version") } /// Reads bytes from a file. 
@@ -536,11 +536,11 @@ where D: Send + Sync, { match request { - protocol::Request::Capabilities {} => server + protocol::Request::Version {} => server .api - .capabilities(ctx) + .version(ctx) .await - .map(|supported| protocol::Response::Capabilities { supported }) + .map(protocol::Response::Version) .unwrap_or_else(protocol::Response::from), protocol::Request::FileRead { path } => server .api diff --git a/distant-core/src/api/local.rs b/distant-core/src/api/local.rs index 3eab60e..0a69575 100644 --- a/distant-core/src/api/local.rs +++ b/distant-core/src/api/local.rs @@ -1,5 +1,6 @@ -use std::io; use std::path::{Path, PathBuf}; +use std::time::SystemTime; +use std::{env, io}; use async_trait::async_trait; use ignore::{DirEntry as WalkDirEntry, WalkBuilder}; @@ -10,6 +11,7 @@ use walkdir::WalkDir; use crate::protocol::{ Capabilities, ChangeKind, ChangeKindSet, DirEntry, Environment, FileType, Metadata, Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, + Version, PROTOCOL_VERSION, }; use crate::{DistantApi, DistantCtx}; @@ -39,12 +41,6 @@ impl LocalDistantApi { impl DistantApi for LocalDistantApi { type LocalData = (); - async fn capabilities(&self, ctx: DistantCtx) -> io::Result { - debug!("[Conn {}] Querying capabilities", ctx.connection_id); - - Ok(Capabilities::all()) - } - async fn read_file( &self, ctx: DistantCtx, @@ -409,7 +405,66 @@ impl DistantApi for LocalDistantApi { "[Conn {}] Reading metadata for {:?} {{canonicalize: {}, resolve_file_type: {}}}", ctx.connection_id, path, canonicalize, resolve_file_type ); - Metadata::read(path, canonicalize, resolve_file_type).await + let metadata = tokio::fs::symlink_metadata(path.as_path()).await?; + let canonicalized_path = if canonicalize { + Some(tokio::fs::canonicalize(path.as_path()).await?) 
+ } else { + None + }; + + // If asking for resolved file type and current type is symlink, then we want to refresh + // our metadata to get the filetype for the resolved link + let file_type = if resolve_file_type && metadata.file_type().is_symlink() { + tokio::fs::metadata(path).await?.file_type() + } else { + metadata.file_type() + }; + + Ok(Metadata { + canonicalized_path, + accessed: metadata + .accessed() + .ok() + .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) + .map(|d| d.as_millis()), + created: metadata + .created() + .ok() + .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) + .map(|d| d.as_millis()), + modified: metadata + .modified() + .ok() + .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) + .map(|d| d.as_millis()), + len: metadata.len(), + readonly: metadata.permissions().readonly(), + file_type: if file_type.is_dir() { + FileType::Dir + } else if file_type.is_file() { + FileType::File + } else { + FileType::Symlink + }, + + #[cfg(unix)] + unix: Some({ + use std::os::unix::prelude::*; + let mode = metadata.mode(); + crate::protocol::UnixMetadata::from(mode) + }), + #[cfg(not(unix))] + unix: None, + + #[cfg(windows)] + windows: Some({ + use std::os::windows::prelude::*; + let attributes = metadata.file_attributes(); + crate::protocol::WindowsMetadata::from(attributes) + }), + #[cfg(not(windows))] + windows: None, + }) } async fn set_permissions( @@ -615,7 +670,29 @@ impl DistantApi for LocalDistantApi { async fn system_info(&self, ctx: DistantCtx) -> io::Result { debug!("[Conn {}] Reading system information", ctx.connection_id); - Ok(SystemInfo::default()) + Ok(SystemInfo { + family: env::consts::FAMILY.to_string(), + os: env::consts::OS.to_string(), + arch: env::consts::ARCH.to_string(), + current_dir: env::current_dir().unwrap_or_default(), + main_separator: std::path::MAIN_SEPARATOR, + username: whoami::username(), + shell: if cfg!(windows) { + env::var("ComSpec").unwrap_or_else(|_| String::from("cmd.exe")) + } 
else { + env::var("SHELL").unwrap_or_else(|_| String::from("/bin/sh")) + }, + }) + } + + async fn version(&self, ctx: DistantCtx) -> io::Result { + debug!("[Conn {}] Querying version", ctx.connection_id); + + Ok(Version { + server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")), + protocol_version: PROTOCOL_VERSION, + capabilities: Capabilities::all(), + }) } } diff --git a/distant-core/src/api/local/state/watcher.rs b/distant-core/src/api/local/state/watcher.rs index a564875..477ab02 100644 --- a/distant-core/src/api/local/state/watcher.rs +++ b/distant-core/src/api/local/state/watcher.rs @@ -5,12 +5,13 @@ use std::path::{Path, PathBuf}; use distant_net::common::ConnectionId; use log::*; +use notify::event::{AccessKind, AccessMode, ModifyKind}; use notify::{ Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind, - Event as WatcherEvent, PollWatcher, RecursiveMode, Watcher, + Event as WatcherEvent, EventKind, PollWatcher, RecursiveMode, Watcher, }; +use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use tokio::sync::mpsc::{self}; use tokio::sync::oneshot; use tokio::task::JoinHandle; @@ -256,7 +257,20 @@ async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver { - let kind = ChangeKind::from(ev.kind); + let kind = match ev.kind { + EventKind::Access(AccessKind::Read) => ChangeKind::Access, + EventKind::Modify(ModifyKind::Metadata(_)) => ChangeKind::Attribute, + EventKind::Access(AccessKind::Close(AccessMode::Write)) => { + ChangeKind::CloseWrite + } + EventKind::Access(AccessKind::Close(_)) => ChangeKind::CloseNoWrite, + EventKind::Create(_) => ChangeKind::Create, + EventKind::Remove(_) => ChangeKind::Delete, + EventKind::Modify(ModifyKind::Data(_)) => ChangeKind::Modify, + EventKind::Access(AccessKind::Open(_)) => ChangeKind::Open, + EventKind::Modify(ModifyKind::Name(_)) => ChangeKind::Rename, + _ => ChangeKind::Unknown, + }; for registered_path in registered_paths.iter() { match 
registered_path.filter_and_send(kind, &ev.paths).await { diff --git a/distant-core/src/client/ext.rs b/distant-core/src/client/ext.rs index 8361d40..f65f0f0 100644 --- a/distant-core/src/client/ext.rs +++ b/distant-core/src/client/ext.rs @@ -11,8 +11,8 @@ use crate::client::{ Watcher, }; use crate::protocol::{ - self, Capabilities, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, - Permissions, PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, + self, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, Permissions, PtySize, + SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version, }; pub type AsyncReturn<'a, T, E = io::Error> = @@ -38,9 +38,6 @@ pub trait DistantChannelExt { data: impl Into, ) -> AsyncReturn<'_, ()>; - /// Retrieves server capabilities - fn capabilities(&mut self) -> AsyncReturn<'_, Capabilities>; - /// Copies a remote file or directory from src to dst fn copy(&mut self, src: impl Into, dst: impl Into) -> AsyncReturn<'_, ()>; @@ -136,6 +133,9 @@ pub trait DistantChannelExt { /// Retrieves information about the remote system fn system_info(&mut self) -> AsyncReturn<'_, SystemInfo>; + /// Retrieves server version information + fn version(&mut self) -> AsyncReturn<'_, Version>; + /// Writes a remote file with the data from a collection of bytes fn write_file( &mut self, @@ -204,18 +204,6 @@ impl DistantChannelExt ) } - fn capabilities(&mut self) -> AsyncReturn<'_, Capabilities> { - make_body!( - self, - protocol::Request::Capabilities {}, - |data| match data { - protocol::Response::Capabilities { supported } => Ok(supported), - protocol::Response::Error(x) => Err(io::Error::from(x)), - _ => Err(mismatched_response()), - } - ) - } - fn copy(&mut self, src: impl Into, dst: impl Into) -> AsyncReturn<'_, ()> { make_body!( self, @@ -457,6 +445,14 @@ impl DistantChannelExt }) } + fn version(&mut self) -> AsyncReturn<'_, Version> { + make_body!(self, protocol::Request::Version {}, |data| match data { 
+ protocol::Response::Version(x) => Ok(x), + protocol::Response::Error(x) => Err(io::Error::from(x)), + _ => Err(mismatched_response()), + }) + } + fn write_file( &mut self, path: impl Into, diff --git a/distant-core/src/client/watcher.rs b/distant-core/src/client/watcher.rs index 1708916..ec66455 100644 --- a/distant-core/src/client/watcher.rs +++ b/distant-core/src/client/watcher.rs @@ -267,7 +267,7 @@ mod tests { paths: vec![test_path.to_path_buf()], }), protocol::Response::Changed(Change { - kind: ChangeKind::Content, + kind: ChangeKind::Modify, paths: vec![test_path.to_path_buf()], }), ], @@ -289,7 +289,7 @@ mod tests { assert_eq!( change, Change { - kind: ChangeKind::Content, + kind: ChangeKind::Modify, paths: vec![test_path.to_path_buf()] } ); @@ -342,7 +342,7 @@ mod tests { .write_frame_for(&Response::new( req.id.clone() + "1", protocol::Response::Changed(Change { - kind: ChangeKind::Content, + kind: ChangeKind::Modify, paths: vec![test_path.to_path_buf()], }), )) @@ -354,7 +354,7 @@ mod tests { .write_frame_for(&Response::new( req.id, protocol::Response::Changed(Change { - kind: ChangeKind::Remove, + kind: ChangeKind::Delete, paths: vec![test_path.to_path_buf()], }), )) @@ -375,7 +375,7 @@ mod tests { assert_eq!( change, Change { - kind: ChangeKind::Remove, + kind: ChangeKind::Delete, paths: vec![test_path.to_path_buf()] } ); @@ -418,11 +418,11 @@ mod tests { paths: vec![test_path.to_path_buf()], }), protocol::Response::Changed(Change { - kind: ChangeKind::Content, + kind: ChangeKind::Modify, paths: vec![test_path.to_path_buf()], }), protocol::Response::Changed(Change { - kind: ChangeKind::Remove, + kind: ChangeKind::Delete, paths: vec![test_path.to_path_buf()], }), ], @@ -482,14 +482,14 @@ mod tests { assert_eq!( watcher.lock().await.next().await, Some(Change { - kind: ChangeKind::Content, + kind: ChangeKind::Modify, paths: vec![test_path.to_path_buf()] }) ); assert_eq!( watcher.lock().await.next().await, Some(Change { - kind: ChangeKind::Remove, + kind: 
ChangeKind::Delete, paths: vec![test_path.to_path_buf()] }) ); diff --git a/distant-core/src/lib.rs b/distant-core/src/lib.rs index 2ab5778..80237ac 100644 --- a/distant-core/src/lib.rs +++ b/distant-core/src/lib.rs @@ -7,10 +7,11 @@ pub use client::*; mod credentials; pub use credentials::*; -pub mod protocol; - mod constants; mod serde_str; -/// Re-export of `distant-net` as `net` +/// Network functionality. pub use distant_net as net; + +/// Protocol structures. +pub use distant_protocol as protocol; diff --git a/distant-core/src/protocol.rs b/distant-core/src/protocol.rs deleted file mode 100644 index 02d44b0..0000000 --- a/distant-core/src/protocol.rs +++ /dev/null @@ -1,572 +0,0 @@ -use std::io; -use std::path::PathBuf; - -use derive_more::{From, IsVariant}; -use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString}; - -mod capabilities; -pub use capabilities::*; - -mod change; -pub use change::*; - -mod cmd; -pub use cmd::*; - -mod error; -pub use error::*; - -mod filesystem; -pub use filesystem::*; - -mod metadata; -pub use metadata::*; - -mod permissions; -pub use permissions::*; - -mod pty; -pub use pty::*; - -mod search; -pub use search::*; - -mod system; -pub use system::*; - -mod utils; -pub(crate) use utils::*; - -/// Id for a remote process -pub type ProcessId = u32; - -/// Mapping of environment variables -pub type Environment = distant_net::common::Map; - -/// Represents a wrapper around a distant message, supporting single and batch requests -#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(untagged)] -pub enum Msg { - Single(T), - Batch(Vec), -} - -impl Msg { - /// Returns true if msg has a single payload - pub fn is_single(&self) -> bool { - matches!(self, Self::Single(_)) - } - - /// Returns reference to single value if msg is single variant - pub fn as_single(&self) -> Option<&T> { - match self { 
- Self::Single(x) => Some(x), - _ => None, - } - } - - /// Returns mutable reference to single value if msg is single variant - pub fn as_mut_single(&mut self) -> Option<&T> { - match self { - Self::Single(x) => Some(x), - _ => None, - } - } - - /// Returns the single value if msg is single variant - pub fn into_single(self) -> Option { - match self { - Self::Single(x) => Some(x), - _ => None, - } - } - - /// Returns true if msg has a batch of payloads - pub fn is_batch(&self) -> bool { - matches!(self, Self::Batch(_)) - } - - /// Returns reference to batch value if msg is batch variant - pub fn as_batch(&self) -> Option<&[T]> { - match self { - Self::Batch(x) => Some(x), - _ => None, - } - } - - /// Returns mutable reference to batch value if msg is batch variant - pub fn as_mut_batch(&mut self) -> Option<&mut [T]> { - match self { - Self::Batch(x) => Some(x), - _ => None, - } - } - - /// Returns the batch value if msg is batch variant - pub fn into_batch(self) -> Option> { - match self { - Self::Batch(x) => Some(x), - _ => None, - } - } - - /// Convert into a collection of payload data - pub fn into_vec(self) -> Vec { - match self { - Self::Single(x) => vec![x], - Self::Batch(x) => x, - } - } -} - -#[cfg(feature = "schemars")] -impl Msg { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Msg) - } -} - -/// Represents the payload of a request to be performed on the remote machine -#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[strum_discriminants(derive( - AsRefStr, - strum::Display, - EnumIter, - EnumMessage, - EnumString, - Hash, - PartialOrd, - Ord, - IsVariant, - Serialize, - Deserialize -))] -#[cfg_attr( - feature = "schemars", - strum_discriminants(derive(schemars::JsonSchema)) -)] -#[strum_discriminants(name(CapabilityKind))] -#[strum_discriminants(strum(serialize_all = "snake_case"))] -#[serde(rename_all = 
"snake_case", deny_unknown_fields, tag = "type")] -pub enum Request { - /// Retrieve information about the server's capabilities - #[strum_discriminants(strum(message = "Supports retrieving capabilities"))] - Capabilities {}, - - /// Reads a file from the specified path on the remote machine - #[strum_discriminants(strum(message = "Supports reading binary file"))] - FileRead { - /// The path to the file on the remote machine - path: PathBuf, - }, - - /// Reads a file from the specified path on the remote machine - /// and treats the contents as text - #[strum_discriminants(strum(message = "Supports reading text file"))] - FileReadText { - /// The path to the file on the remote machine - path: PathBuf, - }, - - /// Writes a file, creating it if it does not exist, and overwriting any existing content - /// on the remote machine - #[strum_discriminants(strum(message = "Supports writing binary file"))] - FileWrite { - /// The path to the file on the remote machine - path: PathBuf, - - /// Data for server-side writing of content - #[serde(with = "serde_bytes")] - #[cfg_attr(feature = "schemars", schemars(with = "Vec"))] - data: Vec, - }, - - /// Writes a file using text instead of bytes, creating it if it does not exist, - /// and overwriting any existing content on the remote machine - #[strum_discriminants(strum(message = "Supports writing text file"))] - FileWriteText { - /// The path to the file on the remote machine - path: PathBuf, - - /// Data for server-side writing of content - text: String, - }, - - /// Appends to a file, creating it if it does not exist, on the remote machine - #[strum_discriminants(strum(message = "Supports appending to binary file"))] - FileAppend { - /// The path to the file on the remote machine - path: PathBuf, - - /// Data for server-side writing of content - #[serde(with = "serde_bytes")] - #[cfg_attr(feature = "schemars", schemars(with = "Vec"))] - data: Vec, - }, - - /// Appends text to a file, creating it if it does not exist, on 
the remote machine - #[strum_discriminants(strum(message = "Supports appending to text file"))] - FileAppendText { - /// The path to the file on the remote machine - path: PathBuf, - - /// Data for server-side writing of content - text: String, - }, - - /// Reads a directory from the specified path on the remote machine - #[strum_discriminants(strum(message = "Supports reading directory"))] - DirRead { - /// The path to the directory on the remote machine - path: PathBuf, - - /// Maximum depth to traverse with 0 indicating there is no maximum - /// depth and 1 indicating the most immediate children within the - /// directory - #[serde(default = "one")] - depth: usize, - - /// Whether or not to return absolute or relative paths - #[serde(default)] - absolute: bool, - - /// Whether or not to canonicalize the resulting paths, meaning - /// returning the canonical, absolute form of a path with all - /// intermediate components normalized and symbolic links resolved - /// - /// Note that the flag absolute must be true to have absolute paths - /// returned, even if canonicalize is flagged as true - #[serde(default)] - canonicalize: bool, - - /// Whether or not to include the root directory in the retrieved - /// entries - /// - /// If included, the root directory will also be a canonicalized, - /// absolute path and will not follow any of the other flags - #[serde(default)] - include_root: bool, - }, - - /// Creates a directory on the remote machine - #[strum_discriminants(strum(message = "Supports creating directory"))] - DirCreate { - /// The path to the directory on the remote machine - path: PathBuf, - - /// Whether or not to create all parent directories - #[serde(default)] - all: bool, - }, - - /// Removes a file or directory on the remote machine - #[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))] - Remove { - /// The path to the file or directory on the remote machine - path: PathBuf, - - /// Whether or not to remove 
all contents within directory if is a directory. - /// Does nothing different for files - #[serde(default)] - force: bool, - }, - - /// Copies a file or directory on the remote machine - #[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))] - Copy { - /// The path to the file or directory on the remote machine - src: PathBuf, - - /// New location on the remote machine for copy of file or directory - dst: PathBuf, - }, - - /// Moves/renames a file or directory on the remote machine - #[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))] - Rename { - /// The path to the file or directory on the remote machine - src: PathBuf, - - /// New location on the remote machine for the file or directory - dst: PathBuf, - }, - - /// Watches a path for changes - #[strum_discriminants(strum(message = "Supports watching filesystem for changes"))] - Watch { - /// The path to the file, directory, or symlink on the remote machine - path: PathBuf, - - /// If true, will recursively watch for changes within directories, othewise - /// will only watch for changes immediately within directories - #[serde(default)] - recursive: bool, - - /// Filter to only report back specified changes - #[serde(default)] - only: Vec, - - /// Filter to report back changes except these specified changes - #[serde(default)] - except: Vec, - }, - - /// Unwatches a path for changes, meaning no additional changes will be reported - #[strum_discriminants(strum(message = "Supports unwatching filesystem for changes"))] - Unwatch { - /// The path to the file, directory, or symlink on the remote machine - path: PathBuf, - }, - - /// Checks whether the given path exists - #[strum_discriminants(strum(message = "Supports checking if a path exists"))] - Exists { - /// The path to the file or directory on the remote machine - path: PathBuf, - }, - - /// Retrieves filesystem metadata for the specified path on the remote machine - 
#[strum_discriminants(strum( - message = "Supports retrieving metadata about a file, directory, or symlink" - ))] - Metadata { - /// The path to the file, directory, or symlink on the remote machine - path: PathBuf, - - /// Whether or not to include a canonicalized version of the path, meaning - /// returning the canonical, absolute form of a path with all - /// intermediate components normalized and symbolic links resolved - #[serde(default)] - canonicalize: bool, - - /// Whether or not to follow symlinks to determine absolute file type (dir/file) - #[serde(default)] - resolve_file_type: bool, - }, - - /// Sets permissions on a file, directory, or symlink on the remote machine - #[strum_discriminants(strum( - message = "Supports setting permissions on a file, directory, or symlink" - ))] - SetPermissions { - /// The path to the file, directory, or symlink on the remote machine - path: PathBuf, - - /// New permissions to apply to the file, directory, or symlink - permissions: Permissions, - - /// Additional options to supply when setting permissions - #[serde(default)] - options: SetPermissionsOptions, - }, - - /// Searches filesystem using the provided query - #[strum_discriminants(strum(message = "Supports searching filesystem using queries"))] - Search { - /// Query to perform against the filesystem - query: SearchQuery, - }, - - /// Cancels an active search being run against the filesystem - #[strum_discriminants(strum( - message = "Supports canceling an active search against the filesystem" - ))] - CancelSearch { - /// Id of the search to cancel - id: SearchId, - }, - - /// Spawns a new process on the remote machine - #[strum_discriminants(strum(message = "Supports spawning a process"))] - ProcSpawn { - /// The full command to run including arguments - cmd: Cmd, - - /// Environment to provide to the remote process - #[serde(default)] - environment: Environment, - - /// Alternative current directory for the remote process - #[serde(default)] - current_dir: 
Option, - - /// If provided, will spawn process in a pty, otherwise spawns directly - #[serde(default)] - pty: Option, - }, - - /// Kills a process running on the remote machine - #[strum_discriminants(strum(message = "Supports killing a spawned process"))] - ProcKill { - /// Id of the actively-running process - id: ProcessId, - }, - - /// Sends additional data to stdin of running process - #[strum_discriminants(strum(message = "Supports sending stdin to a spawned process"))] - ProcStdin { - /// Id of the actively-running process to send stdin data - id: ProcessId, - - /// Data to send to a process's stdin pipe - #[serde(with = "serde_bytes")] - #[cfg_attr(feature = "schemars", schemars(with = "Vec"))] - data: Vec, - }, - - /// Resize pty of remote process - #[strum_discriminants(strum(message = "Supports resizing the pty of a spawned process"))] - ProcResizePty { - /// Id of the actively-running process whose pty to resize - id: ProcessId, - - /// The new pty dimensions - size: PtySize, - }, - - /// Retrieve information about the server and the system it is on - #[strum_discriminants(strum(message = "Supports retrieving system information"))] - SystemInfo {}, -} - -#[cfg(feature = "schemars")] -impl Request { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Request) - } -} - -/// Represents the payload of a successful response -#[derive(Clone, Debug, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] -#[strum(serialize_all = "snake_case")] -pub enum Response { - /// General okay with no extra data, returned in cases like - /// creating or removing a directory, copying a file, or renaming - /// a file - Ok, - - /// General-purpose failure that occurred from some request - Error(Error), - - /// Response containing some arbitrary, binary data - Blob { - /// Binary data associated with the 
response - #[serde(with = "serde_bytes")] - #[cfg_attr(feature = "schemars", schemars(with = "Vec"))] - data: Vec, - }, - - /// Response containing some arbitrary, text data - Text { - /// Text data associated with the response - data: String, - }, - - /// Response to reading a directory - DirEntries { - /// Entries contained within the requested directory - entries: Vec, - - /// Errors encountered while scanning for entries - errors: Vec, - }, - - /// Response to a filesystem change for some watched file, directory, or symlink - Changed(Change), - - /// Response to checking if a path exists - Exists { value: bool }, - - /// Represents metadata about some filesystem object (file, directory, symlink) on remote machine - Metadata(Metadata), - - /// Represents a search being started - SearchStarted { - /// Arbitrary id associated with search - id: SearchId, - }, - - /// Represents some subset of results for a search query (may not be all of them) - SearchResults { - /// Arbitrary id associated with search - id: SearchId, - - /// Collection of matches from performing a query - matches: Vec, - }, - - /// Represents a search being completed - SearchDone { - /// Arbitrary id associated with search - id: SearchId, - }, - - /// Response to starting a new process - ProcSpawned { - /// Arbitrary id associated with running process - id: ProcessId, - }, - - /// Actively-transmitted stdout as part of running process - ProcStdout { - /// Arbitrary id associated with running process - id: ProcessId, - - /// Data read from a process' stdout pipe - #[serde(with = "serde_bytes")] - #[cfg_attr(feature = "schemars", schemars(with = "Vec"))] - data: Vec, - }, - - /// Actively-transmitted stderr as part of running process - ProcStderr { - /// Arbitrary id associated with running process - id: ProcessId, - - /// Data read from a process' stderr pipe - #[serde(with = "serde_bytes")] - #[cfg_attr(feature = "schemars", schemars(with = "Vec"))] - data: Vec, - }, - - /// Response to a process 
finishing - ProcDone { - /// Arbitrary id associated with running process - id: ProcessId, - - /// Whether or not termination was successful - success: bool, - - /// Exit code associated with termination, will be missing if terminated by signal - code: Option, - }, - - /// Response to retrieving information about the server and the system it is on - SystemInfo(SystemInfo), - - /// Response to retrieving information about the server's capabilities - Capabilities { supported: Capabilities }, -} - -#[cfg(feature = "schemars")] -impl Response { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Response) - } -} - -impl From for Response { - fn from(x: io::Error) -> Self { - Self::Error(Error::from(x)) - } -} - -/// Used to provide a default serde value of 1 -const fn one() -> usize { - 1 -} diff --git a/distant-core/src/protocol/capabilities.rs b/distant-core/src/protocol/capabilities.rs deleted file mode 100644 index 45e9b8f..0000000 --- a/distant-core/src/protocol/capabilities.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::cmp::Ordering; -use std::collections::HashSet; -use std::hash::{Hash, Hasher}; -use std::ops::{BitAnd, BitOr, BitXor}; -use std::str::FromStr; - -use derive_more::{From, Into, IntoIterator}; -use serde::{Deserialize, Serialize}; -use strum::{EnumMessage, IntoEnumIterator}; - -use super::CapabilityKind; - -/// Set of supported capabilities for a server -#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(transparent)] -pub struct Capabilities(#[into_iterator(owned, ref)] HashSet); - -impl Capabilities { - /// Return set of capabilities encompassing all possible capabilities - pub fn all() -> Self { - Self(CapabilityKind::iter().map(Capability::from).collect()) - } - - /// Return empty set of capabilities - pub fn none() -> Self { - Self(HashSet::new()) - } - - /// Returns true if the capability with described 
kind is included - pub fn contains(&self, kind: impl AsRef) -> bool { - let cap = Capability { - kind: kind.as_ref().to_string(), - description: String::new(), - }; - self.0.contains(&cap) - } - - /// Adds the specified capability to the set of capabilities - /// - /// * If the set did not have this capability, returns `true` - /// * If the set did have this capability, returns `false` - pub fn insert(&mut self, cap: impl Into) -> bool { - self.0.insert(cap.into()) - } - - /// Removes the capability with the described kind, returning the capability - pub fn take(&mut self, kind: impl AsRef) -> Option { - let cap = Capability { - kind: kind.as_ref().to_string(), - description: String::new(), - }; - self.0.take(&cap) - } - - /// Removes the capability with the described kind, returning true if it existed - pub fn remove(&mut self, kind: impl AsRef) -> bool { - let cap = Capability { - kind: kind.as_ref().to_string(), - description: String::new(), - }; - self.0.remove(&cap) - } - - /// Converts into vec of capabilities sorted by kind - pub fn into_sorted_vec(self) -> Vec { - let mut this = self.0.into_iter().collect::>(); - - this.sort_unstable(); - - this - } -} - -#[cfg(feature = "schemars")] -impl Capabilities { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Capabilities) - } -} - -impl BitAnd for &Capabilities { - type Output = Capabilities; - - fn bitand(self, rhs: Self) -> Self::Output { - Capabilities(self.0.bitand(&rhs.0)) - } -} - -impl BitOr for &Capabilities { - type Output = Capabilities; - - fn bitor(self, rhs: Self) -> Self::Output { - Capabilities(self.0.bitor(&rhs.0)) - } -} - -impl BitOr for &Capabilities { - type Output = Capabilities; - - fn bitor(self, rhs: Capability) -> Self::Output { - let mut other = Capabilities::none(); - other.0.insert(rhs); - - self.bitor(&other) - } -} - -impl BitXor for &Capabilities { - type Output = Capabilities; - - fn bitxor(self, rhs: Self) -> Self::Output { - 
Capabilities(self.0.bitxor(&rhs.0)) - } -} - -impl FromIterator for Capabilities { - fn from_iter>(iter: I) -> Self { - let mut this = Capabilities::none(); - - for capability in iter { - this.0.insert(capability); - } - - this - } -} - -/// Capability tied to a server. A capability is equivalent based on its kind and not description. -#[derive(Clone, Debug, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields)] -pub struct Capability { - /// Label describing the kind of capability - pub kind: String, - - /// Information about the capability - pub description: String, -} - -impl Capability { - /// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible, - /// returning None if the capability is unknown - pub fn to_capability_kind(&self) -> Option { - CapabilityKind::from_str(&self.kind).ok() - } - - /// Returns true if the described capability is unknown - pub fn is_unknown(&self) -> bool { - self.to_capability_kind().is_none() - } -} - -impl PartialEq for Capability { - fn eq(&self, other: &Self) -> bool { - self.kind.eq_ignore_ascii_case(&other.kind) - } -} - -impl Eq for Capability {} - -impl PartialOrd for Capability { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Capability { - fn cmp(&self, other: &Self) -> Ordering { - self.kind - .to_ascii_lowercase() - .cmp(&other.kind.to_ascii_lowercase()) - } -} - -impl Hash for Capability { - fn hash(&self, state: &mut H) { - self.kind.to_ascii_lowercase().hash(state); - } -} - -impl From for Capability { - /// Creates a new capability using the kind's default message - fn from(kind: CapabilityKind) -> Self { - Self { - kind: kind.to_string(), - description: kind - .get_message() - .map(ToString::to_string) - .unwrap_or_default(), - } - } -} - -#[cfg(feature = "schemars")] -impl Capability { - pub fn root_schema() -> schemars::schema::RootSchema { - 
schemars::schema_for!(Capability) - } -} - -#[cfg(feature = "schemars")] -impl CapabilityKind { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(CapabilityKind) - } -} diff --git a/distant-core/src/protocol/change.rs b/distant-core/src/protocol/change.rs deleted file mode 100644 index 578a337..0000000 --- a/distant-core/src/protocol/change.rs +++ /dev/null @@ -1,516 +0,0 @@ -use std::collections::HashSet; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::iter::FromIterator; -use std::ops::{BitOr, Sub}; -use std::path::PathBuf; -use std::str::FromStr; - -use derive_more::{Deref, DerefMut, IntoIterator}; -use notify::event::Event as NotifyEvent; -use notify::EventKind as NotifyEventKind; -use serde::{Deserialize, Serialize}; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -/// Change to one or more paths on the filesystem -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields)] -pub struct Change { - /// Label describing the kind of change - pub kind: ChangeKind, - - /// Paths that were changed - pub paths: Vec, -} - -#[cfg(feature = "schemars")] -impl Change { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Change) - } -} - -impl From for Change { - fn from(x: NotifyEvent) -> Self { - Self { - kind: x.kind.into(), - paths: x.paths, - } - } -} - -#[derive( - Copy, - Clone, - Debug, - strum::Display, - EnumString, - EnumVariantNames, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, -)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields)] -#[strum(serialize_all = "snake_case")] -pub enum ChangeKind { - /// Something about a file or directory was accessed, but - /// no specific details were known - Access, - - /// A file was closed for executing - 
AccessCloseExecute, - - /// A file was closed for reading - AccessCloseRead, - - /// A file was closed for writing - AccessCloseWrite, - - /// A file was opened for executing - AccessOpenExecute, - - /// A file was opened for reading - AccessOpenRead, - - /// A file was opened for writing - AccessOpenWrite, - - /// A file or directory was read - AccessRead, - - /// The access time of a file or directory was changed - AccessTime, - - /// A file, directory, or something else was created - Create, - - /// The content of a file or directory changed - Content, - - /// The data of a file or directory was modified, but - /// no specific details were known - Data, - - /// The metadata of a file or directory was modified, but - /// no specific details were known - Metadata, - - /// Something about a file or directory was modified, but - /// no specific details were known - Modify, - - /// A file, directory, or something else was removed - Remove, - - /// A file or directory was renamed, but no specific details were known - Rename, - - /// A file or directory was renamed, and the provided paths - /// are the source and target in that order (from, to) - RenameBoth, - - /// A file or directory was renamed, and the provided path - /// is the origin of the rename (before being renamed) - RenameFrom, - - /// A file or directory was renamed, and the provided path - /// is the result of the rename - RenameTo, - - /// A file's size changed - Size, - - /// The ownership of a file or directory was changed - Ownership, - - /// The permissions of a file or directory was changed - Permissions, - - /// The write or modify time of a file or directory was changed - WriteTime, - - // Catchall in case we have no insight as to the type of change - Unknown, -} - -impl ChangeKind { - /// Returns a list of all variants as str names - pub const fn variants() -> &'static [&'static str] { - Self::VARIANTS - } - - /// Returns a list of all variants as a vec - pub fn all() -> Vec { - 
ChangeKindSet::all().into_sorted_vec() - } - - /// Returns true if the change is a kind of access - pub fn is_access_kind(&self) -> bool { - self.is_open_access_kind() - || self.is_close_access_kind() - || matches!(self, Self::Access | Self::AccessRead) - } - - /// Returns true if the change is a kind of open access - pub fn is_open_access_kind(&self) -> bool { - matches!( - self, - Self::AccessOpenExecute | Self::AccessOpenRead | Self::AccessOpenWrite - ) - } - - /// Returns true if the change is a kind of close access - pub fn is_close_access_kind(&self) -> bool { - matches!( - self, - Self::AccessCloseExecute | Self::AccessCloseRead | Self::AccessCloseWrite - ) - } - - /// Returns true if the change is a kind of creation - pub fn is_create_kind(&self) -> bool { - matches!(self, Self::Create) - } - - /// Returns true if the change is a kind of modification - pub fn is_modify_kind(&self) -> bool { - self.is_data_modify_kind() || self.is_metadata_modify_kind() || matches!(self, Self::Modify) - } - - /// Returns true if the change is a kind of data modification - pub fn is_data_modify_kind(&self) -> bool { - matches!(self, Self::Content | Self::Data | Self::Size) - } - - /// Returns true if the change is a kind of metadata modification - pub fn is_metadata_modify_kind(&self) -> bool { - matches!( - self, - Self::AccessTime - | Self::Metadata - | Self::Ownership - | Self::Permissions - | Self::WriteTime - ) - } - - /// Returns true if the change is a kind of rename - pub fn is_rename_kind(&self) -> bool { - matches!( - self, - Self::Rename | Self::RenameBoth | Self::RenameFrom | Self::RenameTo - ) - } - - /// Returns true if the change is a kind of removal - pub fn is_remove_kind(&self) -> bool { - matches!(self, Self::Remove) - } - - /// Returns true if the change kind is unknown - pub fn is_unknown_kind(&self) -> bool { - matches!(self, Self::Unknown) - } -} - -#[cfg(feature = "schemars")] -impl ChangeKind { - pub fn root_schema() -> schemars::schema::RootSchema { 
- schemars::schema_for!(ChangeKind) - } -} - -impl BitOr for ChangeKind { - type Output = ChangeKindSet; - - fn bitor(self, rhs: Self) -> Self::Output { - let mut set = ChangeKindSet::empty(); - set.insert(self); - set.insert(rhs); - set - } -} - -impl From for ChangeKind { - fn from(x: NotifyEventKind) -> Self { - use notify::event::{ - AccessKind, AccessMode, DataChange, MetadataKind, ModifyKind, RenameMode, - }; - match x { - // File/directory access events - NotifyEventKind::Access(AccessKind::Read) => Self::AccessRead, - NotifyEventKind::Access(AccessKind::Open(AccessMode::Execute)) => { - Self::AccessOpenExecute - } - NotifyEventKind::Access(AccessKind::Open(AccessMode::Read)) => Self::AccessOpenRead, - NotifyEventKind::Access(AccessKind::Open(AccessMode::Write)) => Self::AccessOpenWrite, - NotifyEventKind::Access(AccessKind::Close(AccessMode::Execute)) => { - Self::AccessCloseExecute - } - NotifyEventKind::Access(AccessKind::Close(AccessMode::Read)) => Self::AccessCloseRead, - NotifyEventKind::Access(AccessKind::Close(AccessMode::Write)) => Self::AccessCloseWrite, - NotifyEventKind::Access(_) => Self::Access, - - // File/directory creation events - NotifyEventKind::Create(_) => Self::Create, - - // Rename-oriented events - NotifyEventKind::Modify(ModifyKind::Name(RenameMode::Both)) => Self::RenameBoth, - NotifyEventKind::Modify(ModifyKind::Name(RenameMode::From)) => Self::RenameFrom, - NotifyEventKind::Modify(ModifyKind::Name(RenameMode::To)) => Self::RenameTo, - NotifyEventKind::Modify(ModifyKind::Name(_)) => Self::Rename, - - // Data-modification events - NotifyEventKind::Modify(ModifyKind::Data(DataChange::Content)) => Self::Content, - NotifyEventKind::Modify(ModifyKind::Data(DataChange::Size)) => Self::Size, - NotifyEventKind::Modify(ModifyKind::Data(_)) => Self::Data, - - // Metadata-modification events - NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::AccessTime)) => { - Self::AccessTime - } - 
NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => { - Self::WriteTime - } - NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => { - Self::Permissions - } - NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => { - Self::Ownership - } - NotifyEventKind::Modify(ModifyKind::Metadata(_)) => Self::Metadata, - - // General modification events - NotifyEventKind::Modify(_) => Self::Modify, - - // File/directory removal events - NotifyEventKind::Remove(_) => Self::Remove, - - // Catch-all for other events - NotifyEventKind::Any | NotifyEventKind::Other => Self::Unknown, - } - } -} - -/// Represents a distinct set of different change kinds -#[derive(Clone, Debug, Deref, DerefMut, IntoIterator, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct ChangeKindSet(HashSet); - -impl ChangeKindSet { - /// Produces an empty set of [`ChangeKind`] - pub fn empty() -> Self { - Self(HashSet::new()) - } - - /// Produces a set of all [`ChangeKind`] - pub fn all() -> Self { - vec![ - ChangeKind::Access, - ChangeKind::AccessCloseExecute, - ChangeKind::AccessCloseRead, - ChangeKind::AccessCloseWrite, - ChangeKind::AccessOpenExecute, - ChangeKind::AccessOpenRead, - ChangeKind::AccessOpenWrite, - ChangeKind::AccessRead, - ChangeKind::AccessTime, - ChangeKind::Create, - ChangeKind::Content, - ChangeKind::Data, - ChangeKind::Metadata, - ChangeKind::Modify, - ChangeKind::Remove, - ChangeKind::Rename, - ChangeKind::RenameBoth, - ChangeKind::RenameFrom, - ChangeKind::RenameTo, - ChangeKind::Size, - ChangeKind::Ownership, - ChangeKind::Permissions, - ChangeKind::WriteTime, - ChangeKind::Unknown, - ] - .into_iter() - .collect() - } - - /// Produces a changeset containing all of the access kinds - pub fn access_set() -> Self { - Self::access_open_set() - | Self::access_close_set() - | ChangeKind::AccessRead - | ChangeKind::Access - } - - /// Produces a changeset containing all of the 
open access kinds - pub fn access_open_set() -> Self { - ChangeKind::AccessOpenExecute | ChangeKind::AccessOpenRead | ChangeKind::AccessOpenWrite - } - - /// Produces a changeset containing all of the close access kinds - pub fn access_close_set() -> Self { - ChangeKind::AccessCloseExecute | ChangeKind::AccessCloseRead | ChangeKind::AccessCloseWrite - } - - // Produces a changeset containing all of the modification kinds - pub fn modify_set() -> Self { - Self::modify_data_set() | Self::modify_metadata_set() | ChangeKind::Modify - } - - /// Produces a changeset containing all of the data modification kinds - pub fn modify_data_set() -> Self { - ChangeKind::Content | ChangeKind::Data | ChangeKind::Size - } - - /// Produces a changeset containing all of the metadata modification kinds - pub fn modify_metadata_set() -> Self { - ChangeKind::AccessTime - | ChangeKind::Metadata - | ChangeKind::Ownership - | ChangeKind::Permissions - | ChangeKind::WriteTime - } - - /// Produces a changeset containing all of the rename kinds - pub fn rename_set() -> Self { - ChangeKind::Rename | ChangeKind::RenameBoth | ChangeKind::RenameFrom | ChangeKind::RenameTo - } - - /// Consumes set and returns a sorted vec of the kinds of changes - pub fn into_sorted_vec(self) -> Vec { - let mut v = self.0.into_iter().collect::>(); - v.sort(); - v - } -} - -#[cfg(feature = "schemars")] -impl ChangeKindSet { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(ChangeKindSet) - } -} - -impl fmt::Display for ChangeKindSet { - /// Outputs a comma-separated series of [`ChangeKind`] as string that are sorted - /// such that this will always be consistent output - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut kinds = self - .0 - .iter() - .map(ToString::to_string) - .collect::>(); - kinds.sort_unstable(); - write!(f, "{}", kinds.join(",")) - } -} - -impl PartialEq for ChangeKindSet { - fn eq(&self, other: &Self) -> bool { - self.to_string() == 
other.to_string() - } -} - -impl Eq for ChangeKindSet {} - -impl Hash for ChangeKindSet { - /// Hashes based on the output of [`fmt::Display`] - fn hash(&self, state: &mut H) { - self.to_string().hash(state); - } -} - -impl BitOr for ChangeKindSet { - type Output = Self; - - fn bitor(mut self, rhs: ChangeKindSet) -> Self::Output { - self.extend(rhs.0); - self - } -} - -impl BitOr for ChangeKindSet { - type Output = Self; - - fn bitor(mut self, rhs: ChangeKind) -> Self::Output { - self.0.insert(rhs); - self - } -} - -impl BitOr for ChangeKind { - type Output = ChangeKindSet; - - fn bitor(self, rhs: ChangeKindSet) -> Self::Output { - rhs | self - } -} - -impl Sub for ChangeKindSet { - type Output = Self; - - fn sub(self, other: Self) -> Self::Output { - ChangeKindSet(&self.0 - &other.0) - } -} - -impl Sub<&'_ ChangeKindSet> for &ChangeKindSet { - type Output = ChangeKindSet; - - fn sub(self, other: &ChangeKindSet) -> Self::Output { - ChangeKindSet(&self.0 - &other.0) - } -} - -impl FromStr for ChangeKindSet { - type Err = strum::ParseError; - - fn from_str(s: &str) -> Result { - let mut change_set = HashSet::new(); - - for word in s.split(',') { - change_set.insert(ChangeKind::from_str(word.trim())?); - } - - Ok(ChangeKindSet(change_set)) - } -} - -impl FromIterator for ChangeKindSet { - fn from_iter>(iter: I) -> Self { - let mut change_set = HashSet::new(); - - for i in iter { - change_set.insert(i); - } - - ChangeKindSet(change_set) - } -} - -impl From for ChangeKindSet { - fn from(change_kind: ChangeKind) -> Self { - let mut set = Self::empty(); - set.insert(change_kind); - set - } -} - -impl From> for ChangeKindSet { - fn from(changes: Vec) -> Self { - changes.into_iter().collect() - } -} - -impl Default for ChangeKindSet { - fn default() -> Self { - Self::empty() - } -} diff --git a/distant-core/src/protocol/cmd.rs b/distant-core/src/protocol/cmd.rs deleted file mode 100644 index 74ff005..0000000 --- a/distant-core/src/protocol/cmd.rs +++ /dev/null @@ -1,53 +0,0 
@@ -use std::ops::{Deref, DerefMut}; - -use derive_more::{Display, From, Into}; -use serde::{Deserialize, Serialize}; - -/// Represents some command with arguments to execute -#[derive(Clone, Debug, Display, From, Into, Hash, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct Cmd(String); - -impl Cmd { - /// Creates a new command from the given `cmd` - pub fn new(cmd: impl Into) -> Self { - Self(cmd.into()) - } - - /// Returns reference to the program portion of the command - pub fn program(&self) -> &str { - match self.0.split_once(' ') { - Some((program, _)) => program.trim(), - None => self.0.trim(), - } - } - - /// Returns reference to the arguments portion of the command - pub fn arguments(&self) -> &str { - match self.0.split_once(' ') { - Some((_, arguments)) => arguments.trim(), - None => "", - } - } -} - -#[cfg(feature = "schemars")] -impl Cmd { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Cmd) - } -} - -impl Deref for Cmd { - type Target = String; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Cmd { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} diff --git a/distant-core/src/protocol/filesystem.rs b/distant-core/src/protocol/filesystem.rs deleted file mode 100644 index b71328d..0000000 --- a/distant-core/src/protocol/filesystem.rs +++ /dev/null @@ -1,59 +0,0 @@ -use std::fs::FileType as StdFileType; -use std::path::PathBuf; - -use derive_more::IsVariant; -use serde::{Deserialize, Serialize}; -use strum::AsRefStr; - -/// Represents information about a single entry within a directory -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields)] -pub struct DirEntry { - /// Represents the full path to the entry - pub path: PathBuf, - - /// Represents the type of the entry as a 
file/dir/symlink - pub file_type: FileType, - - /// Depth at which this entry was created relative to the root (0 being immediately within - /// root) - pub depth: usize, -} - -#[cfg(feature = "schemars")] -impl DirEntry { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(DirEntry) - } -} - -/// Represents the type associated with a dir entry -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields)] -#[strum(serialize_all = "snake_case")] -pub enum FileType { - Dir, - File, - Symlink, -} - -impl From for FileType { - fn from(ft: StdFileType) -> Self { - if ft.is_dir() { - Self::Dir - } else if ft.is_symlink() { - Self::Symlink - } else { - Self::File - } - } -} - -#[cfg(feature = "schemars")] -impl FileType { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(FileType) - } -} diff --git a/distant-core/src/protocol/metadata.rs b/distant-core/src/protocol/metadata.rs deleted file mode 100644 index dcd7208..0000000 --- a/distant-core/src/protocol/metadata.rs +++ /dev/null @@ -1,404 +0,0 @@ -use std::io; -use std::path::{Path, PathBuf}; -use std::time::SystemTime; - -use bitflags::bitflags; -use serde::{Deserialize, Serialize}; - -use super::{deserialize_u128_option, serialize_u128_option, FileType}; - -/// Represents metadata about some path on a remote machine -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct Metadata { - /// Canonicalized path to the file or directory, resolving symlinks, only included - /// if flagged during the request - pub canonicalized_path: Option, - - /// Represents the type of the entry as a file/dir/symlink - pub file_type: FileType, - - /// Size of the file/directory/symlink in bytes - pub len: u64, - - /// Whether or not the 
file/directory/symlink is marked as unwriteable - pub readonly: bool, - - /// Represents the last time (in milliseconds) when the file/directory/symlink was accessed; - /// can be optional as certain systems don't support this - #[serde(serialize_with = "serialize_u128_option")] - #[serde(deserialize_with = "deserialize_u128_option")] - pub accessed: Option, - - /// Represents when (in milliseconds) the file/directory/symlink was created; - /// can be optional as certain systems don't support this - #[serde(serialize_with = "serialize_u128_option")] - #[serde(deserialize_with = "deserialize_u128_option")] - pub created: Option, - - /// Represents the last time (in milliseconds) when the file/directory/symlink was modified; - /// can be optional as certain systems don't support this - #[serde(serialize_with = "serialize_u128_option")] - #[serde(deserialize_with = "deserialize_u128_option")] - pub modified: Option, - - /// Represents metadata that is specific to a unix remote machine - pub unix: Option, - - /// Represents metadata that is specific to a windows remote machine - pub windows: Option, -} - -impl Metadata { - pub async fn read( - path: impl AsRef, - canonicalize: bool, - resolve_file_type: bool, - ) -> io::Result { - let metadata = tokio::fs::symlink_metadata(path.as_ref()).await?; - let canonicalized_path = if canonicalize { - Some(tokio::fs::canonicalize(path.as_ref()).await?) 
- } else { - None - }; - - // If asking for resolved file type and current type is symlink, then we want to refresh - // our metadata to get the filetype for the resolved link - let file_type = if resolve_file_type && metadata.file_type().is_symlink() { - tokio::fs::metadata(path).await?.file_type() - } else { - metadata.file_type() - }; - - Ok(Self { - canonicalized_path, - accessed: metadata - .accessed() - .ok() - .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) - .map(|d| d.as_millis()), - created: metadata - .created() - .ok() - .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) - .map(|d| d.as_millis()), - modified: metadata - .modified() - .ok() - .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) - .map(|d| d.as_millis()), - len: metadata.len(), - readonly: metadata.permissions().readonly(), - file_type: if file_type.is_dir() { - FileType::Dir - } else if file_type.is_file() { - FileType::File - } else { - FileType::Symlink - }, - - #[cfg(unix)] - unix: Some({ - use std::os::unix::prelude::*; - let mode = metadata.mode(); - crate::protocol::UnixMetadata::from(mode) - }), - #[cfg(not(unix))] - unix: None, - - #[cfg(windows)] - windows: Some({ - use std::os::windows::prelude::*; - let attributes = metadata.file_attributes(); - crate::protocol::WindowsMetadata::from(attributes) - }), - #[cfg(not(windows))] - windows: None, - }) - } -} - -#[cfg(feature = "schemars")] -impl Metadata { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Metadata) - } -} - -/// Represents unix-specific metadata about some path on a remote machine -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct UnixMetadata { - /// Represents whether or not owner can read from the file - pub owner_read: bool, - - /// Represents whether or not owner can write to the file - pub owner_write: bool, - - /// Represents whether or not owner can 
execute the file - pub owner_exec: bool, - - /// Represents whether or not associated group can read from the file - pub group_read: bool, - - /// Represents whether or not associated group can write to the file - pub group_write: bool, - - /// Represents whether or not associated group can execute the file - pub group_exec: bool, - - /// Represents whether or not other can read from the file - pub other_read: bool, - - /// Represents whether or not other can write to the file - pub other_write: bool, - - /// Represents whether or not other can execute the file - pub other_exec: bool, -} - -#[cfg(feature = "schemars")] -impl UnixMetadata { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(UnixMetadata) - } -} - -impl From for UnixMetadata { - /// Create from a unix mode bitset - fn from(mode: u32) -> Self { - let flags = UnixFilePermissionFlags::from_bits_truncate(mode); - Self { - owner_read: flags.contains(UnixFilePermissionFlags::OWNER_READ), - owner_write: flags.contains(UnixFilePermissionFlags::OWNER_WRITE), - owner_exec: flags.contains(UnixFilePermissionFlags::OWNER_EXEC), - group_read: flags.contains(UnixFilePermissionFlags::GROUP_READ), - group_write: flags.contains(UnixFilePermissionFlags::GROUP_WRITE), - group_exec: flags.contains(UnixFilePermissionFlags::GROUP_EXEC), - other_read: flags.contains(UnixFilePermissionFlags::OTHER_READ), - other_write: flags.contains(UnixFilePermissionFlags::OTHER_WRITE), - other_exec: flags.contains(UnixFilePermissionFlags::OTHER_EXEC), - } - } -} - -impl From for u32 { - /// Convert to a unix mode bitset - fn from(metadata: UnixMetadata) -> Self { - let mut flags = UnixFilePermissionFlags::empty(); - - if metadata.owner_read { - flags.insert(UnixFilePermissionFlags::OWNER_READ); - } - if metadata.owner_write { - flags.insert(UnixFilePermissionFlags::OWNER_WRITE); - } - if metadata.owner_exec { - flags.insert(UnixFilePermissionFlags::OWNER_EXEC); - } - - if metadata.group_read { - 
flags.insert(UnixFilePermissionFlags::GROUP_READ); - } - if metadata.group_write { - flags.insert(UnixFilePermissionFlags::GROUP_WRITE); - } - if metadata.group_exec { - flags.insert(UnixFilePermissionFlags::GROUP_EXEC); - } - - if metadata.other_read { - flags.insert(UnixFilePermissionFlags::OTHER_READ); - } - if metadata.other_write { - flags.insert(UnixFilePermissionFlags::OTHER_WRITE); - } - if metadata.other_exec { - flags.insert(UnixFilePermissionFlags::OTHER_EXEC); - } - - flags.bits() - } -} - -impl UnixMetadata { - pub fn is_readonly(self) -> bool { - !(self.owner_read || self.group_read || self.other_read) - } -} - -bitflags! { - struct UnixFilePermissionFlags: u32 { - const OWNER_READ = 0o400; - const OWNER_WRITE = 0o200; - const OWNER_EXEC = 0o100; - const GROUP_READ = 0o40; - const GROUP_WRITE = 0o20; - const GROUP_EXEC = 0o10; - const OTHER_READ = 0o4; - const OTHER_WRITE = 0o2; - const OTHER_EXEC = 0o1; - } -} - -/// Represents windows-specific metadata about some path on a remote machine -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct WindowsMetadata { - /// Represents whether or not a file or directory is an archive - pub archive: bool, - - /// Represents whether or not a file or directory is compressed - pub compressed: bool, - - /// Represents whether or not the file or directory is encrypted - pub encrypted: bool, - - /// Represents whether or not a file or directory is hidden - pub hidden: bool, - - /// Represents whether or not a directory or user data stream is configured with integrity - pub integrity_stream: bool, - - /// Represents whether or not a file does not have other attributes set - pub normal: bool, - - /// Represents whether or not a file or directory is not to be indexed by content indexing - /// service - pub not_content_indexed: bool, - - /// Represents whether or not a user data stream is not to be read by the background data - 
/// integrity scanner - pub no_scrub_data: bool, - - /// Represents whether or not the data of a file is not available immediately - pub offline: bool, - - /// Represents whether or not a file or directory is not fully present locally - pub recall_on_data_access: bool, - - /// Represents whether or not a file or directory has no physical representation on the local - /// system (is virtual) - pub recall_on_open: bool, - - /// Represents whether or not a file or directory has an associated reparse point, or a file is - /// a symbolic link - pub reparse_point: bool, - - /// Represents whether or not a file is a sparse file - pub sparse_file: bool, - - /// Represents whether or not a file or directory is used partially or exclusively by the - /// operating system - pub system: bool, - - /// Represents whether or not a file is being used for temporary storage - pub temporary: bool, -} - -#[cfg(feature = "schemars")] -impl WindowsMetadata { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(WindowsMetadata) - } -} - -impl From for WindowsMetadata { - /// Create from a windows file attribute bitset - fn from(file_attributes: u32) -> Self { - let flags = WindowsFileAttributeFlags::from_bits_truncate(file_attributes); - Self { - archive: flags.contains(WindowsFileAttributeFlags::ARCHIVE), - compressed: flags.contains(WindowsFileAttributeFlags::COMPRESSED), - encrypted: flags.contains(WindowsFileAttributeFlags::ENCRYPTED), - hidden: flags.contains(WindowsFileAttributeFlags::HIDDEN), - integrity_stream: flags.contains(WindowsFileAttributeFlags::INTEGRITY_SYSTEM), - normal: flags.contains(WindowsFileAttributeFlags::NORMAL), - not_content_indexed: flags.contains(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED), - no_scrub_data: flags.contains(WindowsFileAttributeFlags::NO_SCRUB_DATA), - offline: flags.contains(WindowsFileAttributeFlags::OFFLINE), - recall_on_data_access: flags.contains(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS), - 
recall_on_open: flags.contains(WindowsFileAttributeFlags::RECALL_ON_OPEN), - reparse_point: flags.contains(WindowsFileAttributeFlags::REPARSE_POINT), - sparse_file: flags.contains(WindowsFileAttributeFlags::SPARSE_FILE), - system: flags.contains(WindowsFileAttributeFlags::SYSTEM), - temporary: flags.contains(WindowsFileAttributeFlags::TEMPORARY), - } - } -} - -impl From for u32 { - /// Convert to a windows file attribute bitset - fn from(metadata: WindowsMetadata) -> Self { - let mut flags = WindowsFileAttributeFlags::empty(); - - if metadata.archive { - flags.insert(WindowsFileAttributeFlags::ARCHIVE); - } - if metadata.compressed { - flags.insert(WindowsFileAttributeFlags::COMPRESSED); - } - if metadata.encrypted { - flags.insert(WindowsFileAttributeFlags::ENCRYPTED); - } - if metadata.hidden { - flags.insert(WindowsFileAttributeFlags::HIDDEN); - } - if metadata.integrity_stream { - flags.insert(WindowsFileAttributeFlags::INTEGRITY_SYSTEM); - } - if metadata.normal { - flags.insert(WindowsFileAttributeFlags::NORMAL); - } - if metadata.not_content_indexed { - flags.insert(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED); - } - if metadata.no_scrub_data { - flags.insert(WindowsFileAttributeFlags::NO_SCRUB_DATA); - } - if metadata.offline { - flags.insert(WindowsFileAttributeFlags::OFFLINE); - } - if metadata.recall_on_data_access { - flags.insert(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS); - } - if metadata.recall_on_open { - flags.insert(WindowsFileAttributeFlags::RECALL_ON_OPEN); - } - if metadata.reparse_point { - flags.insert(WindowsFileAttributeFlags::REPARSE_POINT); - } - if metadata.sparse_file { - flags.insert(WindowsFileAttributeFlags::SPARSE_FILE); - } - if metadata.system { - flags.insert(WindowsFileAttributeFlags::SYSTEM); - } - if metadata.temporary { - flags.insert(WindowsFileAttributeFlags::TEMPORARY); - } - - flags.bits() - } -} - -bitflags! 
{ - struct WindowsFileAttributeFlags: u32 { - const ARCHIVE = 0x20; - const COMPRESSED = 0x800; - const ENCRYPTED = 0x4000; - const HIDDEN = 0x2; - const INTEGRITY_SYSTEM = 0x8000; - const NORMAL = 0x80; - const NOT_CONTENT_INDEXED = 0x2000; - const NO_SCRUB_DATA = 0x20000; - const OFFLINE = 0x1000; - const RECALL_ON_DATA_ACCESS = 0x400000; - const RECALL_ON_OPEN = 0x40000; - const REPARSE_POINT = 0x400; - const SPARSE_FILE = 0x200; - const SYSTEM = 0x4; - const TEMPORARY = 0x100; - const VIRTUAL = 0x10000; - } -} diff --git a/distant-core/src/protocol/permissions.rs b/distant-core/src/protocol/permissions.rs deleted file mode 100644 index 4968e1a..0000000 --- a/distant-core/src/protocol/permissions.rs +++ /dev/null @@ -1,294 +0,0 @@ -use bitflags::bitflags; -use serde::{Deserialize, Serialize}; - -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(default, deny_unknown_fields, rename_all = "snake_case")] -pub struct SetPermissionsOptions { - /// Whether or not to exclude symlinks from traversal entirely, meaning that permissions will - /// not be set on symlinks (usually resolving the symlink and setting the permission of the - /// referenced file or directory) that are explicitly provided or show up during recursion. - pub exclude_symlinks: bool, - - /// Whether or not to traverse symlinks when recursively setting permissions. Note that this - /// does NOT influence setting permissions when encountering a symlink as most platforms will - /// resolve the symlink before setting permissions. - pub follow_symlinks: bool, - - /// Whether or not to set the permissions of the file hierarchies rooted in the paths, instead - /// of just the paths themselves. 
- pub recursive: bool, -} - -#[cfg(feature = "schemars")] -impl SetPermissionsOptions { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SetPermissionsOptions) - } -} - -/// Represents permissions to apply to some path on a remote machine -/// -/// When used to set permissions on a file, directory, or symlink, -/// only fields that are set (not `None`) will be applied. -/// -/// On `Unix` platforms, this translates directly into the mode that -/// you would find with `chmod`. On all other platforms, this uses the -/// write flags to determine whether or not to set the readonly status. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct Permissions { - /// Represents whether or not owner can read from the file - pub owner_read: Option, - - /// Represents whether or not owner can write to the file - pub owner_write: Option, - - /// Represents whether or not owner can execute the file - pub owner_exec: Option, - - /// Represents whether or not associated group can read from the file - pub group_read: Option, - - /// Represents whether or not associated group can write to the file - pub group_write: Option, - - /// Represents whether or not associated group can execute the file - pub group_exec: Option, - - /// Represents whether or not other can read from the file - pub other_read: Option, - - /// Represents whether or not other can write to the file - pub other_write: Option, - - /// Represents whether or not other can execute the file - pub other_exec: Option, -} - -impl Permissions { - /// Creates a set of [`Permissions`] that indicate readonly status. 
- /// - /// ``` - /// use distant_core::protocol::Permissions; - /// - /// let permissions = Permissions::readonly(); - /// assert_eq!(permissions.is_readonly(), Some(true)); - /// assert_eq!(permissions.is_writable(), Some(false)); - /// ``` - pub fn readonly() -> Self { - Self { - owner_write: Some(false), - group_write: Some(false), - other_write: Some(false), - - owner_read: Some(true), - group_read: Some(true), - other_read: Some(true), - - owner_exec: None, - group_exec: None, - other_exec: None, - } - } - /// Creates a set of [`Permissions`] that indicate globally writable status. - /// - /// ``` - /// use distant_core::protocol::Permissions; - /// - /// let permissions = Permissions::writable(); - /// assert_eq!(permissions.is_readonly(), Some(false)); - /// assert_eq!(permissions.is_writable(), Some(true)); - /// ``` - pub fn writable() -> Self { - Self { - owner_write: Some(true), - group_write: Some(true), - other_write: Some(true), - - owner_read: Some(true), - group_read: Some(true), - other_read: Some(true), - - owner_exec: None, - group_exec: None, - other_exec: None, - } - } - - /// Returns true if the permission set has a value specified for each permission (no `None` - /// settings). - pub fn is_complete(&self) -> bool { - self.owner_read.is_some() - && self.owner_write.is_some() - && self.owner_exec.is_some() - && self.group_read.is_some() - && self.group_write.is_some() - && self.group_exec.is_some() - && self.other_read.is_some() - && self.other_write.is_some() - && self.other_exec.is_some() - } - - /// Returns `true` if permissions represent readonly, `false` if permissions represent - /// writable, and `None` if no permissions have been set to indicate either status. 
- #[inline] - pub fn is_readonly(&self) -> Option { - // Negate the writable status to indicate whether or not readonly - self.is_writable().map(|x| !x) - } - - /// Returns `true` if permissions represent ability to write, `false` if permissions represent - /// inability to write, and `None` if no permissions have been set to indicate either status. - #[inline] - pub fn is_writable(&self) -> Option { - self.owner_write - .zip(self.group_write) - .zip(self.other_write) - .map(|((owner, group), other)| owner || group || other) - } - - /// Applies `other` settings to `self`, overwriting any of the permissions in `self` with `other`. - #[inline] - pub fn apply_from(&mut self, other: &Self) { - macro_rules! apply { - ($key:ident) => {{ - if let Some(value) = other.$key { - self.$key = Some(value); - } - }}; - } - - apply!(owner_read); - apply!(owner_write); - apply!(owner_exec); - apply!(group_read); - apply!(group_write); - apply!(group_exec); - apply!(other_read); - apply!(other_write); - apply!(other_exec); - } - - /// Applies `self` settings to `other`, overwriting any of the permissions in `other` with - /// `self`. - #[inline] - pub fn apply_to(&self, other: &mut Self) { - Self::apply_from(other, self) - } - - /// Converts a Unix `mode` into the permission set. 
- pub fn from_unix_mode(mode: u32) -> Self { - let flags = UnixFilePermissionFlags::from_bits_truncate(mode); - Self { - owner_read: Some(flags.contains(UnixFilePermissionFlags::OWNER_READ)), - owner_write: Some(flags.contains(UnixFilePermissionFlags::OWNER_WRITE)), - owner_exec: Some(flags.contains(UnixFilePermissionFlags::OWNER_EXEC)), - group_read: Some(flags.contains(UnixFilePermissionFlags::GROUP_READ)), - group_write: Some(flags.contains(UnixFilePermissionFlags::GROUP_WRITE)), - group_exec: Some(flags.contains(UnixFilePermissionFlags::GROUP_EXEC)), - other_read: Some(flags.contains(UnixFilePermissionFlags::OTHER_READ)), - other_write: Some(flags.contains(UnixFilePermissionFlags::OTHER_WRITE)), - other_exec: Some(flags.contains(UnixFilePermissionFlags::OTHER_EXEC)), - } - } - - /// Converts to a Unix `mode` from a permission set. For any missing setting, a 0 bit is used. - pub fn to_unix_mode(&self) -> u32 { - let mut flags = UnixFilePermissionFlags::empty(); - - macro_rules! is_true { - ($opt:expr) => {{ - $opt.is_some() && $opt.unwrap() - }}; - } - - if is_true!(self.owner_read) { - flags.insert(UnixFilePermissionFlags::OWNER_READ); - } - if is_true!(self.owner_write) { - flags.insert(UnixFilePermissionFlags::OWNER_WRITE); - } - if is_true!(self.owner_exec) { - flags.insert(UnixFilePermissionFlags::OWNER_EXEC); - } - - if is_true!(self.group_read) { - flags.insert(UnixFilePermissionFlags::GROUP_READ); - } - if is_true!(self.group_write) { - flags.insert(UnixFilePermissionFlags::GROUP_WRITE); - } - if is_true!(self.group_exec) { - flags.insert(UnixFilePermissionFlags::GROUP_EXEC); - } - - if is_true!(self.other_read) { - flags.insert(UnixFilePermissionFlags::OTHER_READ); - } - if is_true!(self.other_write) { - flags.insert(UnixFilePermissionFlags::OTHER_WRITE); - } - if is_true!(self.other_exec) { - flags.insert(UnixFilePermissionFlags::OTHER_EXEC); - } - - flags.bits() - } -} - -#[cfg(feature = "schemars")] -impl Permissions { - pub fn root_schema() -> 
schemars::schema::RootSchema { - schemars::schema_for!(Permissions) - } -} - -#[cfg(unix)] -impl From for Permissions { - /// Converts [`std::fs::Permissions`] into [`Permissions`] using - /// [`std::os::unix::fs::PermissionsExt::mode`] to supply the bitset. - fn from(permissions: std::fs::Permissions) -> Self { - use std::os::unix::prelude::*; - Self::from_unix_mode(permissions.mode()) - } -} - -#[cfg(not(unix))] -impl From for Permissions { - /// Converts [`std::fs::Permissions`] into [`Permissions`] using the `readonly` flag. - /// - /// This will not set executable flags, but will set all read and write flags with write flags - /// being `false` if `readonly`, otherwise set to `true`. - fn from(permissions: std::fs::Permissions) -> Self { - if permissions.readonly() { - Self::readonly() - } else { - Self::writable() - } - } -} - -#[cfg(unix)] -impl From for std::fs::Permissions { - /// Converts [`Permissions`] into [`std::fs::Permissions`] using - /// [`std::os::unix::fs::PermissionsExt::from_mode`]. - fn from(permissions: Permissions) -> Self { - use std::os::unix::prelude::*; - std::fs::Permissions::from_mode(permissions.to_unix_mode()) - } -} - -bitflags! 
{ - struct UnixFilePermissionFlags: u32 { - const OWNER_READ = 0o400; - const OWNER_WRITE = 0o200; - const OWNER_EXEC = 0o100; - const GROUP_READ = 0o40; - const GROUP_WRITE = 0o20; - const GROUP_EXEC = 0o10; - const OTHER_READ = 0o4; - const OTHER_WRITE = 0o2; - const OTHER_EXEC = 0o1; - } -} diff --git a/distant-core/src/protocol/pty.rs b/distant-core/src/protocol/pty.rs deleted file mode 100644 index 6d6a054..0000000 --- a/distant-core/src/protocol/pty.rs +++ /dev/null @@ -1,140 +0,0 @@ -use std::fmt; -use std::num::ParseIntError; -use std::str::FromStr; - -use derive_more::{Display, Error}; -use portable_pty::PtySize as PortablePtySize; -use serde::{Deserialize, Serialize}; - -/// Represents the size associated with a remote PTY -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct PtySize { - /// Number of lines of text - pub rows: u16, - - /// Number of columns of text - pub cols: u16, - - /// Width of a cell in pixels. Note that some systems never fill this value and ignore it. - #[serde(default)] - pub pixel_width: u16, - - /// Height of a cell in pixels. Note that some systems never fill this value and ignore it. 
- #[serde(default)] - pub pixel_height: u16, -} - -impl PtySize { - /// Creates new size using just rows and columns - pub fn from_rows_and_cols(rows: u16, cols: u16) -> Self { - Self { - rows, - cols, - ..Default::default() - } - } -} - -#[cfg(feature = "schemars")] -impl PtySize { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(PtySize) - } -} - -impl From for PtySize { - fn from(size: PortablePtySize) -> Self { - Self { - rows: size.rows, - cols: size.cols, - pixel_width: size.pixel_width, - pixel_height: size.pixel_height, - } - } -} - -impl From for PortablePtySize { - fn from(size: PtySize) -> Self { - Self { - rows: size.rows, - cols: size.cols, - pixel_width: size.pixel_width, - pixel_height: size.pixel_height, - } - } -} - -impl fmt::Display for PtySize { - /// Prints out `rows,cols[,pixel_width,pixel_height]` where the - /// pixel width and pixel height are only included if either - /// one of them is not zero - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{},{}", self.rows, self.cols)?; - if self.pixel_width > 0 || self.pixel_height > 0 { - write!(f, ",{},{}", self.pixel_width, self.pixel_height)?; - } - - Ok(()) - } -} - -impl Default for PtySize { - fn default() -> Self { - PtySize { - rows: 24, - cols: 80, - pixel_width: 0, - pixel_height: 0, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Display, Error)] -pub enum PtySizeParseError { - MissingRows, - MissingColumns, - InvalidRows(ParseIntError), - InvalidColumns(ParseIntError), - InvalidPixelWidth(ParseIntError), - InvalidPixelHeight(ParseIntError), -} - -impl FromStr for PtySize { - type Err = PtySizeParseError; - - /// Attempts to parse a str into PtySize using one of the following formats: - /// - /// * rows,cols (defaults to 0 for pixel_width & pixel_height) - /// * rows,cols,pixel_width,pixel_height - fn from_str(s: &str) -> Result { - let mut tokens = s.split(','); - - Ok(Self { - rows: tokens - .next() - 
.ok_or(PtySizeParseError::MissingRows)? - .trim() - .parse() - .map_err(PtySizeParseError::InvalidRows)?, - cols: tokens - .next() - .ok_or(PtySizeParseError::MissingColumns)? - .trim() - .parse() - .map_err(PtySizeParseError::InvalidColumns)?, - pixel_width: tokens - .next() - .map(|s| s.trim().parse()) - .transpose() - .map_err(PtySizeParseError::InvalidPixelWidth)? - .unwrap_or(0), - pixel_height: tokens - .next() - .map(|s| s.trim().parse()) - .transpose() - .map_err(PtySizeParseError::InvalidPixelHeight)? - .unwrap_or(0), - }) - } -} diff --git a/distant-core/src/protocol/search.rs b/distant-core/src/protocol/search.rs deleted file mode 100644 index e1060b5..0000000 --- a/distant-core/src/protocol/search.rs +++ /dev/null @@ -1,425 +0,0 @@ -use std::borrow::Cow; -use std::collections::HashSet; -use std::path::PathBuf; -use std::str::FromStr; - -use serde::{Deserialize, Serialize}; - -use super::FileType; - -/// Id associated with a search -pub type SearchId = u32; - -/// Represents a query to perform against the filesystem -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct SearchQuery { - /// Kind of data to examine using condition - pub target: SearchQueryTarget, - - /// Condition to meet to be considered a match - pub condition: SearchQueryCondition, - - /// Paths in which to perform the query - pub paths: Vec, - - /// Options to apply to the query - #[serde(default)] - pub options: SearchQueryOptions, -} - -#[cfg(feature = "schemars")] -impl SearchQuery { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQuery) - } -} - -impl FromStr for SearchQuery { - type Err = serde_json::error::Error; - - /// Parses search query from a JSON string - fn from_str(s: &str) -> Result { - serde_json::from_str(s) - } -} - -/// Kind of data to examine using conditions -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case")] -pub enum SearchQueryTarget { - /// Checks path of file, directory, or symlink - Path, - - /// Checks contents of files - Contents, -} - -#[cfg(feature = "schemars")] -impl SearchQueryTarget { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryTarget) - } -} - -/// Condition used to find a match in a search query -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] -pub enum SearchQueryCondition { - /// Text is found anywhere (all regex patterns are escaped) - Contains { value: String }, - - /// Begins with some text (all regex patterns are escaped) - EndsWith { value: String }, - - /// Matches some text exactly (all regex patterns are escaped) - Equals { value: String }, - - /// Any of the conditions match - Or { value: Vec }, - - /// Matches some regex - Regex { value: String }, - - /// Begins with some text (all regex patterns are escaped) - StartsWith { value: String }, -} - -impl SearchQueryCondition { - /// Creates a new instance with `Contains` variant - pub fn contains(value: impl Into) -> Self { - Self::Contains { - value: value.into(), - } - } - - /// Creates a new instance with `EndsWith` variant - pub fn ends_with(value: impl Into) -> Self { - Self::EndsWith { - value: value.into(), - } - } - - /// Creates a new instance with `Equals` variant - pub fn equals(value: impl Into) -> Self { - Self::Equals { - value: value.into(), - } - } - - /// Creates a new instance with `Or` variant - pub fn or(value: I) -> Self - where - I: IntoIterator, - C: Into, - { - Self::Or { - value: value.into_iter().map(|s| s.into()).collect(), - } - } - - /// Creates a new instance with `Regex` variant - pub fn regex(value: impl Into) -> Self { - Self::Regex { - value: value.into(), - } - } - 
- /// Creates a new instance with `StartsWith` variant - pub fn starts_with(value: impl Into) -> Self { - Self::StartsWith { - value: value.into(), - } - } - - /// Converts the condition in a regex string - pub fn to_regex_string(&self) -> String { - match self { - Self::Contains { value } => regex::escape(value), - Self::EndsWith { value } => format!(r"{}$", regex::escape(value)), - Self::Equals { value } => format!(r"^{}$", regex::escape(value)), - Self::Regex { value } => value.to_string(), - Self::StartsWith { value } => format!(r"^{}", regex::escape(value)), - Self::Or { value } => { - let mut s = String::new(); - for (i, condition) in value.iter().enumerate() { - if i > 0 { - s.push('|'); - } - s.push_str(&condition.to_regex_string()); - } - s - } - } - } -} - -#[cfg(feature = "schemars")] -impl SearchQueryCondition { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryCondition) - } -} - -impl FromStr for SearchQueryCondition { - type Err = std::convert::Infallible; - - /// Parses search query from a JSON string - fn from_str(s: &str) -> Result { - Ok(Self::regex(s)) - } -} - -/// Options associated with a search query -#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(default)] -pub struct SearchQueryOptions { - /// Restrict search to only these file types (otherwise all are allowed). - pub allowed_file_types: HashSet, - - /// Regex to use to filter paths being searched to only those that match the include condition. - pub include: Option, - - /// Regex to use to filter paths being searched to only those that do not match the exclude. - /// condition - pub exclude: Option, - - /// If true, will search upward through parent directories rather than the traditional downward - /// search that recurses through all children directories. 
- /// - /// Note that this will use maximum depth to apply to the reverse direction, and will only look - /// through each ancestor directory's immediate entries. In other words, this will not result - /// in recursing through sibling directories. - /// - /// An upward search will ALWAYS search the contents of a directory, so this means providing a - /// path to a directory will search its entries EVEN if the max_depth is 0. - pub upward: bool, - - /// Search should follow symbolic links. - pub follow_symbolic_links: bool, - - /// Maximum results to return before stopping the query. - pub limit: Option, - - /// Maximum depth (directories) to search - /// - /// The smallest depth is 0 and always corresponds to the path given to the new function on - /// this type. Its direct descendents have depth 1, and their descendents have depth 2, and so - /// on. - /// - /// Note that this will not simply filter the entries of the iterator, but it will actually - /// avoid descending into directories when the depth is exceeded. - pub max_depth: Option, - - /// Amount of results to batch before sending back excluding final submission that will always - /// include the remaining results even if less than pagination request. 
- pub pagination: Option, -} - -#[cfg(feature = "schemars")] -impl SearchQueryOptions { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryOptions) - } -} - -/// Represents a match for a search query -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] -pub enum SearchQueryMatch { - /// Matches part of a file's path - Path(SearchQueryPathMatch), - - /// Matches part of a file's contents - Contents(SearchQueryContentsMatch), -} - -impl SearchQueryMatch { - pub fn into_path_match(self) -> Option { - match self { - Self::Path(x) => Some(x), - _ => None, - } - } - - pub fn into_contents_match(self) -> Option { - match self { - Self::Contents(x) => Some(x), - _ => None, - } - } -} - -#[cfg(feature = "schemars")] -impl SearchQueryMatch { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryMatch) - } -} - -/// Represents details for a match on a path -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct SearchQueryPathMatch { - /// Path associated with the match - pub path: PathBuf, - - /// Collection of matches tied to `path` where each submatch's byte offset is relative to - /// `path` - pub submatches: Vec, -} - -#[cfg(feature = "schemars")] -impl SearchQueryPathMatch { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryPathMatch) - } -} - -/// Represents details for a match on a file's contents -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct SearchQueryContentsMatch { - /// Path to file whose contents match - pub path: PathBuf, - - /// Line(s) that matched - pub lines: SearchQueryMatchData, - - /// Line number where match 
starts (base index 1) - pub line_number: u64, - - /// Absolute byte offset corresponding to the start of `lines` in the data being searched - pub absolute_offset: u64, - - /// Collection of matches tied to `lines` where each submatch's byte offset is relative to - /// `lines` and not the overall content - pub submatches: Vec, -} - -#[cfg(feature = "schemars")] -impl SearchQueryContentsMatch { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryContentsMatch) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct SearchQuerySubmatch { - /// Content matched by query - pub r#match: SearchQueryMatchData, - - /// Byte offset representing start of submatch (inclusive) - pub start: u64, - - /// Byte offset representing end of submatch (exclusive) - pub end: u64, -} - -#[cfg(feature = "schemars")] -impl SearchQuerySubmatch { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQuerySubmatch) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -#[serde( - rename_all = "snake_case", - deny_unknown_fields, - tag = "type", - content = "value" -)] -pub enum SearchQueryMatchData { - /// Match represented as UTF-8 text - Text(String), - - /// Match represented as bytes - Bytes(Vec), -} - -impl SearchQueryMatchData { - /// Creates a new instance with `Text` variant - pub fn text(value: impl Into) -> Self { - Self::Text(value.into()) - } - - /// Creates a new instance with `Bytes` variant - pub fn bytes(value: impl Into>) -> Self { - Self::Bytes(value.into()) - } - - /// Returns the UTF-8 str reference to the data, if is valid UTF-8 - pub fn to_str(&self) -> Option<&str> { - match self { - Self::Text(x) => Some(x), - Self::Bytes(x) => std::str::from_utf8(x).ok(), - } - } - - /// Converts data to a UTF-8 string, replacing any 
invalid UTF-8 sequences with - /// [`U+FFFD REPLACEMENT CHARACTER`](https://doc.rust-lang.org/nightly/core/char/const.REPLACEMENT_CHARACTER.html) - pub fn to_string_lossy(&self) -> Cow<'_, str> { - match self { - Self::Text(x) => Cow::Borrowed(x), - Self::Bytes(x) => String::from_utf8_lossy(x), - } - } -} - -#[cfg(feature = "schemars")] -impl SearchQueryMatchData { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SearchQueryMatchData) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - mod search_query_condition { - use test_log::test; - - use super::*; - - #[test] - fn to_regex_string_should_convert_to_appropriate_regex_and_escape_as_needed() { - assert_eq!( - SearchQueryCondition::contains("t^es$t").to_regex_string(), - r"t\^es\$t" - ); - assert_eq!( - SearchQueryCondition::ends_with("t^es$t").to_regex_string(), - r"t\^es\$t$" - ); - assert_eq!( - SearchQueryCondition::equals("t^es$t").to_regex_string(), - r"^t\^es\$t$" - ); - assert_eq!( - SearchQueryCondition::or([ - SearchQueryCondition::contains("t^es$t"), - SearchQueryCondition::equals("t^es$t"), - SearchQueryCondition::regex("^test$"), - ]) - .to_regex_string(), - r"t\^es\$t|^t\^es\$t$|^test$" - ); - assert_eq!( - SearchQueryCondition::regex("test").to_regex_string(), - "test" - ); - assert_eq!( - SearchQueryCondition::starts_with("t^es$t").to_regex_string(), - r"^t\^es\$t" - ); - } - } -} diff --git a/distant-core/src/protocol/system.rs b/distant-core/src/protocol/system.rs deleted file mode 100644 index 48ae816..0000000 --- a/distant-core/src/protocol/system.rs +++ /dev/null @@ -1,59 +0,0 @@ -use std::env; -use std::path::PathBuf; - -use serde::{Deserialize, Serialize}; - -/// Represents information about a system -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct SystemInfo { - /// Family of the operating system as described in - /// 
https://doc.rust-lang.org/std/env/consts/constant.FAMILY.html - pub family: String, - - /// Name of the specific operating system as described in - /// https://doc.rust-lang.org/std/env/consts/constant.OS.html - pub os: String, - - /// Architecture of the CPI as described in - /// https://doc.rust-lang.org/std/env/consts/constant.ARCH.html - pub arch: String, - - /// Current working directory of the running server process - pub current_dir: PathBuf, - - /// Primary separator for path components for the current platform - /// as defined in https://doc.rust-lang.org/std/path/constant.MAIN_SEPARATOR.html - pub main_separator: char, - - /// Name of the user running the server process - pub username: String, - - /// Default shell tied to user running the server process - pub shell: String, -} - -#[cfg(feature = "schemars")] -impl SystemInfo { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(SystemInfo) - } -} - -impl Default for SystemInfo { - fn default() -> Self { - Self { - family: env::consts::FAMILY.to_string(), - os: env::consts::OS.to_string(), - arch: env::consts::ARCH.to_string(), - current_dir: env::current_dir().unwrap_or_default(), - main_separator: std::path::MAIN_SEPARATOR, - username: whoami::username(), - shell: if cfg!(windows) { - env::var("ComSpec").unwrap_or_else(|_| String::from("cmd.exe")) - } else { - env::var("SHELL").unwrap_or_else(|_| String::from("/bin/sh")) - }, - } - } -} diff --git a/distant-core/tests/stress/distant/watch.rs b/distant-core/tests/stress/distant/watch.rs index ea4e442..deb1291 100644 --- a/distant-core/tests/stress/distant/watch.rs +++ b/distant-core/tests/stress/distant/watch.rs @@ -1,5 +1,5 @@ use assert_fs::prelude::*; -use distant_core::protocol::ChangeKindSet; +use distant_core::protocol::{ChangeKind, ChangeKindSet}; use distant_core::DistantChannelExt; use rstest::*; use test_log::test; @@ -29,7 +29,7 @@ async fn should_handle_large_volume_of_file_watching(#[future] ctx: DistantClien 
.watch( file.path(), false, - ChangeKindSet::modify_set(), + ChangeKindSet::new([ChangeKind::Modify]), ChangeKindSet::empty(), ) .await diff --git a/distant-net/Cargo.toml b/distant-net/Cargo.toml index ae19871..39452d2 100644 --- a/distant-net/Cargo.toml +++ b/distant-net/Cargo.toml @@ -32,9 +32,6 @@ serde_bytes = "0.11.9" strum = { version = "0.24.1", features = ["derive"] } tokio = { version = "1.27.0", features = ["full"] } -# Optional dependencies based on features -schemars = { version = "0.8.12", optional = true } - [dev-dependencies] distant-auth = { version = "=0.20.0-alpha.7", path = "../distant-auth", features = ["tests"] } env_logger = "0.10.0" diff --git a/distant-net/README.md b/distant-net/README.md index 682d897..06f097f 100644 --- a/distant-net/README.md +++ b/distant-net/README.md @@ -25,18 +25,9 @@ You can import the dependency by adding the following to your `Cargo.toml`: ```toml [dependencies] -distant-net = "0.19" +distant-net = "0.20" ``` -## Features - -Currently, the library supports the following features: - -- `schemars`: derives the `schemars::JsonSchema` interface on `Request` - and `Response` data types - -By default, no features are enabled on the library. 
- ## License This project is licensed under either of diff --git a/distant-net/src/common/map.rs b/distant-net/src/common/map.rs index 68b9b72..2affac8 100644 --- a/distant-net/src/common/map.rs +++ b/distant-net/src/common/map.rs @@ -13,7 +13,6 @@ use crate::common::utils::{deserialize_from_str, serialize_to_str}; /// Contains map information for connections and other use cases #[derive(Clone, Debug, From, IntoIterator, PartialEq, Eq)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] pub struct Map(HashMap); impl Map { @@ -77,13 +76,6 @@ impl Map { } } -#[cfg(feature = "schemars")] -impl Map { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Map) - } -} - impl Default for Map { fn default() -> Self { Self::new() diff --git a/distant-net/src/common/packet/request.rs b/distant-net/src/common/packet/request.rs index 9d2f3e9..84c8b81 100644 --- a/distant-net/src/common/packet/request.rs +++ b/distant-net/src/common/packet/request.rs @@ -10,7 +10,6 @@ use crate::common::utils; /// Represents a request to send #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] pub struct Request { /// Unique id associated with the request pub id: Id, @@ -62,13 +61,6 @@ where } } -#[cfg(feature = "schemars")] -impl Request { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Request) - } -} - impl From for Request { fn from(payload: T) -> Self { Self::new(payload) diff --git a/distant-net/src/common/packet/response.rs b/distant-net/src/common/packet/response.rs index e5d38d8..cc96e96 100644 --- a/distant-net/src/common/packet/response.rs +++ b/distant-net/src/common/packet/response.rs @@ -10,7 +10,6 @@ use crate::common::utils; /// Represents a response received related to some response #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] pub struct 
Response { /// Unique id associated with the response pub id: Id, @@ -67,13 +66,6 @@ where } } -#[cfg(feature = "schemars")] -impl Response { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Response) - } -} - /// Error encountered when attempting to parse bytes as an untyped response #[derive(Copy, Clone, Debug, Display, Error, PartialEq, Eq, Hash)] pub enum UntypedResponseParseError { diff --git a/distant-net/src/manager/data/capabilities.rs b/distant-net/src/manager/data/capabilities.rs index 3d490a4..fdab311 100644 --- a/distant-net/src/manager/data/capabilities.rs +++ b/distant-net/src/manager/data/capabilities.rs @@ -12,7 +12,6 @@ use super::ManagerCapabilityKind; /// Set of supported capabilities for a manager #[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[serde(transparent)] pub struct ManagerCapabilities(#[into_iterator(owned, ref)] HashSet); @@ -76,13 +75,6 @@ impl ManagerCapabilities { } } -#[cfg(feature = "schemars")] -impl ManagerCapabilities { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(ManagerCapabilities) - } -} - impl BitAnd for &ManagerCapabilities { type Output = ManagerCapabilities; @@ -133,7 +125,6 @@ impl FromIterator for ManagerCapabilities { /// ManagerCapability tied to a manager. A capability is equivalent based on its kind and not /// description. 
#[derive(Clone, Debug, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[serde(rename_all = "snake_case", deny_unknown_fields)] pub struct ManagerCapability { /// Label describing the kind of capability @@ -196,17 +187,3 @@ impl From for ManagerCapability { } } } - -#[cfg(feature = "schemars")] -impl ManagerCapability { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(ManagerCapability) - } -} - -#[cfg(feature = "schemars")] -impl ManagerCapabilityKind { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(ManagerCapabilityKind) - } -} diff --git a/distant-net/src/manager/data/request.rs b/distant-net/src/manager/data/request.rs index 938b2f1..ebfc77c 100644 --- a/distant-net/src/manager/data/request.rs +++ b/distant-net/src/manager/data/request.rs @@ -21,10 +21,6 @@ use crate::common::{ConnectionId, Destination, Map, UntypedRequest}; Serialize, Deserialize ))] -#[cfg_attr( - feature = "schemars", - strum_discriminants(derive(schemars::JsonSchema)) -)] #[strum_discriminants(name(ManagerCapabilityKind))] #[strum_discriminants(strum(serialize_all = "snake_case"))] #[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] diff --git a/distant-protocol/Cargo.toml b/distant-protocol/Cargo.toml new file mode 100644 index 0000000..30630dc --- /dev/null +++ b/distant-protocol/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "distant-protocol" +description = "Protocol library for distant, providing data structures used between the client and server" +categories = ["data-structures"] +keywords = ["protocol"] +version = "0.20.0-alpha.7" +authors = ["Chip Senkbeil "] +edition = "2021" +homepage = "https://github.com/chipsenkbeil/distant" +repository = "https://github.com/chipsenkbeil/distant" +readme = "README.md" +license = "MIT OR Apache-2.0" + +[features] +default = [] +tests = [] + +[dependencies] +bitflags = "2.0.2" +derive_more = { version = "0.99.17", 
default-features = false, features = ["deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant"] } +regex = "1.7.3" +serde = { version = "1.0.159", features = ["derive"] } +serde_bytes = "0.11.9" +strum = { version = "0.24.1", features = ["derive"] } + +[dev-dependencies] +rmp = "0.8.11" +rmp-serde = "1.1.1" +serde_json = "1.0.96" diff --git a/distant-protocol/src/common.rs b/distant-protocol/src/common.rs new file mode 100644 index 0000000..8e891cd --- /dev/null +++ b/distant-protocol/src/common.rs @@ -0,0 +1,29 @@ +mod capabilities; +mod change; +mod cmd; +mod error; +mod filesystem; +mod metadata; +mod permissions; +mod pty; +mod search; +mod system; +mod version; + +pub use capabilities::*; +pub use change::*; +pub use cmd::*; +pub use error::*; +pub use filesystem::*; +pub use metadata::*; +pub use permissions::*; +pub use pty::*; +pub use search::*; +pub use system::*; +pub use version::*; + +/// Id for a remote process +pub type ProcessId = u32; + +/// Version indicated by the tuple of (major, minor, patch). +pub type SemVer = (u8, u8, u8); diff --git a/distant-protocol/src/common/capabilities.rs b/distant-protocol/src/common/capabilities.rs new file mode 100644 index 0000000..7a9d7c1 --- /dev/null +++ b/distant-protocol/src/common/capabilities.rs @@ -0,0 +1,380 @@ +use std::cmp::Ordering; +use std::collections::HashSet; +use std::hash::{Hash, Hasher}; +use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut}; +use std::str::FromStr; + +use derive_more::{From, Into, IntoIterator}; +use serde::{Deserialize, Serialize}; +use strum::{EnumMessage, IntoEnumIterator}; + +/// Represents the kinds of capabilities available. 
+pub use crate::request::RequestKind as CapabilityKind; + +/// Set of supported capabilities for a server +#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Capabilities(#[into_iterator(owned, ref)] HashSet); + +impl Capabilities { + /// Return set of capabilities encompassing all possible capabilities + pub fn all() -> Self { + Self(CapabilityKind::iter().map(Capability::from).collect()) + } + + /// Return empty set of capabilities + pub fn none() -> Self { + Self(HashSet::new()) + } + + /// Returns true if the capability with described kind is included + pub fn contains(&self, kind: impl AsRef) -> bool { + let cap = Capability { + kind: kind.as_ref().to_string(), + description: String::new(), + }; + self.0.contains(&cap) + } + + /// Adds the specified capability to the set of capabilities + /// + /// * If the set did not have this capability, returns `true` + /// * If the set did have this capability, returns `false` + pub fn insert(&mut self, cap: impl Into) -> bool { + self.0.insert(cap.into()) + } + + /// Removes the capability with the described kind, returning the capability + pub fn take(&mut self, kind: impl AsRef) -> Option { + let cap = Capability { + kind: kind.as_ref().to_string(), + description: String::new(), + }; + self.0.take(&cap) + } + + /// Removes the capability with the described kind, returning true if it existed + pub fn remove(&mut self, kind: impl AsRef) -> bool { + let cap = Capability { + kind: kind.as_ref().to_string(), + description: String::new(), + }; + self.0.remove(&cap) + } + + /// Converts into vec of capabilities sorted by kind + pub fn into_sorted_vec(self) -> Vec { + let mut this = self.0.into_iter().collect::>(); + + this.sort_unstable(); + + this + } +} + +impl AsRef> for Capabilities { + fn as_ref(&self) -> &HashSet { + &self.0 + } +} + +impl AsMut> for Capabilities { + fn as_mut(&mut self) -> &mut HashSet { + &mut self.0 + } +} + +impl Deref for 
Capabilities { + type Target = HashSet; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Capabilities { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl BitAnd for &Capabilities { + type Output = Capabilities; + + fn bitand(self, rhs: Self) -> Self::Output { + Capabilities(self.0.bitand(&rhs.0)) + } +} + +impl BitOr for &Capabilities { + type Output = Capabilities; + + fn bitor(self, rhs: Self) -> Self::Output { + Capabilities(self.0.bitor(&rhs.0)) + } +} + +impl BitOr for &Capabilities { + type Output = Capabilities; + + fn bitor(self, rhs: Capability) -> Self::Output { + let mut other = Capabilities::none(); + other.0.insert(rhs); + + self.bitor(&other) + } +} + +impl BitXor for &Capabilities { + type Output = Capabilities; + + fn bitxor(self, rhs: Self) -> Self::Output { + Capabilities(self.0.bitxor(&rhs.0)) + } +} + +impl FromIterator for Capabilities { + fn from_iter>(iter: I) -> Self { + let mut this = Capabilities::none(); + + for capability in iter { + this.0.insert(capability); + } + + this + } +} + +/// Capability tied to a server. A capability is equivalent based on its kind and not description. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +pub struct Capability { + /// Label describing the kind of capability + pub kind: String, + + /// Information about the capability + pub description: String, +} + +impl Capability { + /// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible, + /// returning None if the capability is unknown + pub fn to_capability_kind(&self) -> Option { + CapabilityKind::from_str(&self.kind).ok() + } + + /// Returns true if the described capability is unknown + pub fn is_unknown(&self) -> bool { + self.to_capability_kind().is_none() + } +} + +impl PartialEq for Capability { + fn eq(&self, other: &Self) -> bool { + self.kind.eq_ignore_ascii_case(&other.kind) + } +} + +impl Eq for Capability {} + +impl PartialOrd for Capability { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Capability { + fn cmp(&self, other: &Self) -> Ordering { + self.kind + .to_ascii_lowercase() + .cmp(&other.kind.to_ascii_lowercase()) + } +} + +impl Hash for Capability { + fn hash(&self, state: &mut H) { + self.kind.to_ascii_lowercase().hash(state); + } +} + +impl From for Capability { + /// Creates a new capability using the kind's default message + fn from(kind: CapabilityKind) -> Self { + Self { + kind: kind.to_string(), + description: kind + .get_message() + .map(ToString::to_string) + .unwrap_or_default(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod capabilities { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let capabilities: Capabilities = [Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }] + .into_iter() + .collect(); + + let value = serde_json::to_value(capabilities).unwrap(); + assert_eq!( + value, + serde_json::json!([ + { + "kind": "some kind", + "description": "some description", + } + ]) + ); + } + + #[test] + fn 
should_be_able_to_deserialize_from_json() { + let value = serde_json::json!([ + { + "kind": "some kind", + "description": "some description", + } + ]); + + let capabilities: Capabilities = serde_json::from_value(value).unwrap(); + assert_eq!( + capabilities, + [Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }] + .into_iter() + .collect() + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let capabilities: Capabilities = [Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }] + .into_iter() + .collect(); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&capabilities).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named( + &[Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }] + .into_iter() + .collect::(), + ) + .unwrap(); + + let capabilities: Capabilities = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + capabilities, + [Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }] + .into_iter() + .collect() + ); + } + } + + mod capability { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let capability = Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }; + + let value = serde_json::to_value(capability).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "kind": "some kind", + "description": "some description", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "kind": "some kind", + "description": "some description", + }); + + let capability: Capability = serde_json::from_value(value).unwrap(); + assert_eq!( + capability, + Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let capability = Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&capability).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + }) + .unwrap(); + + let capability: Capability = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + capability, + Capability { + kind: "some kind".to_string(), + description: "some description".to_string(), + } + ); + } + } +} diff --git a/distant-protocol/src/common/change.rs b/distant-protocol/src/common/change.rs new file mode 100644 index 0000000..d2a70fd --- /dev/null +++ b/distant-protocol/src/common/change.rs @@ -0,0 +1,380 @@ +use std::collections::HashSet; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::iter::FromIterator; +use std::ops::{BitOr, Sub}; +use std::path::PathBuf; +use std::str::FromStr; + +use derive_more::{Deref, DerefMut, IntoIterator}; +use serde::{Deserialize, Serialize}; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +/// Change to one or more paths on the filesystem. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +pub struct Change { + /// Label describing the kind of change + pub kind: ChangeKind, + + /// Paths that were changed + pub paths: Vec, +} + +/// Represents a label attached to a [`Change`] that describes the kind of change. +/// +/// This mirrors events seen from `incron`. 
+#[derive( + Copy, + Clone, + Debug, + strum::Display, + EnumString, + EnumVariantNames, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, +)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +#[strum(serialize_all = "snake_case")] +pub enum ChangeKind { + /// A file was read + Access, + + /// A file's or directory's attributes were changed + Attribute, + + /// A file open for writing was closed + CloseWrite, + + /// A file not open for writing was closed + CloseNoWrite, + + /// A file, directory, or something else was created within a watched directory + Create, + + /// A file, directory, or something else was deleted + Delete, + + /// A file's content was modified + Modify, + + /// A file was opened + Open, + + /// A file, directory, or something else was renamed in some way + Rename, + + /// Catch-all for any other change + Unknown, +} + +impl ChangeKind { + /// Returns a list of all variants as str names + pub const fn variants() -> &'static [&'static str] { + Self::VARIANTS + } + + /// Returns a list of all variants as a vec + pub fn all() -> Vec { + ChangeKindSet::all().into_sorted_vec() + } + + /// Returns true if kind is part of the access family. + pub fn is_access(&self) -> bool { + matches!( + self, + Self::Access | Self::CloseWrite | Self::CloseNoWrite | Self::Open + ) + } + + /// Returns true if kind is part of the create family. + pub fn is_create(&self) -> bool { + matches!(self, Self::Create) + } + + /// Returns true if kind is part of the delete family. + pub fn is_delete(&self) -> bool { + matches!(self, Self::Delete) + } + + /// Returns true if kind is part of the modify family. + pub fn is_modify(&self) -> bool { + matches!(self, Self::Attribute | Self::Modify) + } + + /// Returns true if kind is part of the rename family. + pub fn is_rename(&self) -> bool { + matches!(self, Self::Rename) + } + + /// Returns true if kind is unknown. 
+ pub fn is_unknown(&self) -> bool { + matches!(self, Self::Unknown) + } +} + +impl BitOr for ChangeKind { + type Output = ChangeKindSet; + + fn bitor(self, rhs: Self) -> Self::Output { + let mut set = ChangeKindSet::empty(); + set.insert(self); + set.insert(rhs); + set + } +} + +/// Represents a distinct set of different change kinds +#[derive(Clone, Debug, Deref, DerefMut, IntoIterator, Serialize, Deserialize)] +pub struct ChangeKindSet(HashSet); + +impl ChangeKindSet { + pub fn new(set: impl IntoIterator) -> Self { + set.into_iter().collect() + } + + /// Produces an empty set of [`ChangeKind`] + pub fn empty() -> Self { + Self(HashSet::new()) + } + + /// Produces a set of all [`ChangeKind`] + pub fn all() -> Self { + vec![ + ChangeKind::Access, + ChangeKind::Attribute, + ChangeKind::CloseWrite, + ChangeKind::CloseNoWrite, + ChangeKind::Create, + ChangeKind::Delete, + ChangeKind::Modify, + ChangeKind::Open, + ChangeKind::Rename, + ChangeKind::Unknown, + ] + .into_iter() + .collect() + } + + /// Consumes set and returns a sorted vec of the kinds of changes + pub fn into_sorted_vec(self) -> Vec { + let mut v = self.0.into_iter().collect::>(); + v.sort(); + v + } +} + +impl fmt::Display for ChangeKindSet { + /// Outputs a comma-separated series of [`ChangeKind`] as string that are sorted + /// such that this will always be consistent output + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut kinds = self + .0 + .iter() + .map(ToString::to_string) + .collect::>(); + kinds.sort_unstable(); + write!(f, "{}", kinds.join(",")) + } +} + +impl PartialEq for ChangeKindSet { + fn eq(&self, other: &Self) -> bool { + self.to_string() == other.to_string() + } +} + +impl Eq for ChangeKindSet {} + +impl Hash for ChangeKindSet { + /// Hashes based on the output of [`fmt::Display`] + fn hash(&self, state: &mut H) { + self.to_string().hash(state); + } +} + +impl BitOr for ChangeKindSet { + type Output = Self; + + fn bitor(mut self, rhs: ChangeKindSet) -> Self::Output 
{ + self.extend(rhs.0); + self + } +} + +impl BitOr for ChangeKindSet { + type Output = Self; + + fn bitor(mut self, rhs: ChangeKind) -> Self::Output { + self.0.insert(rhs); + self + } +} + +impl BitOr for ChangeKind { + type Output = ChangeKindSet; + + fn bitor(self, rhs: ChangeKindSet) -> Self::Output { + rhs | self + } +} + +impl Sub for ChangeKindSet { + type Output = Self; + + fn sub(self, other: Self) -> Self::Output { + ChangeKindSet(&self.0 - &other.0) + } +} + +impl Sub<&'_ ChangeKindSet> for &ChangeKindSet { + type Output = ChangeKindSet; + + fn sub(self, other: &ChangeKindSet) -> Self::Output { + ChangeKindSet(&self.0 - &other.0) + } +} + +impl FromStr for ChangeKindSet { + type Err = strum::ParseError; + + fn from_str(s: &str) -> Result { + let mut change_set = HashSet::new(); + + for word in s.split(',') { + change_set.insert(ChangeKind::from_str(word.trim())?); + } + + Ok(ChangeKindSet(change_set)) + } +} + +impl FromIterator for ChangeKindSet { + fn from_iter>(iter: I) -> Self { + let mut change_set = HashSet::new(); + + for i in iter { + change_set.insert(i); + } + + ChangeKindSet(change_set) + } +} + +impl From for ChangeKindSet { + fn from(change_kind: ChangeKind) -> Self { + let mut set = Self::empty(); + set.insert(change_kind); + set + } +} + +impl From> for ChangeKindSet { + fn from(changes: Vec) -> Self { + changes.into_iter().collect() + } +} + +impl Default for ChangeKindSet { + fn default() -> Self { + Self::empty() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod change_kind_set { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let set = ChangeKindSet::new([ChangeKind::CloseWrite]); + + let value = serde_json::to_value(set).unwrap(); + assert_eq!(value, serde_json::json!(["close_write"])); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!(["close_write"]); + + let set: ChangeKindSet = serde_json::from_value(value).unwrap(); + assert_eq!(set, 
ChangeKindSet::new([ChangeKind::CloseWrite])); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let set = ChangeKindSet::new([ChangeKind::CloseWrite]); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&set).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = + rmp_serde::encode::to_vec_named(&ChangeKindSet::new([ChangeKind::CloseWrite])) + .unwrap(); + + let set: ChangeKindSet = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(set, ChangeKindSet::new([ChangeKind::CloseWrite])); + } + } + + mod change_kind { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let kind = ChangeKind::CloseWrite; + + let value = serde_json::to_value(kind).unwrap(); + assert_eq!(value, serde_json::json!("close_write")); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("close_write"); + + let kind: ChangeKind = serde_json::from_value(value).unwrap(); + assert_eq!(kind, ChangeKind::CloseWrite); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let kind = ChangeKind::CloseWrite; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&kind).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&ChangeKind::CloseWrite).unwrap(); + + let kind: ChangeKind = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(kind, ChangeKind::CloseWrite); + } + } +} diff --git a/distant-protocol/src/common/cmd.rs b/distant-protocol/src/common/cmd.rs new file mode 100644 index 0000000..786531b --- /dev/null +++ b/distant-protocol/src/common/cmd.rs @@ -0,0 +1,89 @@ +use std::ops::{Deref, DerefMut}; + +use derive_more::{Display, From, Into}; +use serde::{Deserialize, Serialize}; + +/// Represents some command with arguments to execute +#[derive(Clone, Debug, Display, From, Into, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub struct Cmd(String); + +impl Cmd { + /// Creates a new command from the given `cmd` + pub fn new(cmd: impl Into) -> Self { + Self(cmd.into()) + } + + /// Returns reference to the program portion of the command + pub fn program(&self) -> &str { + match self.0.split_once(' ') { + Some((program, _)) => program.trim(), + None => self.0.trim(), + } + } + + /// Returns reference to the arguments portion of the command + pub fn arguments(&self) -> &str { + match self.0.split_once(' ') { + Some((_, arguments)) => arguments.trim(), + None => "", + } + } +} + +impl Deref for Cmd { + type Target = String; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Cmd { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let cmd = 
Cmd::new("echo some text"); + + let value = serde_json::to_value(cmd).unwrap(); + assert_eq!(value, serde_json::json!("echo some text")); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("echo some text"); + + let cmd: Cmd = serde_json::from_value(value).unwrap(); + assert_eq!(cmd, Cmd::new("echo some text")); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let cmd = Cmd::new("echo some text"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&cmd).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Cmd::new("echo some text")).unwrap(); + + let cmd: Cmd = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(cmd, Cmd::new("echo some text")); + } +} diff --git a/distant-core/src/protocol/error.rs b/distant-protocol/src/common/error.rs similarity index 59% rename from distant-core/src/protocol/error.rs rename to distant-protocol/src/common/error.rs index 17d6de0..cdab4b7 100644 --- a/distant-core/src/protocol/error.rs +++ b/distant-protocol/src/common/error.rs @@ -1,12 +1,10 @@ use std::io; use derive_more::Display; -use notify::ErrorKind as NotifyErrorKind; use serde::{Deserialize, Serialize}; /// General purpose error type that can be sent across the wire #[derive(Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[display(fmt = "{kind}: {description}")] #[serde(rename_all = "snake_case", deny_unknown_fields)] pub struct Error { @@ -26,13 +24,6 @@ impl Error { } } -#[cfg(feature = "schemars")] -impl Error { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(Error) - } -} - impl<'a> From<&'a str> for Error { fn from(x: &'a str) -> Self { Self::from(x.to_string()) @@ -63,76 +54,8 @@ impl From for io::Error { } } -impl From for Error { - fn from(x: notify::Error) -> Self { - let err = match x.kind { - NotifyErrorKind::Generic(x) => Self { - kind: ErrorKind::Other, - description: x, - }, - NotifyErrorKind::Io(x) => Self::from(x), - NotifyErrorKind::PathNotFound => Self { - kind: ErrorKind::Other, - description: String::from("Path not found"), - }, - NotifyErrorKind::WatchNotFound => Self { - kind: ErrorKind::Other, - description: String::from("Watch not found"), - }, - NotifyErrorKind::InvalidConfig(_) => Self { - kind: ErrorKind::Other, - description: String::from("Invalid config"), - }, - NotifyErrorKind::MaxFilesWatch => Self { - kind: ErrorKind::Other, - description: String::from("Max files watched"), - 
}, - }; - - Self { - kind: err.kind, - description: format!( - "{}\n\nPaths: {}", - err.description, - x.paths - .into_iter() - .map(|p| p.to_string_lossy().to_string()) - .collect::>() - .join(", ") - ), - } - } -} - -impl From for Error { - fn from(x: walkdir::Error) -> Self { - if x.io_error().is_some() { - x.into_io_error().map(Self::from).unwrap() - } else { - Self { - kind: ErrorKind::Loop, - description: format!("{x}"), - } - } - } -} - -impl From for Error { - fn from(x: tokio::task::JoinError) -> Self { - Self { - kind: if x.is_cancelled() { - ErrorKind::TaskCancelled - } else { - ErrorKind::TaskPanicked - }, - description: format!("{x}"), - } - } -} - /// All possible kinds of errors that can be returned #[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[serde(rename_all = "snake_case", deny_unknown_fields)] pub enum ErrorKind { /// An entity was not found, often a file @@ -211,13 +134,6 @@ pub enum ErrorKind { Unknown, } -#[cfg(feature = "schemars")] -impl ErrorKind { - pub fn root_schema() -> schemars::schema::RootSchema { - schemars::schema_for!(ErrorKind) - } -} - impl From for ErrorKind { fn from(kind: io::ErrorKind) -> Self { match kind { @@ -275,3 +191,125 @@ impl From for io::ErrorKind { } } } + +#[cfg(test)] +mod tests { + use super::*; + + mod error { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let error = Error { + kind: ErrorKind::AddrInUse, + description: "some description".to_string(), + }; + + let value = serde_json::to_value(error).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "kind": "addr_in_use", + "description": "some description", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "kind": "addr_in_use", + "description": "some description", + }); + + let error: Error = serde_json::from_value(value).unwrap(); + assert_eq!( + error, + Error { + 
kind: ErrorKind::AddrInUse, + description: "some description".to_string(), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let error = Error { + kind: ErrorKind::AddrInUse, + description: "some description".to_string(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&error).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Error { + kind: ErrorKind::AddrInUse, + description: "some description".to_string(), + }) + .unwrap(); + + let error: Error = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + error, + Error { + kind: ErrorKind::AddrInUse, + description: "some description".to_string(), + } + ); + } + } + + mod error_kind { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let kind = ErrorKind::AddrInUse; + + let value = serde_json::to_value(kind).unwrap(); + assert_eq!(value, serde_json::json!("addr_in_use")); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("addr_in_use"); + + let kind: ErrorKind = serde_json::from_value(value).unwrap(); + assert_eq!(kind, ErrorKind::AddrInUse); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let kind = ErrorKind::AddrInUse; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we 
change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&kind).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&ErrorKind::AddrInUse).unwrap(); + + let kind: ErrorKind = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(kind, ErrorKind::AddrInUse); + } + } +} diff --git a/distant-protocol/src/common/filesystem.rs b/distant-protocol/src/common/filesystem.rs new file mode 100644 index 0000000..fdfb3ca --- /dev/null +++ b/distant-protocol/src/common/filesystem.rs @@ -0,0 +1,173 @@ +use std::fs::FileType as StdFileType; +use std::path::PathBuf; + +use derive_more::IsVariant; +use serde::{Deserialize, Serialize}; +use strum::AsRefStr; + +/// Represents information about a single entry within a directory +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +pub struct DirEntry { + /// Represents the full path to the entry + pub path: PathBuf, + + /// Represents the type of the entry as a file/dir/symlink + pub file_type: FileType, + + /// Depth at which this entry was created relative to the root (0 being immediately within + /// root) + pub depth: usize, +} + +/// Represents the type associated with a dir entry +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +#[strum(serialize_all = "snake_case")] +pub enum FileType { + Dir, + File, + Symlink, +} + +impl From for 
FileType { + fn from(ft: StdFileType) -> Self { + if ft.is_dir() { + Self::Dir + } else if ft.is_symlink() { + Self::Symlink + } else { + Self::File + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod dir_entry { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let entry = DirEntry { + path: PathBuf::from("dir").join("file"), + file_type: FileType::File, + depth: 1, + }; + + let path = entry.path.to_str().unwrap().to_string(); + let value = serde_json::to_value(entry).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "path": path, + "file_type": "file", + "depth": 1, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "path": "test-file", + "file_type": "file", + "depth": 0, + }); + + let entry: DirEntry = serde_json::from_value(value).unwrap(); + assert_eq!( + entry, + DirEntry { + path: PathBuf::from("test-file"), + file_type: FileType::File, + depth: 0, + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let entry = DirEntry { + path: PathBuf::from("dir").join("file"), + file_type: FileType::File, + depth: 1, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&entry).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&DirEntry { + path: PathBuf::from("test-file"), + file_type: FileType::File, + depth: 0, + }) + .unwrap(); + + let entry: DirEntry = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + entry, + DirEntry { + path: PathBuf::from("test-file"), + file_type: FileType::File, + depth: 0, + } + ); + } + } + + mod file_type { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let ty = FileType::File; + + let value = serde_json::to_value(ty).unwrap(); + assert_eq!(value, serde_json::json!("file")); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("file"); + + let ty: FileType = serde_json::from_value(value).unwrap(); + assert_eq!(ty, FileType::File); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let ty = FileType::File; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&ty).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&FileType::File).unwrap(); + + let ty: FileType = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(ty, FileType::File); + } + } +} diff --git a/distant-protocol/src/common/metadata.rs b/distant-protocol/src/common/metadata.rs new file mode 100644 index 0000000..5ddcdaa --- /dev/null +++ b/distant-protocol/src/common/metadata.rs @@ -0,0 +1,1044 @@ +use std::path::PathBuf; + +use bitflags::bitflags; +use serde::{Deserialize, Serialize}; + +use crate::common::FileType; +use crate::utils::{deserialize_u128_option, serialize_u128_option}; + +/// Represents metadata about some path on a remote machine. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Metadata { + /// Canonicalized path to the file or directory, resolving symlinks, only included if flagged + /// during the request. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub canonicalized_path: Option<PathBuf>, + + /// Represents the type of the entry as a file/dir/symlink. + pub file_type: FileType, + + /// Size of the file/directory/symlink in bytes. + pub len: u64, + + /// Whether or not the file/directory/symlink is marked as unwriteable. + pub readonly: bool, + + /// Represents the last time (in milliseconds) when the file/directory/symlink was accessed; + /// can be optional as certain systems don't support this. + /// + /// Note that this is represented as a string and not a number when serialized! + #[serde( + default, + skip_serializing_if = "Option::is_none", + serialize_with = "serialize_u128_option", + deserialize_with = "deserialize_u128_option" + )] + pub accessed: Option<u128>, + + /// Represents when (in milliseconds) the file/directory/symlink was created; + /// can be optional as certain systems don't support this. + /// + /// Note that this is represented as a string and not a number when serialized! 
+ #[serde( + default, + skip_serializing_if = "Option::is_none", + serialize_with = "serialize_u128_option", + deserialize_with = "deserialize_u128_option" + )] + pub created: Option<u128>, + + /// Represents the last time (in milliseconds) when the file/directory/symlink was modified; + /// can be optional as certain systems don't support this. + /// + /// Note that this is represented as a string and not a number when serialized! + #[serde( + default, + skip_serializing_if = "Option::is_none", + serialize_with = "serialize_u128_option", + deserialize_with = "deserialize_u128_option" + )] + pub modified: Option<u128>, + + /// Represents metadata that is specific to a unix remote machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub unix: Option<UnixMetadata>, + + /// Represents metadata that is specific to a windows remote machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub windows: Option<WindowsMetadata>, +} + +/// Represents unix-specific metadata about some path on a remote machine. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct UnixMetadata { + /// Represents whether or not owner can read from the file. + pub owner_read: bool, + + /// Represents whether or not owner can write to the file. + pub owner_write: bool, + + /// Represents whether or not owner can execute the file. + pub owner_exec: bool, + + /// Represents whether or not associated group can read from the file. + pub group_read: bool, + + /// Represents whether or not associated group can write to the file. + pub group_write: bool, + + /// Represents whether or not associated group can execute the file. + pub group_exec: bool, + + /// Represents whether or not other can read from the file. + pub other_read: bool, + + /// Represents whether or not other can write to the file. + pub other_write: bool, + + /// Represents whether or not other can execute the file. 
+ pub other_exec: bool, +} + +impl From<u32> for UnixMetadata { + /// Create from a unix mode bitset + fn from(mode: u32) -> Self { + let flags = UnixFilePermissionFlags::from_bits_truncate(mode); + Self { + owner_read: flags.contains(UnixFilePermissionFlags::OWNER_READ), + owner_write: flags.contains(UnixFilePermissionFlags::OWNER_WRITE), + owner_exec: flags.contains(UnixFilePermissionFlags::OWNER_EXEC), + group_read: flags.contains(UnixFilePermissionFlags::GROUP_READ), + group_write: flags.contains(UnixFilePermissionFlags::GROUP_WRITE), + group_exec: flags.contains(UnixFilePermissionFlags::GROUP_EXEC), + other_read: flags.contains(UnixFilePermissionFlags::OTHER_READ), + other_write: flags.contains(UnixFilePermissionFlags::OTHER_WRITE), + other_exec: flags.contains(UnixFilePermissionFlags::OTHER_EXEC), + } + } +} + +impl From<UnixMetadata> for u32 { + /// Convert to a unix mode bitset. + fn from(metadata: UnixMetadata) -> Self { + let mut flags = UnixFilePermissionFlags::empty(); + + if metadata.owner_read { + flags.insert(UnixFilePermissionFlags::OWNER_READ); + } + if metadata.owner_write { + flags.insert(UnixFilePermissionFlags::OWNER_WRITE); + } + if metadata.owner_exec { + flags.insert(UnixFilePermissionFlags::OWNER_EXEC); + } + + if metadata.group_read { + flags.insert(UnixFilePermissionFlags::GROUP_READ); + } + if metadata.group_write { + flags.insert(UnixFilePermissionFlags::GROUP_WRITE); + } + if metadata.group_exec { + flags.insert(UnixFilePermissionFlags::GROUP_EXEC); + } + + if metadata.other_read { + flags.insert(UnixFilePermissionFlags::OTHER_READ); + } + if metadata.other_write { + flags.insert(UnixFilePermissionFlags::OTHER_WRITE); + } + if metadata.other_exec { + flags.insert(UnixFilePermissionFlags::OTHER_EXEC); + } + + flags.bits() + } +} + +impl UnixMetadata { + pub fn is_readonly(self) -> bool { + !(self.owner_write || self.group_write || self.other_write) + } +} + +bitflags! 
{ + struct UnixFilePermissionFlags: u32 { + const OWNER_READ = 0o400; + const OWNER_WRITE = 0o200; + const OWNER_EXEC = 0o100; + const GROUP_READ = 0o40; + const GROUP_WRITE = 0o20; + const GROUP_EXEC = 0o10; + const OTHER_READ = 0o4; + const OTHER_WRITE = 0o2; + const OTHER_EXEC = 0o1; + } +} + +/// Represents windows-specific metadata about some path on a remote machine +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct WindowsMetadata { + /// Represents whether or not a file or directory is an archive + pub archive: bool, + + /// Represents whether or not a file or directory is compressed + pub compressed: bool, + + /// Represents whether or not the file or directory is encrypted + pub encrypted: bool, + + /// Represents whether or not a file or directory is hidden + pub hidden: bool, + + /// Represents whether or not a directory or user data stream is configured with integrity + pub integrity_stream: bool, + + /// Represents whether or not a file does not have other attributes set + pub normal: bool, + + /// Represents whether or not a file or directory is not to be indexed by content indexing + /// service + pub not_content_indexed: bool, + + /// Represents whether or not a user data stream is not to be read by the background data + /// integrity scanner + pub no_scrub_data: bool, + + /// Represents whether or not the data of a file is not available immediately + pub offline: bool, + + /// Represents whether or not a file or directory is not fully present locally + pub recall_on_data_access: bool, + + /// Represents whether or not a file or directory has no physical representation on the local + /// system (is virtual) + pub recall_on_open: bool, + + /// Represents whether or not a file or directory has an associated reparse point, or a file is + /// a symbolic link + pub reparse_point: bool, + + /// Represents whether or not a file is a sparse file + pub sparse_file: bool, + + /// Represents whether or not a file or directory is 
used partially or exclusively by the + /// operating system + pub system: bool, + + /// Represents whether or not a file is being used for temporary storage + pub temporary: bool, +} + +impl From<u32> for WindowsMetadata { + /// Create from a windows file attribute bitset + fn from(file_attributes: u32) -> Self { + let flags = WindowsFileAttributeFlags::from_bits_truncate(file_attributes); + Self { + archive: flags.contains(WindowsFileAttributeFlags::ARCHIVE), + compressed: flags.contains(WindowsFileAttributeFlags::COMPRESSED), + encrypted: flags.contains(WindowsFileAttributeFlags::ENCRYPTED), + hidden: flags.contains(WindowsFileAttributeFlags::HIDDEN), + integrity_stream: flags.contains(WindowsFileAttributeFlags::INTEGRITY_SYSTEM), + normal: flags.contains(WindowsFileAttributeFlags::NORMAL), + not_content_indexed: flags.contains(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED), + no_scrub_data: flags.contains(WindowsFileAttributeFlags::NO_SCRUB_DATA), + offline: flags.contains(WindowsFileAttributeFlags::OFFLINE), + recall_on_data_access: flags.contains(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS), + recall_on_open: flags.contains(WindowsFileAttributeFlags::RECALL_ON_OPEN), + reparse_point: flags.contains(WindowsFileAttributeFlags::REPARSE_POINT), + sparse_file: flags.contains(WindowsFileAttributeFlags::SPARSE_FILE), + system: flags.contains(WindowsFileAttributeFlags::SYSTEM), + temporary: flags.contains(WindowsFileAttributeFlags::TEMPORARY), + } + } +} + +impl From<WindowsMetadata> for u32 { + /// Convert to a windows file attribute bitset + fn from(metadata: WindowsMetadata) -> Self { + let mut flags = WindowsFileAttributeFlags::empty(); + + if metadata.archive { + flags.insert(WindowsFileAttributeFlags::ARCHIVE); + } + if metadata.compressed { + flags.insert(WindowsFileAttributeFlags::COMPRESSED); + } + if metadata.encrypted { + flags.insert(WindowsFileAttributeFlags::ENCRYPTED); + } + if metadata.hidden { + flags.insert(WindowsFileAttributeFlags::HIDDEN); + } + if 
metadata.integrity_stream { + flags.insert(WindowsFileAttributeFlags::INTEGRITY_SYSTEM); + } + if metadata.normal { + flags.insert(WindowsFileAttributeFlags::NORMAL); + } + if metadata.not_content_indexed { + flags.insert(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED); + } + if metadata.no_scrub_data { + flags.insert(WindowsFileAttributeFlags::NO_SCRUB_DATA); + } + if metadata.offline { + flags.insert(WindowsFileAttributeFlags::OFFLINE); + } + if metadata.recall_on_data_access { + flags.insert(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS); + } + if metadata.recall_on_open { + flags.insert(WindowsFileAttributeFlags::RECALL_ON_OPEN); + } + if metadata.reparse_point { + flags.insert(WindowsFileAttributeFlags::REPARSE_POINT); + } + if metadata.sparse_file { + flags.insert(WindowsFileAttributeFlags::SPARSE_FILE); + } + if metadata.system { + flags.insert(WindowsFileAttributeFlags::SYSTEM); + } + if metadata.temporary { + flags.insert(WindowsFileAttributeFlags::TEMPORARY); + } + + flags.bits() + } +} + +bitflags! 
{ + struct WindowsFileAttributeFlags: u32 { + const ARCHIVE = 0x20; + const COMPRESSED = 0x800; + const ENCRYPTED = 0x4000; + const HIDDEN = 0x2; + const INTEGRITY_SYSTEM = 0x8000; + const NORMAL = 0x80; + const NOT_CONTENT_INDEXED = 0x2000; + const NO_SCRUB_DATA = 0x20000; + const OFFLINE = 0x1000; + const RECALL_ON_DATA_ACCESS = 0x400000; + const RECALL_ON_OPEN = 0x40000; + const REPARSE_POINT = 0x400; + const SPARSE_FILE = 0x200; + const SYSTEM = 0x4; + const TEMPORARY = 0x100; + const VIRTUAL = 0x10000; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod metadata { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_metadata_to_json() { + let metadata = Metadata { + canonicalized_path: None, + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + }; + + let value = serde_json::to_value(metadata).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "file_type": "dir", + "len": 999, + "readonly": true, + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_metadata_to_json() { + let metadata = Metadata { + canonicalized_path: Some(PathBuf::from("test-dir")), + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }; + + // NOTE: These values are too big to normally serialize, 
so we have to convert them to + // a string type, which is why the value here also needs to be a string. + let max_u128_str = u128::MAX.to_string(); + + let value = serde_json::to_value(metadata).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "canonicalized_path": "test-dir", + "file_type": "dir", + "len": 999, + "readonly": true, + "accessed": max_u128_str, + "created": max_u128_str, + "modified": max_u128_str, + "unix": { + "owner_read": true, + "owner_write": false, + "owner_exec": false, + "group_read": true, + "group_write": false, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": false, + }, + "windows": { + "archive": true, + "compressed": false, + "encrypted": true, + "hidden": false, + "integrity_stream": true, + "normal": false, + "not_content_indexed": true, + "no_scrub_data": false, + "offline": true, + "recall_on_data_access": false, + "recall_on_open": true, + "reparse_point": false, + "sparse_file": true, + "system": false, + "temporary": true, + } + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_metadata_from_json() { + let value = serde_json::json!({ + "file_type": "dir", + "len": 999, + "readonly": true, + }); + + let metadata: Metadata = serde_json::from_value(value).unwrap(); + assert_eq!( + metadata, + Metadata { + canonicalized_path: None, + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_metadata_from_json() { + // NOTE: These values are too big to normally serialize, so we have to convert them to + // a string type, which is why the value here also needs to be a string. 
+ let max_u128_str = u128::MAX.to_string(); + + let value = serde_json::json!({ + "canonicalized_path": "test-dir", + "file_type": "dir", + "len": 999, + "readonly": true, + "accessed": max_u128_str, + "created": max_u128_str, + "modified": max_u128_str, + "unix": { + "owner_read": true, + "owner_write": false, + "owner_exec": false, + "group_read": true, + "group_write": false, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": false, + }, + "windows": { + "archive": true, + "compressed": false, + "encrypted": true, + "hidden": false, + "integrity_stream": true, + "normal": false, + "not_content_indexed": true, + "no_scrub_data": false, + "offline": true, + "recall_on_data_access": false, + "recall_on_open": true, + "reparse_point": false, + "sparse_file": true, + "system": false, + "temporary": true, + } + }); + + let metadata: Metadata = serde_json::from_value(value).unwrap(); + assert_eq!( + metadata, + Metadata { + canonicalized_path: Some(PathBuf::from("test-dir")), + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_metadata_to_msgpack() { + let metadata = Metadata { + canonicalized_path: None, + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: None, + created: None, + 
modified: None, + unix: None, + windows: None, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&metadata).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_metadata_to_msgpack() { + let metadata = Metadata { + canonicalized_path: Some(PathBuf::from("test-dir")), + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&metadata).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_metadata_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. 
This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Metadata { + canonicalized_path: None, + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + }) + .unwrap(); + + let metadata: Metadata = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + metadata, + Metadata { + canonicalized_path: None, + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_metadata_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Metadata { + canonicalized_path: Some(PathBuf::from("test-dir")), + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }) + .unwrap(); + + let metadata: Metadata = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + metadata, + Metadata { + canonicalized_path: Some(PathBuf::from("test-dir")), + file_type: FileType::Dir, + len: 999, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + } + ); + } + } + + mod unix_metadata { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let metadata = UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + 
group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }; + + let value = serde_json::to_value(metadata).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "owner_read": true, + "owner_write": false, + "owner_exec": false, + "group_read": true, + "group_write": false, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": false, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "owner_read": true, + "owner_write": false, + "owner_exec": false, + "group_read": true, + "group_write": false, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": false, + }); + + let metadata: UnixMetadata = serde_json::from_value(value).unwrap(); + assert_eq!( + metadata, + UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let metadata = UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&metadata).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. 
This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }) + .unwrap(); + + let metadata: UnixMetadata = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + metadata, + UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + } + ); + } + } + + mod windows_metadata { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let metadata = WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }; + + let value = serde_json::to_value(metadata).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "archive": true, + "compressed": false, + "encrypted": true, + "hidden": false, + "integrity_stream": true, + "normal": false, + "not_content_indexed": true, + "no_scrub_data": false, + "offline": true, + "recall_on_data_access": false, + "recall_on_open": true, + "reparse_point": false, + "sparse_file": true, + "system": false, + "temporary": true, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "archive": true, + "compressed": false, + "encrypted": true, + "hidden": false, + "integrity_stream": true, + "normal": false, + "not_content_indexed": true, + "no_scrub_data": false, + "offline": true, + "recall_on_data_access": false, + "recall_on_open": true, + "reparse_point": false, 
+ "sparse_file": true, + "system": false, + "temporary": true, + }); + + let metadata: WindowsMetadata = serde_json::from_value(value).unwrap(); + assert_eq!( + metadata, + WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let metadata = WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&metadata).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }) + .unwrap(); + + let metadata: WindowsMetadata = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + metadata, + WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + } + ); + } + } +} diff --git a/distant-protocol/src/common/permissions.rs b/distant-protocol/src/common/permissions.rs new file mode 100644 index 0000000..8f20b06 --- /dev/null +++ b/distant-protocol/src/common/permissions.rs @@ -0,0 +1,658 @@ +use bitflags::bitflags; +use serde::{Deserialize, Serialize}; + +use crate::utils; + +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(default, deny_unknown_fields, rename_all = "snake_case")] +pub struct SetPermissionsOptions { + /// Whether or not to exclude symlinks from traversal entirely, meaning that permissions will + /// not be set on symlinks (usually resolving the symlink and setting the permission of the + /// referenced file or directory) that are explicitly provided or show up during recursion. + #[serde(skip_serializing_if = "utils::is_false")] + pub exclude_symlinks: bool, + + /// Whether or not to traverse symlinks when recursively setting permissions. Note that this + /// does NOT influence setting permissions when encountering a symlink as most platforms will + /// resolve the symlink before setting permissions. 
+ #[serde(skip_serializing_if = "utils::is_false")] + pub follow_symlinks: bool, + + /// Whether or not to set the permissions of the file hierarchies rooted in the paths, instead + /// of just the paths themselves. + #[serde(skip_serializing_if = "utils::is_false")] + pub recursive: bool, +} + +/// Represents permissions to apply to some path on a remote machine +/// +/// When used to set permissions on a file, directory, or symlink, +/// only fields that are set (not `None`) will be applied. +/// +/// On `Unix` platforms, this translates directly into the mode that +/// you would find with `chmod`. On all other platforms, this uses the +/// write flags to determine whether or not to set the readonly status. +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct Permissions { + /// Represents whether or not owner can read from the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub owner_read: Option, + + /// Represents whether or not owner can write to the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub owner_write: Option, + + /// Represents whether or not owner can execute the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub owner_exec: Option, + + /// Represents whether or not associated group can read from the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group_read: Option, + + /// Represents whether or not associated group can write to the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group_write: Option, + + /// Represents whether or not associated group can execute the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group_exec: Option, + + /// Represents whether or not other can read from the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub other_read: Option, + + /// Represents whether or not other can write to the file + #[serde(default, 
skip_serializing_if = "Option::is_none")] + pub other_write: Option, + + /// Represents whether or not other can execute the file + #[serde(default, skip_serializing_if = "Option::is_none")] + pub other_exec: Option, +} + +impl Permissions { + /// Creates a set of [`Permissions`] that indicate readonly status. + /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// let permissions = Permissions::readonly(); + /// assert_eq!(permissions.is_readonly(), Some(true)); + /// assert_eq!(permissions.is_writable(), Some(false)); + /// ``` + pub fn readonly() -> Self { + Self { + owner_write: Some(false), + group_write: Some(false), + other_write: Some(false), + + owner_read: Some(true), + group_read: Some(true), + other_read: Some(true), + + owner_exec: None, + group_exec: None, + other_exec: None, + } + } + /// Creates a set of [`Permissions`] that indicate globally writable status. + /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// let permissions = Permissions::writable(); + /// assert_eq!(permissions.is_readonly(), Some(false)); + /// assert_eq!(permissions.is_writable(), Some(true)); + /// ``` + pub fn writable() -> Self { + Self { + owner_write: Some(true), + group_write: Some(true), + other_write: Some(true), + + owner_read: Some(true), + group_read: Some(true), + other_read: Some(true), + + owner_exec: None, + group_exec: None, + other_exec: None, + } + } + + /// Returns true if the permission set has a value specified for each permission (no `None` + /// settings). 
+ /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// let permissions = Permissions { + /// owner_write: Some(true), + /// group_write: Some(false), + /// other_write: Some(true), + /// owner_read: Some(false), + /// group_read: Some(true), + /// other_read: Some(false), + /// owner_exec: Some(true), + /// group_exec: Some(false), + /// other_exec: Some(true), + /// }; + /// assert!(permissions.is_complete()); + /// ``` + pub fn is_complete(&self) -> bool { + self.owner_read.is_some() + && self.owner_write.is_some() + && self.owner_exec.is_some() + && self.group_read.is_some() + && self.group_write.is_some() + && self.group_exec.is_some() + && self.other_read.is_some() + && self.other_write.is_some() + && self.other_exec.is_some() + } + + /// Returns `true` if permissions represent readonly, `false` if permissions represent + /// writable, and `None` if no permissions have been set to indicate either status. + /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// assert_eq!( + /// Permissions { owner_write: Some(true), ..Default::default() }.is_readonly(), + /// Some(false) + /// ); + /// + /// assert_eq!( + /// Permissions { owner_write: Some(false), ..Default::default() }.is_readonly(), + /// Some(true) + /// ); + /// + /// assert_eq!( + /// Permissions { ..Default::default() }.is_writable(), + /// None + /// ); + /// ``` + #[inline] + pub fn is_readonly(&self) -> Option { + // Negate the writable status to indicate whether or not readonly + self.is_writable().map(|x| !x) + } + + /// Returns `true` if permissions represent ability to write, `false` if permissions represent + /// inability to write, and `None` if no permissions have been set to indicate either status. 
+ /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// assert_eq!( + /// Permissions { owner_write: Some(true), ..Default::default() }.is_writable(), + /// Some(true) + /// ); + /// + /// assert_eq!( + /// Permissions { owner_write: Some(false), ..Default::default() }.is_writable(), + /// Some(false) + /// ); + /// + /// assert_eq!( + /// Permissions { ..Default::default() }.is_writable(), + /// None + /// ); + /// ``` + #[inline] + pub fn is_writable(&self) -> Option { + match (self.owner_write, self.group_write, self.other_write) { + (None, None, None) => None, + (owner, group, other) => { + Some(owner.unwrap_or(false) || group.unwrap_or(false) || other.unwrap_or(false)) + } + } + } + + /// Applies `other` settings to `self`, overwriting any of the permissions in `self` with `other`. + /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// let mut a = Permissions { + /// owner_read: Some(true), + /// owner_write: Some(false), + /// owner_exec: None, + /// ..Default::default() + /// }; + /// + /// let b = Permissions { + /// owner_read: Some(false), + /// owner_write: None, + /// owner_exec: Some(true), + /// ..Default::default() + /// }; + /// + /// a.apply_from(&b); + /// + /// assert_eq!(a, Permissions { + /// owner_read: Some(false), + /// owner_write: Some(false), + /// owner_exec: Some(true), + /// ..Default::default() + /// }); + /// ``` + #[inline] + pub fn apply_from(&mut self, other: &Self) { + macro_rules! apply { + ($key:ident) => {{ + if let Some(value) = other.$key { + self.$key = Some(value); + } + }}; + } + + apply!(owner_read); + apply!(owner_write); + apply!(owner_exec); + apply!(group_read); + apply!(group_write); + apply!(group_exec); + apply!(other_read); + apply!(other_write); + apply!(other_exec); + } + + /// Applies `self` settings to `other`, overwriting any of the permissions in `other` with + /// `self`. 
+ /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// let a = Permissions { + /// owner_read: Some(true), + /// owner_write: Some(false), + /// owner_exec: None, + /// ..Default::default() + /// }; + /// + /// let mut b = Permissions { + /// owner_read: Some(false), + /// owner_write: None, + /// owner_exec: Some(true), + /// ..Default::default() + /// }; + /// + /// a.apply_to(&mut b); + /// + /// assert_eq!(b, Permissions { + /// owner_read: Some(true), + /// owner_write: Some(false), + /// owner_exec: Some(true), + /// ..Default::default() + /// }); + /// ``` + #[inline] + pub fn apply_to(&self, other: &mut Self) { + Self::apply_from(other, self) + } + + /// Converts a Unix `mode` into the permission set. + pub fn from_unix_mode(mode: u32) -> Self { + let flags = UnixFilePermissionFlags::from_bits_truncate(mode); + Self { + owner_read: Some(flags.contains(UnixFilePermissionFlags::OWNER_READ)), + owner_write: Some(flags.contains(UnixFilePermissionFlags::OWNER_WRITE)), + owner_exec: Some(flags.contains(UnixFilePermissionFlags::OWNER_EXEC)), + group_read: Some(flags.contains(UnixFilePermissionFlags::GROUP_READ)), + group_write: Some(flags.contains(UnixFilePermissionFlags::GROUP_WRITE)), + group_exec: Some(flags.contains(UnixFilePermissionFlags::GROUP_EXEC)), + other_read: Some(flags.contains(UnixFilePermissionFlags::OTHER_READ)), + other_write: Some(flags.contains(UnixFilePermissionFlags::OTHER_WRITE)), + other_exec: Some(flags.contains(UnixFilePermissionFlags::OTHER_EXEC)), + } + } + + /// Converts to a Unix `mode` from a permission set. For any missing setting, a 0 bit is used. 
+ /// + /// ``` + /// use distant_protocol::Permissions; + /// + /// assert_eq!(Permissions { + /// owner_read: Some(true), + /// owner_write: Some(true), + /// owner_exec: Some(true), + /// group_read: Some(true), + /// group_write: Some(true), + /// group_exec: Some(true), + /// other_read: Some(true), + /// other_write: Some(true), + /// other_exec: Some(true), + /// }.to_unix_mode(), 0o777); + /// + /// assert_eq!(Permissions { + /// owner_read: Some(true), + /// owner_write: Some(false), + /// owner_exec: Some(false), + /// group_read: Some(true), + /// group_write: Some(false), + /// group_exec: Some(false), + /// other_read: Some(true), + /// other_write: Some(false), + /// other_exec: Some(false), + /// }.to_unix_mode(), 0o444); + /// + /// assert_eq!(Permissions { + /// owner_exec: Some(true), + /// group_exec: Some(true), + /// other_exec: Some(true), + /// ..Default::default() + /// }.to_unix_mode(), 0o111); + /// ``` + pub fn to_unix_mode(&self) -> u32 { + let mut flags = UnixFilePermissionFlags::empty(); + + macro_rules! 
is_true { + ($opt:expr) => {{ + $opt.is_some() && $opt.unwrap() + }}; + } + + if is_true!(self.owner_read) { + flags.insert(UnixFilePermissionFlags::OWNER_READ); + } + if is_true!(self.owner_write) { + flags.insert(UnixFilePermissionFlags::OWNER_WRITE); + } + if is_true!(self.owner_exec) { + flags.insert(UnixFilePermissionFlags::OWNER_EXEC); + } + + if is_true!(self.group_read) { + flags.insert(UnixFilePermissionFlags::GROUP_READ); + } + if is_true!(self.group_write) { + flags.insert(UnixFilePermissionFlags::GROUP_WRITE); + } + if is_true!(self.group_exec) { + flags.insert(UnixFilePermissionFlags::GROUP_EXEC); + } + + if is_true!(self.other_read) { + flags.insert(UnixFilePermissionFlags::OTHER_READ); + } + if is_true!(self.other_write) { + flags.insert(UnixFilePermissionFlags::OTHER_WRITE); + } + if is_true!(self.other_exec) { + flags.insert(UnixFilePermissionFlags::OTHER_EXEC); + } + + flags.bits() + } +} + +#[cfg(unix)] +impl From for Permissions { + /// Converts [`std::fs::Permissions`] into [`Permissions`] using + /// [`std::os::unix::fs::PermissionsExt::mode`] to supply the bitset. + fn from(permissions: std::fs::Permissions) -> Self { + use std::os::unix::prelude::*; + Self::from_unix_mode(permissions.mode()) + } +} + +#[cfg(not(unix))] +impl From for Permissions { + /// Converts [`std::fs::Permissions`] into [`Permissions`] using the `readonly` flag. + /// + /// This will not set executable flags, but will set all read and write flags with write flags + /// being `false` if `readonly`, otherwise set to `true`. + fn from(permissions: std::fs::Permissions) -> Self { + if permissions.readonly() { + Self::readonly() + } else { + Self::writable() + } + } +} + +#[cfg(unix)] +impl From for std::fs::Permissions { + /// Converts [`Permissions`] into [`std::fs::Permissions`] using + /// [`std::os::unix::fs::PermissionsExt::from_mode`]. 
+ fn from(permissions: Permissions) -> Self { + use std::os::unix::prelude::*; + std::fs::Permissions::from_mode(permissions.to_unix_mode()) + } +} + +bitflags! { + struct UnixFilePermissionFlags: u32 { + const OWNER_READ = 0o400; + const OWNER_WRITE = 0o200; + const OWNER_EXEC = 0o100; + const GROUP_READ = 0o40; + const GROUP_WRITE = 0o20; + const GROUP_EXEC = 0o10; + const OTHER_READ = 0o4; + const OTHER_WRITE = 0o2; + const OTHER_EXEC = 0o1; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_permissions_to_json() { + let permissions = Permissions { + owner_read: None, + owner_write: None, + owner_exec: None, + group_read: None, + group_write: None, + group_exec: None, + other_read: None, + other_write: None, + other_exec: None, + }; + + let value = serde_json::to_value(permissions).unwrap(); + assert_eq!(value, serde_json::json!({})); + } + + #[test] + fn should_be_able_to_serialize_full_permissions_to_json() { + let permissions = Permissions { + owner_read: Some(true), + owner_write: Some(false), + owner_exec: Some(true), + group_read: Some(false), + group_write: Some(true), + group_exec: Some(false), + other_read: Some(true), + other_write: Some(false), + other_exec: Some(true), + }; + + let value = serde_json::to_value(permissions).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "owner_read": true, + "owner_write": false, + "owner_exec": true, + "group_read": false, + "group_write": true, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": true, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_permissions_from_json() { + let value = serde_json::json!({}); + + let permissions: Permissions = serde_json::from_value(value).unwrap(); + assert_eq!( + permissions, + Permissions { + owner_read: None, + owner_write: None, + owner_exec: None, + group_read: None, + group_write: None, + group_exec: None, + other_read: None, + other_write: None, + 
other_exec: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_permissions_from_json() { + let value = serde_json::json!({ + "owner_read": true, + "owner_write": false, + "owner_exec": true, + "group_read": false, + "group_write": true, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": true, + }); + + let permissions: Permissions = serde_json::from_value(value).unwrap(); + assert_eq!( + permissions, + Permissions { + owner_read: Some(true), + owner_write: Some(false), + owner_exec: Some(true), + group_read: Some(false), + group_write: Some(true), + group_exec: Some(false), + other_read: Some(true), + other_write: Some(false), + other_exec: Some(true), + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_permissions_to_msgpack() { + let permissions = Permissions { + owner_read: None, + owner_write: None, + owner_exec: None, + group_read: None, + group_write: None, + group_exec: None, + other_read: None, + other_write: None, + other_exec: None, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&permissions).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_permissions_to_msgpack() { + let permissions = Permissions { + owner_read: Some(true), + owner_write: Some(false), + owner_exec: Some(true), + group_read: Some(true), + group_write: Some(false), + group_exec: Some(true), + other_read: Some(true), + other_write: Some(false), + other_exec: Some(true), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. 
This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&permissions).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_permissions_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Permissions { + owner_read: None, + owner_write: None, + owner_exec: None, + group_read: None, + group_write: None, + group_exec: None, + other_read: None, + other_write: None, + other_exec: None, + }) + .unwrap(); + + let permissions: Permissions = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + permissions, + Permissions { + owner_read: None, + owner_write: None, + owner_exec: None, + group_read: None, + group_write: None, + group_exec: None, + other_read: None, + other_write: None, + other_exec: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_permissions_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or preventing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Permissions { + owner_read: Some(true), + owner_write: Some(false), + owner_exec: Some(true), + group_read: Some(true), + group_write: Some(false), + group_exec: Some(true), + other_read: Some(true), + other_write: Some(false), + other_exec: Some(true), + }) + .unwrap(); + + let permissions: Permissions = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + permissions, + Permissions { + owner_read: Some(true), + owner_write: Some(false), + owner_exec: Some(true), + group_read: Some(true), + group_write: Some(false), + group_exec: Some(true), + other_read: Some(true), + other_write: Some(false), + other_exec: Some(true), + } + ); + } +} diff --git a/distant-protocol/src/common/pty.rs b/distant-protocol/src/common/pty.rs new file mode 100644 index 0000000..594a18e --- /dev/null +++ b/distant-protocol/src/common/pty.rs @@ -0,0 +1,241 @@ +use std::fmt; +use std::num::ParseIntError; +use std::str::FromStr; + +use derive_more::{Display, Error}; +use serde::{Deserialize, Serialize}; + +/// Represents the size associated with a remote PTY +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct PtySize { + /// Number of lines of text + pub rows: u16, + + /// Number of columns of text + pub cols: u16, + + /// Width of a cell in pixels. Note that some systems never fill this value and ignore it. + #[serde(default)] + pub pixel_width: u16, + + /// Height of a cell in pixels. Note that some systems never fill this value and ignore it. 
+ #[serde(default)] + pub pixel_height: u16, +} + +impl PtySize { + /// Creates new size using just rows and columns + pub fn from_rows_and_cols(rows: u16, cols: u16) -> Self { + Self { + rows, + cols, + ..Default::default() + } + } +} + +impl fmt::Display for PtySize { + /// Prints out `rows,cols[,pixel_width,pixel_height]` where the + /// pixel width and pixel height are only included if either + /// one of them is not zero + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{},{}", self.rows, self.cols)?; + if self.pixel_width > 0 || self.pixel_height > 0 { + write!(f, ",{},{}", self.pixel_width, self.pixel_height)?; + } + + Ok(()) + } +} + +impl Default for PtySize { + fn default() -> Self { + PtySize { + rows: 24, + cols: 80, + pixel_width: 0, + pixel_height: 0, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Display, Error)] +pub enum PtySizeParseError { + MissingRows, + MissingColumns, + InvalidRows(ParseIntError), + InvalidColumns(ParseIntError), + InvalidPixelWidth(ParseIntError), + InvalidPixelHeight(ParseIntError), +} + +impl FromStr for PtySize { + type Err = PtySizeParseError; + + /// Attempts to parse a str into PtySize using one of the following formats: + /// + /// * rows,cols (defaults to 0 for pixel_width & pixel_height) + /// * rows,cols,pixel_width,pixel_height + fn from_str(s: &str) -> Result { + let mut tokens = s.split(','); + + Ok(Self { + rows: tokens + .next() + .ok_or(PtySizeParseError::MissingRows)? + .trim() + .parse() + .map_err(PtySizeParseError::InvalidRows)?, + cols: tokens + .next() + .ok_or(PtySizeParseError::MissingColumns)? + .trim() + .parse() + .map_err(PtySizeParseError::InvalidColumns)?, + pixel_width: tokens + .next() + .map(|s| s.trim().parse()) + .transpose() + .map_err(PtySizeParseError::InvalidPixelWidth)? + .unwrap_or(0), + pixel_height: tokens + .next() + .map(|s| s.trim().parse()) + .transpose() + .map_err(PtySizeParseError::InvalidPixelHeight)? 
+ .unwrap_or(0), + }) + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let size = PtySize { + rows: 10, + cols: 20, + pixel_width: 30, + pixel_height: 40, + }; + + let value = serde_json::to_value(size).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "rows": 10, + "cols": 20, + "pixel_width": 30, + "pixel_height": 40, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_size_from_json() { + let value = serde_json::json!({ + "rows": 10, + "cols": 20, + }); + + let size: PtySize = serde_json::from_value(value).unwrap(); + assert_eq!( + size, + PtySize { + rows: 10, + cols: 20, + pixel_width: 0, + pixel_height: 0, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_size_from_json() { + let value = serde_json::json!({ + "rows": 10, + "cols": 20, + "pixel_width": 30, + "pixel_height": 40, + }); + + let size: PtySize = serde_json::from_value(value).unwrap(); + assert_eq!( + size, + PtySize { + rows: 10, + cols: 20, + pixel_width: 30, + pixel_height: 40, + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let size = PtySize { + rows: 10, + cols: 20, + pixel_width: 30, + pixel_height: 40, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&size).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_size_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+        #[derive(Serialize)]
+        struct PartialSize {
+            rows: u16,
+            cols: u16,
+        }
+        let buf = rmp_serde::encode::to_vec_named(&PartialSize { rows: 10, cols: 20 }).unwrap();
+
+        let size: PtySize = rmp_serde::decode::from_slice(&buf).unwrap();
+        assert_eq!(
+            size,
+            PtySize {
+                rows: 10,
+                cols: 20,
+                pixel_width: 0,
+                pixel_height: 0,
+            }
+        );
+    }
+
+    #[test]
+    fn should_be_able_to_deserialize_full_size_from_msgpack() {
+        // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
+        //       verify that we are not corrupting or causing issues when serializing on a
+        //       client/server and then trying to deserialize on the other side. This has happened
+        //       enough times with minor changes that we need tests to verify.
+        let buf = rmp_serde::encode::to_vec_named(&PtySize {
+            rows: 10,
+            cols: 20,
+            pixel_width: 30,
+            pixel_height: 40,
+        })
+        .unwrap();
+
+        let size: PtySize = rmp_serde::decode::from_slice(&buf).unwrap();
+        assert_eq!(
+            size,
+            PtySize {
+                rows: 10,
+                cols: 20,
+                pixel_width: 30,
+                pixel_height: 40,
+            }
+        );
+    }
+}
diff --git a/distant-protocol/src/common/search.rs b/distant-protocol/src/common/search.rs
new file mode 100644
index 0000000..6e55ab6
--- /dev/null
+++ b/distant-protocol/src/common/search.rs
@@ -0,0 +1,1829 @@
+use std::borrow::Cow;
+use std::collections::HashSet;
+use std::path::PathBuf;
+use std::str::FromStr;
+
+use serde::{Deserialize, Serialize};
+
+use crate::common::FileType;
+use crate::utils;
+
+/// Id associated with a search
+pub type SearchId = u32;
+
+/// Represents a query to perform against the filesystem
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct SearchQuery {
+    /// Kind of data to examine using condition
+    pub target: SearchQueryTarget,
+
+    /// Condition to meet to be considered a match
+    pub condition: SearchQueryCondition,
+
+    /// Paths in which to perform the query
+    pub paths: Vec<PathBuf>,
+
+    /// Options to apply to the query
+    #[serde(default)]
+    pub options: SearchQueryOptions,
}

impl SearchQuery {
    /// Creates a search query targeting the contents of files.
    pub fn contents<I, T>(
        condition: SearchQueryCondition,
        paths: I,
        options: SearchQueryOptions,
    ) -> Self
    where
        I: IntoIterator<Item = T>,
        T: Into<PathBuf>,
    {
        Self {
            target: SearchQueryTarget::Contents,
            condition,
            paths: paths.into_iter().map(Into::into).collect(),
            options,
        }
    }

    /// Creates a search query targeting the paths of files, directories, and symlinks.
    pub fn path<I, T>(
        condition: SearchQueryCondition,
        paths: I,
        options: SearchQueryOptions,
    ) -> Self
    where
        I: IntoIterator<Item = T>,
        T: Into<PathBuf>,
    {
        Self {
            target: SearchQueryTarget::Path,
            condition,
            paths: paths.into_iter().map(Into::into).collect(),
            options,
        }
    }
}

/// Kind of data to examine using conditions
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SearchQueryTarget {
    /// Checks path of file, directory, or symlink
    Path,

    /// Checks contents of files
    Contents,
}

/// Condition used to find a match in a search query
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum SearchQueryCondition {
    /// Text is found anywhere (all regex patterns are escaped)
    Contains { value: String },

    /// Ends with some text (all regex patterns are escaped)
    EndsWith { value: String },

    /// Matches some text exactly (all regex patterns are escaped)
    Equals { value: String },

    /// Any of the conditions match
    Or { value: Vec<SearchQueryCondition> },

    /// Matches some regex
    Regex { value: String },

    /// Begins with some text (all regex patterns are escaped)
    StartsWith { value: String },
}

impl SearchQueryCondition {
    /// Creates a new instance with `Contains` variant
    pub fn contains(value: impl Into<String>) -> Self {
        Self::Contains {
            value: value.into(),
        }
    }

    /// Creates a new instance with `EndsWith` variant
    pub fn ends_with(value: impl Into<String>) -> Self {
        Self::EndsWith {
            value: value.into(),
        }
    }

    /// Creates a new instance with `Equals` variant
    pub fn equals(value: impl Into<String>) -> Self {
        Self::Equals {
            value: value.into(),
        }
    }

    /// Creates a new instance with `Or` variant
    pub fn or<I, C>(value: I) -> Self
    where
        I: IntoIterator<Item = C>,
        C: Into<SearchQueryCondition>,
    {
        Self::Or {
            value: value.into_iter().map(Into::into).collect(),
        }
    }

    /// Creates a new instance with `Regex` variant
    pub fn regex(value: impl Into<String>) -> Self {
        Self::Regex {
            value: value.into(),
        }
    }

    /// Creates a new instance with `StartsWith` variant
    pub fn starts_with(value: impl Into<String>) -> Self {
        Self::StartsWith {
            value: value.into(),
        }
    }

    /// Converts the condition in a regex string
    pub fn to_regex_string(&self) -> String {
        match self {
            Self::Contains { value } => regex::escape(value),
            Self::EndsWith { value } => format!(r"{}$", regex::escape(value)),
            Self::Equals { value } => format!(r"^{}$", regex::escape(value)),
            Self::Regex { value } => value.to_string(),
            Self::StartsWith { value } => format!(r"^{}", regex::escape(value)),
            // Alternation of each sub-condition's regex, separated by `|`
            Self::Or { value } => value
                .iter()
                .map(|condition| condition.to_regex_string())
                .collect::<Vec<_>>()
                .join("|"),
        }
    }
}

impl FromStr for SearchQueryCondition {
    type Err = std::convert::Infallible;

    /// Parses a string by treating it as a regex pattern, producing a `Regex` condition
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self::regex(s))
    }
}

/// Options associated with a search query
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct SearchQueryOptions {
    /// Restrict search to only these file types (otherwise all are allowed).
    #[serde(skip_serializing_if = "HashSet::is_empty")]
    pub allowed_file_types: HashSet<FileType>,

    /// Regex to use to filter paths being searched to only those that match the include condition.
+ #[serde(skip_serializing_if = "Option::is_none")] + pub include: Option, + + /// Regex to use to filter paths being searched to only those that do not match the exclude. + /// condition + #[serde(skip_serializing_if = "Option::is_none")] + pub exclude: Option, + + /// If true, will search upward through parent directories rather than the traditional downward + /// search that recurses through all children directories. + /// + /// Note that this will use maximum depth to apply to the reverse direction, and will only look + /// through each ancestor directory's immediate entries. In other words, this will not result + /// in recursing through sibling directories. + /// + /// An upward search will ALWAYS search the contents of a directory, so this means providing a + /// path to a directory will search its entries EVEN if the max_depth is 0. + #[serde(skip_serializing_if = "utils::is_false")] + pub upward: bool, + + /// Search should follow symbolic links. + #[serde(skip_serializing_if = "utils::is_false")] + pub follow_symbolic_links: bool, + + /// Maximum results to return before stopping the query. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + + /// Maximum depth (directories) to search + /// + /// The smallest depth is 0 and always corresponds to the path given to the new function on + /// this type. Its direct descendents have depth 1, and their descendents have depth 2, and so + /// on. + /// + /// Note that this will not simply filter the entries of the iterator, but it will actually + /// avoid descending into directories when the depth is exceeded. + #[serde(skip_serializing_if = "Option::is_none")] + pub max_depth: Option, + + /// Amount of results to batch before sending back excluding final submission that will always + /// include the remaining results even if less than pagination request. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub pagination: Option, +} + +/// Represents a match for a search query +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] +pub enum SearchQueryMatch { + /// Matches part of a file's path + Path(SearchQueryPathMatch), + + /// Matches part of a file's contents + Contents(SearchQueryContentsMatch), +} + +impl SearchQueryMatch { + pub fn into_path_match(self) -> Option { + match self { + Self::Path(x) => Some(x), + _ => None, + } + } + + pub fn into_contents_match(self) -> Option { + match self { + Self::Contents(x) => Some(x), + _ => None, + } + } +} + +/// Represents details for a match on a path +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SearchQueryPathMatch { + /// Path associated with the match + pub path: PathBuf, + + /// Collection of matches tied to `path` where each submatch's byte offset is relative to + /// `path` + pub submatches: Vec, +} + +/// Represents details for a match on a file's contents +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SearchQueryContentsMatch { + /// Path to file whose contents match + pub path: PathBuf, + + /// Line(s) that matched + pub lines: SearchQueryMatchData, + + /// Line number where match starts (base index 1) + pub line_number: u64, + + /// Absolute byte offset corresponding to the start of `lines` in the data being searched + pub absolute_offset: u64, + + /// Collection of matches tied to `lines` where each submatch's byte offset is relative to + /// `lines` and not the overall content + pub submatches: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SearchQuerySubmatch { + /// Content matched by query + pub r#match: SearchQueryMatchData, + + /// Byte offset representing start of submatch (inclusive) + pub start: u64, + + /// Byte offset representing end of submatch (exclusive) + pub 
end: u64, +} + +impl SearchQuerySubmatch { + /// Creates a new submatch using the given `match` data, `start`, and `end`. + pub fn new(r#match: impl Into, start: u64, end: u64) -> Self { + Self { + r#match: r#match.into(), + start, + end, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum SearchQueryMatchData { + /// Match represented as bytes + Bytes(Vec), + + /// Match represented as UTF-8 text + Text(String), +} + +impl SearchQueryMatchData { + /// Creates a new instance with `Text` variant + pub fn text(value: impl Into) -> Self { + Self::Text(value.into()) + } + + /// Creates a new instance with `Bytes` variant + pub fn bytes(value: impl Into>) -> Self { + Self::Bytes(value.into()) + } + + /// Returns the UTF-8 str reference to the data, if is valid UTF-8 + pub fn to_str(&self) -> Option<&str> { + match self { + Self::Text(x) => Some(x), + Self::Bytes(x) => std::str::from_utf8(x).ok(), + } + } + + /// Converts data to a UTF-8 string, replacing any invalid UTF-8 sequences with + /// [`U+FFFD REPLACEMENT CHARACTER`](https://doc.rust-lang.org/nightly/core/char/const.REPLACEMENT_CHARACTER.html) + pub fn to_string_lossy(&self) -> Cow<'_, str> { + match self { + Self::Text(x) => Cow::Borrowed(x), + Self::Bytes(x) => String::from_utf8_lossy(x), + } + } +} + +impl From> for SearchQueryMatchData { + fn from(bytes: Vec) -> Self { + Self::Bytes(bytes) + } +} + +impl<'a> From<&'a [u8]> for SearchQueryMatchData { + fn from(bytes: &'a [u8]) -> Self { + Self::Bytes(bytes.to_vec()) + } +} + +impl From for SearchQueryMatchData { + fn from(text: String) -> Self { + Self::Text(text) + } +} + +impl<'a> From<&'a str> for SearchQueryMatchData { + fn from(text: &'a str) -> Self { + Self::Text(text.to_string()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod search_query { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let query = SearchQuery { + target: SearchQueryTarget::Contents, + 
condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path1"), PathBuf::from("path2")], + options: SearchQueryOptions::default(), + }; + + let value = serde_json::to_value(query).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "target": "contents", + "condition": { + "type": "equals", + "value": "hello world", + }, + "paths": ["path1", "path2"], + "options": {}, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "target": "contents", + "condition": { + "type": "equals", + "value": "hello world", + }, + "paths": ["path1", "path2"], + }); + + let query: SearchQuery = serde_json::from_value(value).unwrap(); + assert_eq!( + query, + SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path1"), PathBuf::from("path2")], + options: SearchQueryOptions::default(), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let query = SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path1"), PathBuf::from("path2")], + options: SearchQueryOptions::default(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&query).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path1"), PathBuf::from("path2")], + options: SearchQueryOptions::default(), + }) + .unwrap(); + + let query: SearchQuery = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + query, + SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path1"), PathBuf::from("path2")], + options: SearchQueryOptions::default(), + } + ); + } + } + + mod search_query_target { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let target = SearchQueryTarget::Contents; + let value = serde_json::to_value(target).unwrap(); + assert_eq!(value, serde_json::json!("contents")); + + let target = SearchQueryTarget::Path; + let value = serde_json::to_value(target).unwrap(); + assert_eq!(value, serde_json::json!("path")); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("contents"); + let target: SearchQueryTarget = serde_json::from_value(value).unwrap(); + assert_eq!(target, SearchQueryTarget::Contents); + + let value = serde_json::json!("path"); + let target: SearchQueryTarget = serde_json::from_value(value).unwrap(); + assert_eq!(target, SearchQueryTarget::Path); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let target = SearchQueryTarget::Contents; + let _ = rmp_serde::encode::to_vec_named(&target).unwrap(); + + let target = SearchQueryTarget::Path; + let _ = rmp_serde::encode::to_vec_named(&target).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryTarget::Contents).unwrap(); + let target: SearchQueryTarget = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(target, SearchQueryTarget::Contents); + + let buf = rmp_serde::encode::to_vec_named(&SearchQueryTarget::Path).unwrap(); + let target: SearchQueryTarget = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(target, SearchQueryTarget::Path); + } + } + + mod search_query_condition { + use super::*; + + #[test] + fn to_regex_string_should_convert_to_appropriate_regex_and_escape_as_needed() { + assert_eq!( + SearchQueryCondition::contains("t^es$t").to_regex_string(), + r"t\^es\$t" + ); + assert_eq!( + SearchQueryCondition::ends_with("t^es$t").to_regex_string(), + r"t\^es\$t$" + ); + assert_eq!( + SearchQueryCondition::equals("t^es$t").to_regex_string(), + r"^t\^es\$t$" + ); + assert_eq!( + SearchQueryCondition::or([ + SearchQueryCondition::contains("t^es$t"), + SearchQueryCondition::equals("t^es$t"), + SearchQueryCondition::regex("^test$"), + ]) + .to_regex_string(), + r"t\^es\$t|^t\^es\$t$|^test$" + ); + assert_eq!( + SearchQueryCondition::regex("test").to_regex_string(), + "test" + ); + assert_eq!( + SearchQueryCondition::starts_with("t^es$t").to_regex_string(), + r"^t\^es\$t" + ); + } + + mod contains { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let condition = 
SearchQueryCondition::contains("some text"); + + let value = serde_json::to_value(condition).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "contains", + "value": "some text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "contains", + "value": "some text", + }); + + let condition: SearchQueryCondition = serde_json::from_value(value).unwrap(); + assert_eq!(condition, SearchQueryCondition::contains("some text")); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let condition = SearchQueryCondition::contains("some text"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&condition).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = + rmp_serde::encode::to_vec_named(&SearchQueryCondition::contains("some text")) + .unwrap(); + + let condition: SearchQueryCondition = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(condition, SearchQueryCondition::contains("some text")); + } + } + + mod ends_with { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let condition = SearchQueryCondition::ends_with("some text"); + + let value = serde_json::to_value(condition).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "ends_with", + "value": "some text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "ends_with", + "value": "some text", + }); + + let condition: SearchQueryCondition = serde_json::from_value(value).unwrap(); + assert_eq!(condition, SearchQueryCondition::ends_with("some text")); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let condition = SearchQueryCondition::ends_with("some text"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&condition).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = + rmp_serde::encode::to_vec_named(&SearchQueryCondition::ends_with("some text")) + .unwrap(); + + let condition: SearchQueryCondition = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(condition, SearchQueryCondition::ends_with("some text")); + } + } + + mod equals { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let condition = SearchQueryCondition::equals("some text"); + + let value = serde_json::to_value(condition).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "equals", + "value": "some text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "equals", + "value": "some text", + }); + + let condition: SearchQueryCondition = serde_json::from_value(value).unwrap(); + assert_eq!(condition, SearchQueryCondition::equals("some text")); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let condition = SearchQueryCondition::equals("some text"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&condition).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = + rmp_serde::encode::to_vec_named(&SearchQueryCondition::equals("some text")) + .unwrap(); + + let condition: SearchQueryCondition = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(condition, SearchQueryCondition::equals("some text")); + } + } + + mod or { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let condition = SearchQueryCondition::or([ + SearchQueryCondition::starts_with("start text"), + SearchQueryCondition::ends_with("end text"), + ]); + + let value = serde_json::to_value(condition).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "or", + "value": [ + { "type": "starts_with", "value": "start text" }, + { "type": "ends_with", "value": "end text" }, + ], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "or", + "value": [ + { "type": "starts_with", "value": "start text" }, + { "type": "ends_with", "value": "end text" }, + ], + }); + + let condition: SearchQueryCondition = serde_json::from_value(value).unwrap(); + assert_eq!( + condition, + SearchQueryCondition::or([ + SearchQueryCondition::starts_with("start text"), + SearchQueryCondition::ends_with("end text"), + ]) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let condition = SearchQueryCondition::or([ + SearchQueryCondition::starts_with("start text"), + SearchQueryCondition::ends_with("end text"), + ]); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&condition).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryCondition::or([ + SearchQueryCondition::starts_with("start text"), + SearchQueryCondition::ends_with("end text"), + ])) + .unwrap(); + + let condition: SearchQueryCondition = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + condition, + SearchQueryCondition::or([ + SearchQueryCondition::starts_with("start text"), + SearchQueryCondition::ends_with("end text"), + ]) + ); + } + } + + mod regex { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let condition = SearchQueryCondition::regex("some text"); + + let value = serde_json::to_value(condition).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "regex", + "value": "some text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "regex", + "value": "some text", + }); + + let condition: SearchQueryCondition = serde_json::from_value(value).unwrap(); + assert_eq!(condition, SearchQueryCondition::regex("some text")); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let condition = SearchQueryCondition::regex("some text"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&condition).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = + rmp_serde::encode::to_vec_named(&SearchQueryCondition::regex("some text")) + .unwrap(); + + let condition: SearchQueryCondition = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(condition, SearchQueryCondition::regex("some text")); + } + } + + mod starts_with { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let condition = SearchQueryCondition::starts_with("some text"); + + let value = serde_json::to_value(condition).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "starts_with", + "value": "some text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "starts_with", + "value": "some text", + }); + + let condition: SearchQueryCondition = serde_json::from_value(value).unwrap(); + assert_eq!(condition, SearchQueryCondition::starts_with("some text")); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let condition = SearchQueryCondition::starts_with("some text"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&condition).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryCondition::starts_with( + "some text", + )) + .unwrap(); + + let condition: SearchQueryCondition = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(condition, SearchQueryCondition::starts_with("some text")); + } + } + } + + mod search_query_options { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_options_to_json() { + let options = SearchQueryOptions { + allowed_file_types: [].into_iter().collect(), + include: None, + exclude: None, + upward: false, + follow_symbolic_links: false, + limit: None, + max_depth: None, + pagination: None, + }; + + let value = serde_json::to_value(options).unwrap(); + assert_eq!(value, serde_json::json!({})); + } + + #[test] + fn should_be_able_to_serialize_full_options_to_json() { + let options = SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }; + + let value = serde_json::to_value(options).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "allowed_file_types": ["file"], + "include": { + "type": "equals", + "value": "hello", + }, + "exclude": { + "type": "contains", + "value": "world", + }, + "upward": true, + "follow_symbolic_links": true, + "limit": u64::MAX, + 
"max_depth": u64::MAX, + "pagination": u64::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_options_from_json() { + let value = serde_json::json!({}); + + let options: SearchQueryOptions = serde_json::from_value(value).unwrap(); + assert_eq!( + options, + SearchQueryOptions { + allowed_file_types: [].into_iter().collect(), + include: None, + exclude: None, + upward: false, + follow_symbolic_links: false, + limit: None, + max_depth: None, + pagination: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_options_from_json() { + let value = serde_json::json!({ + "allowed_file_types": ["file"], + "include": { + "type": "equals", + "value": "hello", + }, + "exclude": { + "type": "contains", + "value": "world", + }, + "upward": true, + "follow_symbolic_links": true, + "limit": u64::MAX, + "max_depth": u64::MAX, + "pagination": u64::MAX, + }); + + let options: SearchQueryOptions = serde_json::from_value(value).unwrap(); + assert_eq!( + options, + SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_options_to_msgpack() { + let options = SearchQueryOptions { + allowed_file_types: [].into_iter().collect(), + include: None, + exclude: None, + upward: false, + follow_symbolic_links: false, + limit: None, + max_depth: None, + pagination: None, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&options).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_options_to_msgpack() { + let options = SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&options).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_options_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&SearchQueryOptions { + allowed_file_types: [].into_iter().collect(), + include: None, + exclude: None, + upward: false, + follow_symbolic_links: false, + limit: None, + max_depth: None, + pagination: None, + }) + .unwrap(); + + let options: SearchQueryOptions = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + options, + SearchQueryOptions { + allowed_file_types: [].into_iter().collect(), + include: None, + exclude: None, + upward: false, + follow_symbolic_links: false, + limit: None, + max_depth: None, + pagination: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_options_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }) + .unwrap(); + + let options: SearchQueryOptions = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + options, + SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + } + ); + } + } + + mod search_query_match { + use 
super::*; + + mod for_path { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let r#match = SearchQueryMatch::Path(SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }); + + let value = serde_json::to_value(r#match).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "path", + "path": "path", + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "path", + "path": "path", + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }); + + let r#match: SearchQueryMatch = serde_json::from_value(value).unwrap(); + assert_eq!( + r#match, + SearchQueryMatch::Path(SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let r#match = SearchQueryMatch::Path(SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&r#match).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryMatch::Path( + SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }, + )) + .unwrap(); + + let r#match: SearchQueryMatch = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + r#match, + SearchQueryMatch::Path(SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }) + ); + } + } + + mod for_contents { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let r#match = SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }); + + let value = serde_json::to_value(r#match).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "contents", + "path": "path", + "lines": "some text", + "line_number": 12, + "absolute_offset": 24, + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "contents", + "path": "path", + "lines": "some text", + "line_number": 12, + 
"absolute_offset": 24, + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }); + + let r#match: SearchQueryMatch = serde_json::from_value(value).unwrap(); + assert_eq!( + r#match, + SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let r#match = SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&r#match).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&SearchQueryMatch::Contents( + SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }, + )) + .unwrap(); + + let r#match: SearchQueryMatch = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + r#match, + SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }) + ); + } + } + } + + mod search_query_path_match { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let r#match = SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }; + + let value = serde_json::to_value(r#match).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "path": "path", + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "path": "path", + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }); + + let r#match: SearchQueryPathMatch = serde_json::from_value(value).unwrap(); + assert_eq!( + r#match, + SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let r#match = SearchQueryPathMatch { + path: PathBuf::from("path"), + 
submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&r#match).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }) + .unwrap(); + + let r#match: SearchQueryPathMatch = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + r#match, + SearchQueryPathMatch { + path: PathBuf::from("path"), + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + } + ); + } + } + + mod search_query_contents_match { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let r#match = SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }; + + let value = serde_json::to_value(r#match).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "path": "path", + "lines": "some text", + 
"line_number": 12, + "absolute_offset": 24, + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "path": "path", + "lines": "some text", + "line_number": 12, + "absolute_offset": 24, + "submatches": [{ + "match": "text", + "start": 8, + "end": 13, + }], + }); + + let r#match: SearchQueryContentsMatch = serde_json::from_value(value).unwrap(); + assert_eq!( + r#match, + SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let r#match = SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&r#match).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + }) + .unwrap(); + + let r#match: SearchQueryContentsMatch = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + r#match, + SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: SearchQueryMatchData::Text(String::from("some text")), + line_number: 12, + absolute_offset: 24, + submatches: vec![SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("text")), + start: 8, + end: 13, + }], + } + ); + } + } + + mod search_query_submatch { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let data = SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("some text")), + start: 12, + end: 24, + }; + + let value = serde_json::to_value(data).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "match": "some text", + "start": 12, + "end": 24, + }) + ); + + let data = SearchQuerySubmatch { + r#match: SearchQueryMatchData::Bytes(vec![1, 2, 3]), + start: 12, + end: 24, + }; + + // Do the same for bytes + let value = serde_json::to_value(data).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "match": [1, 2, 3], + "start": 12, + "end": 24, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "match": "some text", + "start": 12, + "end": 24, + }); + + let submatch: SearchQuerySubmatch = serde_json::from_value(value).unwrap(); + assert_eq!( + submatch, + SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("some text")), + start: 12, + end: 24, + } + ); + + // Do the same for bytes + let value = serde_json::json!({ + "match": [1, 2, 3], + "start": 12, + "end": 24, + }); + + 
let submatch: SearchQuerySubmatch = serde_json::from_value(value).unwrap(); + assert_eq!( + submatch, + SearchQuerySubmatch { + r#match: SearchQueryMatchData::Bytes(vec![1, 2, 3]), + start: 12, + end: 24, + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let submatch = SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("some text")), + start: 12, + end: 24, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&submatch).unwrap(); + + // Do the same for bytes + let submatch = SearchQuerySubmatch { + r#match: SearchQueryMatchData::Bytes(vec![1, 2, 3]), + start: 12, + end: 24, + }; + + let _ = rmp_serde::encode::to_vec_named(&submatch).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("some text")), + start: 12, + end: 24, + }) + .unwrap(); + + let submatch: SearchQuerySubmatch = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + submatch, + SearchQuerySubmatch { + r#match: SearchQueryMatchData::Text(String::from("some text")), + start: 12, + end: 24, + } + ); + + // Do the same for bytes + let buf = rmp_serde::encode::to_vec_named(&SearchQuerySubmatch { + r#match: SearchQueryMatchData::Bytes(vec![1, 2, 3]), + start: 12, + end: 24, + }) + .unwrap(); + + let submatch: SearchQuerySubmatch = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + submatch, + SearchQuerySubmatch { + r#match: SearchQueryMatchData::Bytes(vec![1, 2, 3]), + start: 12, + end: 24, + } + ); + } + } + + mod search_query_match_data { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let data = SearchQueryMatchData::Text(String::from("some text")); + + let value = serde_json::to_value(data).unwrap(); + assert_eq!(value, serde_json::json!("some text")); + + // Do the same for bytes + let data = SearchQueryMatchData::Bytes(vec![1, 2, 3]); + + let value = serde_json::to_value(data).unwrap(); + assert_eq!(value, serde_json::json!([1, 2, 3])); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("some text"); + + let data: SearchQueryMatchData = serde_json::from_value(value).unwrap(); + assert_eq!(data, SearchQueryMatchData::Text(String::from("some text"))); + + // Do the same for bytes + let value = serde_json::json!([1, 2, 3]); + + let data: SearchQueryMatchData = serde_json::from_value(value).unwrap(); + assert_eq!(data, SearchQueryMatchData::Bytes(vec![1, 2, 3])); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let data = SearchQueryMatchData::Text(String::from("some text")); + + // NOTE: We don't actually check the output here because it's an implementation 
detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&data).unwrap(); + + // Do the same for bytes + let data = SearchQueryMatchData::Bytes(vec![1, 2, 3]); + let _ = rmp_serde::encode::to_vec_named(&data).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SearchQueryMatchData::Text(String::from( + "some text", + ))) + .unwrap(); + + let data: SearchQueryMatchData = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(data, SearchQueryMatchData::Text(String::from("some text"))); + + // Do the same for bytes + let buf = rmp_serde::encode::to_vec_named(&SearchQueryMatchData::Bytes(vec![1, 2, 3])) + .unwrap(); + + let data: SearchQueryMatchData = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(data, SearchQueryMatchData::Bytes(vec![1, 2, 3])); + } + } +} diff --git a/distant-protocol/src/common/system.rs b/distant-protocol/src/common/system.rs new file mode 100644 index 0000000..b83abd0 --- /dev/null +++ b/distant-protocol/src/common/system.rs @@ -0,0 +1,142 @@ +use std::path::PathBuf; + +use serde::{Deserialize, Serialize}; + +/// Represents information about a system +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SystemInfo { + /// Family of the operating system as described in + /// https://doc.rust-lang.org/std/env/consts/constant.FAMILY.html + pub family: String, + + /// Name of the specific operating system as described in + /// 
https://doc.rust-lang.org/std/env/consts/constant.OS.html + pub os: String, + + /// Architecture of the CPI as described in + /// https://doc.rust-lang.org/std/env/consts/constant.ARCH.html + pub arch: String, + + /// Current working directory of the running server process + pub current_dir: PathBuf, + + /// Primary separator for path components for the current platform + /// as defined in https://doc.rust-lang.org/std/path/constant.MAIN_SEPARATOR.html + pub main_separator: char, + + /// Name of the user running the server process + pub username: String, + + /// Default shell tied to user running the server process + pub shell: String, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let info = SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }; + + let value = serde_json::to_value(info).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "family": "family", + "os": "os", + "arch": "arch", + "current_dir": "current-dir", + "main_separator": '/', + "username": "username", + "shell": "shell", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "family": "family", + "os": "os", + "arch": "arch", + "current_dir": "current-dir", + "main_separator": '/', + "username": "username", + "shell": "shell", + }); + + let info: SystemInfo = serde_json::from_value(value).unwrap(); + assert_eq!( + info, + SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let info = SystemInfo { + family: String::from("family"), 
+ os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&info).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }) + .unwrap(); + + let info: SystemInfo = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + info, + SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + } + ); + } +} diff --git a/distant-protocol/src/common/version.rs b/distant-protocol/src/common/version.rs new file mode 100644 index 0000000..92d79ca --- /dev/null +++ b/distant-protocol/src/common/version.rs @@ -0,0 +1,130 @@ +use serde::{Deserialize, Serialize}; + +use crate::common::{Capabilities, SemVer}; + +/// Represents version information. 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Version { + /// General version of server (arbitrary format) + pub server_version: String, + + /// Protocol version + pub protocol_version: SemVer, + + /// Capabilities of the server + pub capabilities: Capabilities, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::common::Capability; + + #[test] + fn should_be_able_to_serialize_to_json() { + let version = Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: [Capability { + kind: String::from("some kind"), + description: String::from("some description"), + }] + .into_iter() + .collect(), + }; + + let value = serde_json::to_value(version).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "server_version": "some version", + "protocol_version": [1, 2, 3], + "capabilities": [{ + "kind": "some kind", + "description": "some description", + }] + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "server_version": "some version", + "protocol_version": [1, 2, 3], + "capabilities": [{ + "kind": "some kind", + "description": "some description", + }] + }); + + let version: Version = serde_json::from_value(value).unwrap(); + assert_eq!( + version, + Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: [Capability { + kind: String::from("some kind"), + description: String::from("some description"), + }] + .into_iter() + .collect(), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let version = Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: [Capability { + kind: String::from("some kind"), + description: String::from("some description"), + }] + .into_iter() + .collect(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we 
change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&version).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: [Capability { + kind: String::from("some kind"), + description: String::from("some description"), + }] + .into_iter() + .collect(), + }) + .unwrap(); + + let version: Version = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + version, + Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: [Capability { + kind: String::from("some kind"), + description: String::from("some description"), + }] + .into_iter() + .collect(), + } + ); + } +} diff --git a/distant-protocol/src/lib.rs b/distant-protocol/src/lib.rs new file mode 100644 index 0000000..eb450e6 --- /dev/null +++ b/distant-protocol/src/lib.rs @@ -0,0 +1,17 @@ +mod common; +mod msg; +mod request; +mod response; +mod utils; + +pub use common::*; +pub use msg::*; +pub use request::*; +pub use response::*; + +/// Protocol version indicated by the tuple of (major, minor, patch). +/// +/// This is different from the crate version, which matches that of the complete suite of distant +/// crates. Rather, this verison is used to provide stability indicators when the protocol itself +/// changes across crate versions. 
+pub const PROTOCOL_VERSION: SemVer = (0, 1, 0); diff --git a/distant-protocol/src/msg.rs b/distant-protocol/src/msg.rs new file mode 100644 index 0000000..7e7e326 --- /dev/null +++ b/distant-protocol/src/msg.rs @@ -0,0 +1,192 @@ +use derive_more::From; +use serde::{Deserialize, Serialize}; + +/// Represents a wrapper around a message, supporting single and batch payloads. +#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum Msg { + Single(T), + Batch(Vec), +} + +impl Msg { + /// Creates a new msg with a singular payload. + #[inline] + pub fn single(payload: T) -> Self { + Self::Single(payload) + } + + /// Creates a new msg with a batch payload. + pub fn batch(payloads: I) -> Self + where + I: IntoIterator, + { + Self::Batch(payloads.into_iter().collect()) + } + + /// Returns true if msg has a single payload. + #[inline] + pub fn is_single(&self) -> bool { + matches!(self, Self::Single(_)) + } + + /// Returns reference to single value if msg is single variant. + #[inline] + pub fn as_single(&self) -> Option<&T> { + match self { + Self::Single(x) => Some(x), + _ => None, + } + } + + /// Returns mutable reference to single value if msg is single variant. + #[inline] + pub fn as_mut_single(&mut self) -> Option<&T> { + match self { + Self::Single(x) => Some(x), + _ => None, + } + } + + /// Returns the single value if msg is single variant. + #[inline] + pub fn into_single(self) -> Option { + match self { + Self::Single(x) => Some(x), + _ => None, + } + } + + /// Returns true if msg has a batch of payloads. + #[inline] + pub fn is_batch(&self) -> bool { + matches!(self, Self::Batch(_)) + } + + /// Returns reference to batch value if msg is batch variant. + #[inline] + pub fn as_batch(&self) -> Option<&[T]> { + match self { + Self::Batch(x) => Some(x), + _ => None, + } + } + + /// Returns mutable reference to batch value if msg is batch variant. 
+ #[inline] + pub fn as_mut_batch(&mut self) -> Option<&mut [T]> { + match self { + Self::Batch(x) => Some(x), + _ => None, + } + } + + /// Returns the batch value if msg is batch variant. + #[inline] + pub fn into_batch(self) -> Option> { + match self { + Self::Batch(x) => Some(x), + _ => None, + } + } + + /// Convert into a collection of payload data. + #[inline] + pub fn into_vec(self) -> Vec { + match self { + Self::Single(x) => vec![x], + Self::Batch(x) => x, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod single { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let msg = Msg::single("hello world"); + + let value = serde_json::to_value(msg).unwrap(); + assert_eq!(value, serde_json::json!("hello world")); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!("hello world"); + + let msg: Msg = serde_json::from_value(value).unwrap(); + assert_eq!(msg, Msg::single(String::from("hello world"))); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let msg = Msg::single("hello world"); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&msg).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Msg::single("hello world")).unwrap(); + + let msg: Msg = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(msg, Msg::single(String::from("hello world"))); + } + } + + mod batch { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let msg = Msg::batch(["hello world"]); + + let value = serde_json::to_value(msg).unwrap(); + assert_eq!(value, serde_json::json!(["hello world"])); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!(["hello world"]); + + let msg: Msg = serde_json::from_value(value).unwrap(); + assert_eq!(msg, Msg::batch([String::from("hello world")])); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let msg = Msg::batch(["hello world"]); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&msg).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Msg::batch(["hello world"])).unwrap(); + + let msg: Msg = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(msg, Msg::batch([String::from("hello world")])); + } + } +} diff --git a/distant-protocol/src/request.rs b/distant-protocol/src/request.rs new file mode 100644 index 0000000..cf38976 --- /dev/null +++ b/distant-protocol/src/request.rs @@ -0,0 +1,2981 @@ +use std::collections::HashMap; +use std::path::PathBuf; + +use derive_more::IsVariant; +use serde::{Deserialize, Serialize}; +use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString}; + +use crate::common::{ + ChangeKind, Cmd, Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions, +}; +use crate::utils; + +/// Mapping of environment variables +pub type Environment = HashMap; + +/// Represents the payload of a request to be performed on the remote machine +#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)] +#[strum_discriminants(derive( + AsRefStr, + strum::Display, + EnumIter, + EnumMessage, + EnumString, + Hash, + PartialOrd, + Ord, + IsVariant, + Serialize, + Deserialize +))] +#[strum_discriminants(name(RequestKind))] +#[strum_discriminants(strum(serialize_all = "snake_case"))] +#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] +pub enum Request { + /// Reads a file from the specified path on the remote machine + #[strum_discriminants(strum(message = "Supports reading binary file"))] + FileRead { + /// The path to the file on the remote machine + path: PathBuf, + }, + + /// Reads a file from the specified path on the remote machine + /// and treats the contents as text + #[strum_discriminants(strum(message = "Supports reading text file"))] + FileReadText { + /// The path to the file on the remote machine + path: PathBuf, + }, + + /// Writes a file, creating it if it does not exist, and overwriting any existing content + /// on the remote machine + 
#[strum_discriminants(strum(message = "Supports writing binary file"))] + FileWrite { + /// The path to the file on the remote machine + path: PathBuf, + + /// Data for server-side writing of content + #[serde(with = "serde_bytes")] + data: Vec, + }, + + /// Writes a file using text instead of bytes, creating it if it does not exist, + /// and overwriting any existing content on the remote machine + #[strum_discriminants(strum(message = "Supports writing text file"))] + FileWriteText { + /// The path to the file on the remote machine + path: PathBuf, + + /// Data for server-side writing of content + text: String, + }, + + /// Appends to a file, creating it if it does not exist, on the remote machine + #[strum_discriminants(strum(message = "Supports appending to binary file"))] + FileAppend { + /// The path to the file on the remote machine + path: PathBuf, + + /// Data for server-side writing of content + #[serde(with = "serde_bytes")] + data: Vec, + }, + + /// Appends text to a file, creating it if it does not exist, on the remote machine + #[strum_discriminants(strum(message = "Supports appending to text file"))] + FileAppendText { + /// The path to the file on the remote machine + path: PathBuf, + + /// Data for server-side writing of content + text: String, + }, + + /// Reads a directory from the specified path on the remote machine + #[strum_discriminants(strum(message = "Supports reading directory"))] + DirRead { + /// The path to the directory on the remote machine + path: PathBuf, + + /// Maximum depth to traverse with 0 indicating there is no maximum + /// depth and 1 indicating the most immediate children within the + /// directory + #[serde(default = "utils::one", skip_serializing_if = "utils::is_one")] + depth: usize, + + /// Whether or not to return absolute or relative paths + #[serde(default, skip_serializing_if = "utils::is_false")] + absolute: bool, + + /// Whether or not to canonicalize the resulting paths, meaning + /// returning the canonical, 
absolute form of a path with all + /// intermediate components normalized and symbolic links resolved + /// + /// Note that the flag absolute must be true to have absolute paths + /// returned, even if canonicalize is flagged as true + #[serde(default, skip_serializing_if = "utils::is_false")] + canonicalize: bool, + + /// Whether or not to include the root directory in the retrieved + /// entries + /// + /// If included, the root directory will also be a canonicalized, + /// absolute path and will not follow any of the other flags + #[serde(default, skip_serializing_if = "utils::is_false")] + include_root: bool, + }, + + /// Creates a directory on the remote machine + #[strum_discriminants(strum(message = "Supports creating directory"))] + DirCreate { + /// The path to the directory on the remote machine + path: PathBuf, + + /// Whether or not to create all parent directories + #[serde(default, skip_serializing_if = "utils::is_false")] + all: bool, + }, + + /// Removes a file or directory on the remote machine + #[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))] + Remove { + /// The path to the file or directory on the remote machine + path: PathBuf, + + /// Whether or not to remove all contents within directory if is a directory. 
+ /// Does nothing different for files + #[serde(default, skip_serializing_if = "utils::is_false")] + force: bool, + }, + + /// Copies a file or directory on the remote machine + #[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))] + Copy { + /// The path to the file or directory on the remote machine + src: PathBuf, + + /// New location on the remote machine for copy of file or directory + dst: PathBuf, + }, + + /// Moves/renames a file or directory on the remote machine + #[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))] + Rename { + /// The path to the file or directory on the remote machine + src: PathBuf, + + /// New location on the remote machine for the file or directory + dst: PathBuf, + }, + + /// Watches a path for changes + #[strum_discriminants(strum(message = "Supports watching filesystem for changes"))] + Watch { + /// The path to the file, directory, or symlink on the remote machine + path: PathBuf, + + /// If true, will recursively watch for changes within directories, othewise + /// will only watch for changes immediately within directories + #[serde(default, skip_serializing_if = "utils::is_false")] + recursive: bool, + + /// Filter to only report back specified changes + #[serde(default, skip_serializing_if = "Vec::is_empty")] + only: Vec, + + /// Filter to report back changes except these specified changes + #[serde(default, skip_serializing_if = "Vec::is_empty")] + except: Vec, + }, + + /// Unwatches a path for changes, meaning no additional changes will be reported + #[strum_discriminants(strum(message = "Supports unwatching filesystem for changes"))] + Unwatch { + /// The path to the file, directory, or symlink on the remote machine + path: PathBuf, + }, + + /// Checks whether the given path exists + #[strum_discriminants(strum(message = "Supports checking if a path exists"))] + Exists { + /// The path to the file or directory on the remote machine + path: 
PathBuf, + }, + + /// Retrieves filesystem metadata for the specified path on the remote machine + #[strum_discriminants(strum( + message = "Supports retrieving metadata about a file, directory, or symlink" + ))] + Metadata { + /// The path to the file, directory, or symlink on the remote machine + path: PathBuf, + + /// Whether or not to include a canonicalized version of the path, meaning + /// returning the canonical, absolute form of a path with all + /// intermediate components normalized and symbolic links resolved + #[serde(default, skip_serializing_if = "utils::is_false")] + canonicalize: bool, + + /// Whether or not to follow symlinks to determine absolute file type (dir/file) + #[serde(default, skip_serializing_if = "utils::is_false")] + resolve_file_type: bool, + }, + + /// Sets permissions on a file, directory, or symlink on the remote machine + #[strum_discriminants(strum( + message = "Supports setting permissions on a file, directory, or symlink" + ))] + SetPermissions { + /// The path to the file, directory, or symlink on the remote machine + path: PathBuf, + + /// New permissions to apply to the file, directory, or symlink + permissions: Permissions, + + /// Additional options to supply when setting permissions + #[serde(default)] + options: SetPermissionsOptions, + }, + + /// Searches filesystem using the provided query + #[strum_discriminants(strum(message = "Supports searching filesystem using queries"))] + Search { + /// Query to perform against the filesystem + query: SearchQuery, + }, + + /// Cancels an active search being run against the filesystem + #[strum_discriminants(strum( + message = "Supports canceling an active search against the filesystem" + ))] + CancelSearch { + /// Id of the search to cancel + id: SearchId, + }, + + /// Spawns a new process on the remote machine + #[strum_discriminants(strum(message = "Supports spawning a process"))] + ProcSpawn { + /// The full command to run including arguments + cmd: Cmd, + + /// Environment 
to provide to the remote process + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + environment: Environment, + + /// Alternative current directory for the remote process + #[serde(default, skip_serializing_if = "Option::is_none")] + current_dir: Option, + + /// If provided, will spawn process in a pty, otherwise spawns directly + #[serde(default, skip_serializing_if = "Option::is_none")] + pty: Option, + }, + + /// Kills a process running on the remote machine + #[strum_discriminants(strum(message = "Supports killing a spawned process"))] + ProcKill { + /// Id of the actively-running process + id: ProcessId, + }, + + /// Sends additional data to stdin of running process + #[strum_discriminants(strum(message = "Supports sending stdin to a spawned process"))] + ProcStdin { + /// Id of the actively-running process to send stdin data + id: ProcessId, + + /// Data to send to a process's stdin pipe + #[serde(with = "serde_bytes")] + data: Vec, + }, + + /// Resize pty of remote process + #[strum_discriminants(strum(message = "Supports resizing the pty of a spawned process"))] + ProcResizePty { + /// Id of the actively-running process whose pty to resize + id: ProcessId, + + /// The new pty dimensions + size: PtySize, + }, + + /// Retrieve information about the server and the system it is on + #[strum_discriminants(strum(message = "Supports retrieving system information"))] + SystemInfo {}, + + /// Retrieve information about the server's protocol version + #[strum_discriminants(strum(message = "Supports retrieving version"))] + Version {}, +} + +#[cfg(test)] +mod tests { + use super::*; + + mod file_read { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::FileRead { + path: PathBuf::from("path"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "file_read", + "path": "path", + }) + ); + } + + #[test] + fn 
should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "file_read", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::FileRead { + path: PathBuf::from("path"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::FileRead { + path: PathBuf::from("path"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::FileRead { + path: PathBuf::from("path"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::FileRead { + path: PathBuf::from("path"), + } + ); + } + } + + mod file_read_text { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::FileReadText { + path: PathBuf::from("path"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "file_read_text", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "file_read_text", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::FileReadText { + path: PathBuf::from("path"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::FileReadText { + path: PathBuf::from("path"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::FileReadText { + path: PathBuf::from("path"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::FileReadText { + path: PathBuf::from("path"), + } + ); + } + } + + mod file_write { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::FileWrite { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "file_write", + "path": "path", + "data": [0, 1, 2, u8::MAX], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "file_write", + "path": "path", + "data": [0, 1, 2, u8::MAX], + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::FileWrite { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::FileWrite { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::FileWrite { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::FileWrite { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + } + ); + } + } + + mod file_write_text { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::FileWriteText { + path: PathBuf::from("path"), + text: String::from("text"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "file_write_text", + "path": "path", + "text": "text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "file_write_text", + "path": "path", + "text": "text", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::FileWriteText { + path: PathBuf::from("path"), + text: String::from("text"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::FileWriteText { + path: PathBuf::from("path"), + text: String::from("text"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::FileWriteText { + path: PathBuf::from("path"), + text: String::from("text"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::FileWriteText { + path: PathBuf::from("path"), + text: String::from("text"), + } + ); + } + } + + mod file_append { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::FileAppend { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "file_append", + "path": "path", + "data": [0, 1, 2, u8::MAX], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "file_append", + "path": "path", + "data": [0, 1, 2, u8::MAX], + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::FileAppend { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::FileAppend { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::FileAppend { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::FileAppend { + path: PathBuf::from("path"), + data: vec![0, 1, 2, u8::MAX], + } + ); + } + } + + mod file_append_text { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::FileAppendText { + path: PathBuf::from("path"), + text: String::from("text"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "file_append_text", + "path": "path", + "text": "text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "file_append_text", + "path": "path", + "text": "text", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::FileAppendText { + path: PathBuf::from("path"), + text: String::from("text"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::FileAppendText { + path: PathBuf::from("path"), + text: String::from("text"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::FileAppendText { + path: PathBuf::from("path"), + text: String::from("text"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::FileAppendText { + path: PathBuf::from("path"), + text: String::from("text"), + } + ); + } + } + + mod dir_read { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::DirRead { + path: PathBuf::from("path"), + depth: 1, + absolute: false, + canonicalize: false, + include_root: false, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "dir_read", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::DirRead { + path: PathBuf::from("path"), + depth: usize::MAX, + absolute: true, + canonicalize: true, + include_root: true, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "dir_read", + "path": "path", + "depth": usize::MAX, + "absolute": true, + "canonicalize": true, + "include_root": true, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "dir_read", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::DirRead { + path: PathBuf::from("path"), + depth: 1, + absolute: false, + canonicalize: false, + include_root: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "dir_read", + "path": "path", + "depth": usize::MAX, + "absolute": true, + "canonicalize": true, + "include_root": true, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::DirRead { + path: 
PathBuf::from("path"), + depth: usize::MAX, + absolute: true, + canonicalize: true, + include_root: true, + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::DirRead { + path: PathBuf::from("path"), + depth: 1, + absolute: false, + canonicalize: false, + include_root: false, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::DirRead { + path: PathBuf::from("path"), + depth: usize::MAX, + absolute: true, + canonicalize: true, + include_root: true, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::DirRead { + path: PathBuf::from("path"), + depth: 1, + absolute: false, + canonicalize: false, + include_root: false, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::DirRead { + path: PathBuf::from("path"), + depth: 1, + absolute: false, + canonicalize: false, + include_root: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::DirRead { + path: PathBuf::from("path"), + depth: usize::MAX, + absolute: true, + canonicalize: true, + include_root: true, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::DirRead { + path: PathBuf::from("path"), + depth: usize::MAX, + absolute: true, + canonicalize: true, + include_root: true, + } + ); + } + } + + mod dir_create { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::DirCreate { + path: PathBuf::from("path"), + all: false, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "dir_create", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::DirCreate { + path: PathBuf::from("path"), + all: true, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "dir_create", + "path": "path", + "all": true, + }) + ); + } + + #[test] + fn 
should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "dir_create", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::DirCreate { + path: PathBuf::from("path"), + all: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "dir_create", + "path": "path", + "all": true, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::DirCreate { + path: PathBuf::from("path"), + all: true, + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::DirCreate { + path: PathBuf::from("path"), + all: false, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::DirCreate { + path: PathBuf::from("path"), + all: true, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. 
This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::DirCreate { + path: PathBuf::from("path"), + all: false, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::DirCreate { + path: PathBuf::from("path"), + all: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::DirCreate { + path: PathBuf::from("path"), + all: true, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::DirCreate { + path: PathBuf::from("path"), + all: true, + } + ); + } + } + + mod remove { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::Remove { + path: PathBuf::from("path"), + force: false, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "remove", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::Remove { + path: PathBuf::from("path"), + force: true, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "remove", + "path": "path", + "force": true, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "remove", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + 
assert_eq!( + payload, + Request::Remove { + path: PathBuf::from("path"), + force: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "remove", + "path": "path", + "force": true, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Remove { + path: PathBuf::from("path"), + force: true, + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::Remove { + path: PathBuf::from("path"), + force: false, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::Remove { + path: PathBuf::from("path"), + force: true, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::Remove { + path: PathBuf::from("path"), + force: false, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Remove { + path: PathBuf::from("path"), + force: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Remove { + path: PathBuf::from("path"), + force: true, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Remove { + path: PathBuf::from("path"), + force: true, + } + ); + } + } + + mod copy { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::Copy { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "copy", + "src": "src", + "dst": "dst", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "copy", + "src": "src", + "dst": "dst", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Copy { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::Copy { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. 
This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Copy { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Copy { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + } + ); + } + } + + mod rename { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::Rename { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "rename", + "src": "src", + "dst": "dst", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "rename", + "src": "src", + "dst": "dst", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Rename { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::Rename { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. 
This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Rename { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Rename { + src: PathBuf::from("src"), + dst: PathBuf::from("dst"), + } + ); + } + } + + mod watch { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::Watch { + path: PathBuf::from("path"), + recursive: false, + only: Vec::new(), + except: Vec::new(), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "watch", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::Watch { + path: PathBuf::from("path"), + recursive: true, + only: vec![ChangeKind::Access], + except: vec![ChangeKind::Modify], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "watch", + "path": "path", + "recursive": true, + "only": ["access"], + "except": ["modify"], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "watch", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + 
Request::Watch { + path: PathBuf::from("path"), + recursive: false, + only: Vec::new(), + except: Vec::new(), + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "watch", + "path": "path", + "recursive": true, + "only": ["access"], + "except": ["modify"], + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Watch { + path: PathBuf::from("path"), + recursive: true, + only: vec![ChangeKind::Access], + except: vec![ChangeKind::Modify], + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::Watch { + path: PathBuf::from("path"), + recursive: false, + only: Vec::new(), + except: Vec::new(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::Watch { + path: PathBuf::from("path"), + recursive: true, + only: vec![ChangeKind::Access], + except: vec![ChangeKind::Modify], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Watch { + path: PathBuf::from("path"), + recursive: false, + only: Vec::new(), + except: Vec::new(), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Watch { + path: PathBuf::from("path"), + recursive: false, + only: Vec::new(), + except: Vec::new(), + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::Watch { + path: PathBuf::from("path"), + recursive: true, + only: vec![ChangeKind::Access], + except: vec![ChangeKind::Modify], + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Watch { + path: PathBuf::from("path"), + recursive: true, + only: vec![ChangeKind::Access], + except: vec![ChangeKind::Modify], + } + ); + } + } + + mod unwatch { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::Unwatch { + path: PathBuf::from("path"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "unwatch", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "unwatch", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Unwatch { + path: PathBuf::from("path") + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::Unwatch { + path: PathBuf::from("path"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::Unwatch { + path: PathBuf::from("path"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Unwatch { + path: PathBuf::from("path"), + } + ); + } + } + + mod exists { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::Exists { + path: PathBuf::from("path"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "exists", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "exists", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Exists { + path: PathBuf::from("path") + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::Exists { + path: PathBuf::from("path"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::Exists { + path: PathBuf::from("path"), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Exists { + path: PathBuf::from("path"), + } + ); + } + } + + mod metadata { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::Metadata { + path: PathBuf::from("path"), + canonicalize: false, + resolve_file_type: false, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "metadata", + "path": "path", + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::Metadata { + path: PathBuf::from("path"), + canonicalize: true, + resolve_file_type: true, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "metadata", + "path": "path", + "canonicalize": true, + "resolve_file_type": true, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "metadata", + "path": "path", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Metadata { + path: PathBuf::from("path"), + canonicalize: false, + resolve_file_type: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "metadata", + "path": "path", + "canonicalize": true, + "resolve_file_type": true, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Metadata { + path: PathBuf::from("path"), + canonicalize: true, + resolve_file_type: true, + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::Metadata { + path: PathBuf::from("path"), + 
canonicalize: false, + resolve_file_type: false, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::Metadata { + path: PathBuf::from("path"), + canonicalize: true, + resolve_file_type: true, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::Metadata { + path: PathBuf::from("path"), + canonicalize: false, + resolve_file_type: false, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Metadata { + path: PathBuf::from("path"), + canonicalize: false, + resolve_file_type: false, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Metadata { + path: PathBuf::from("path"), + canonicalize: true, + resolve_file_type: true, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Metadata { + path: PathBuf::from("path"), + canonicalize: true, + resolve_file_type: true, + } + ); + } + } + + mod set_permissions { + use super::*; + + const fn full_permissions() -> Permissions { + Permissions { + owner_read: Some(true), + owner_write: Some(true), + owner_exec: Some(true), + group_read: Some(true), + group_write: Some(true), + group_exec: Some(true), + other_read: Some(true), + other_write: Some(true), + other_exec: Some(true), + } + } + + const fn full_options() -> SetPermissionsOptions { + SetPermissionsOptions { + exclude_symlinks: true, + follow_symlinks: true, + recursive: true, + } + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::SetPermissions { + path: PathBuf::from("path"), + permissions: Default::default(), + options: Default::default(), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": 
"set_permissions", + "path": "path", + "permissions": {}, + "options": {}, + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::SetPermissions { + path: PathBuf::from("path"), + permissions: full_permissions(), + options: full_options(), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "set_permissions", + "path": "path", + "permissions": { + "owner_read": true, + "owner_write": true, + "owner_exec": true, + "group_read": true, + "group_write": true, + "group_exec": true, + "other_read": true, + "other_write": true, + "other_exec": true, + }, + "options": { + "exclude_symlinks": true, + "follow_symlinks": true, + "recursive": true, + }, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "set_permissions", + "path": "path", + "permissions": {}, + "options": {}, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::SetPermissions { + path: PathBuf::from("path"), + permissions: Default::default(), + options: Default::default(), + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "set_permissions", + "path": "path", + "permissions": { + "owner_read": true, + "owner_write": true, + "owner_exec": true, + "group_read": true, + "group_write": true, + "group_exec": true, + "other_read": true, + "other_write": true, + "other_exec": true, + }, + "options": { + "exclude_symlinks": true, + "follow_symlinks": true, + "recursive": true, + }, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::SetPermissions { + path: PathBuf::from("path"), + permissions: full_permissions(), + options: full_options(), + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + 
let payload = Request::SetPermissions { + path: PathBuf::from("path"), + permissions: Default::default(), + options: Default::default(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::SetPermissions { + path: PathBuf::from("path"), + permissions: full_permissions(), + options: full_options(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::SetPermissions { + path: PathBuf::from("path"), + permissions: Default::default(), + options: Default::default(), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::SetPermissions { + path: PathBuf::from("path"), + permissions: Default::default(), + options: Default::default(), + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::SetPermissions { + path: PathBuf::from("path"), + permissions: full_permissions(), + options: full_options(), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::SetPermissions { + path: PathBuf::from("path"), + permissions: full_permissions(), + options: full_options(), + } + ); + } + } + + mod search { + use super::*; + use crate::common::{ + FileType, SearchQueryCondition, SearchQueryOptions, SearchQueryTarget, + }; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: Default::default(), + }, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "search", + "query": { + "target": "contents", + "condition": { + "type": "equals", + "value": "hello world", + }, + "paths": ["path"], + "options": {}, + }, + }) + ); + } + + #[test] + fn 
should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }, + }, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "search", + "query": { + "target": "contents", + "condition": { + "type": "equals", + "value": "hello world", + }, + "paths": ["path"], + "options": { + "allowed_file_types": ["file"], + "include": { + "type": "equals", + "value": "hello", + }, + "exclude": { + "type": "contains", + "value": "world", + }, + "upward": true, + "follow_symbolic_links": true, + "limit": u64::MAX, + "max_depth": u64::MAX, + "pagination": u64::MAX, + }, + }, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "search", + "query": { + "target": "contents", + "condition": { + "type": "equals", + "value": "hello world", + }, + "paths": ["path"], + }, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: Default::default(), + }, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "search", + "query": { + "target": "contents", + "condition": { + 
"type": "equals", + "value": "hello world", + }, + "paths": ["path"], + "options": { + "allowed_file_types": ["file"], + "include": { + "type": "equals", + "value": "hello", + }, + "exclude": { + "type": "contains", + "value": "world", + }, + "upward": true, + "follow_symbolic_links": true, + "limit": u64::MAX, + "max_depth": u64::MAX, + "pagination": u64::MAX, + }, + }, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }, + }, + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: Default::default(), + }, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }, + }, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: Default::default(), + }, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: Default::default(), + }, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: SearchQueryOptions { + allowed_file_types: [FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }, + }, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::Search { + query: SearchQuery { + target: SearchQueryTarget::Contents, + condition: SearchQueryCondition::equals("hello world"), + paths: vec![PathBuf::from("path")], + options: SearchQueryOptions { + allowed_file_types: 
[FileType::File].into_iter().collect(), + include: Some(SearchQueryCondition::Equals { + value: String::from("hello"), + }), + exclude: Some(SearchQueryCondition::Contains { + value: String::from("world"), + }), + upward: true, + follow_symbolic_links: true, + limit: Some(u64::MAX), + max_depth: Some(u64::MAX), + pagination: Some(u64::MAX), + }, + }, + } + ); + } + } + + mod cancel_search { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::CancelSearch { id: u32::MAX }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "cancel_search", + "id": u32::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "cancel_search", + "id": u32::MAX, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Request::CancelSearch { id: u32::MAX }); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::CancelSearch { id: u32::MAX }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = + rmp_serde::encode::to_vec_named(&Request::CancelSearch { id: u32::MAX }).unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Request::CancelSearch { id: u32::MAX }); + } + } + + mod proc_spawn { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: Environment::new(), + current_dir: None, + pty: None, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_spawn", + "cmd": "echo some text", + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: [(String::from("hello"), String::from("world"))] + .into_iter() + .collect(), + current_dir: Some(PathBuf::from("current-dir")), + pty: Some(PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_spawn", + "cmd": "echo some text", + "environment": { "hello": "world" }, + "current_dir": "current-dir", + "pty": { + "rows": u16::MAX, + "cols": u16::MAX, + "pixel_width": u16::MAX, + "pixel_height": u16::MAX, + }, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "proc_spawn", + "cmd": "echo some text", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: Environment::new(), + current_dir: None, + pty: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "proc_spawn", + "cmd": "echo some text", + "environment": { 
"hello": "world" }, + "current_dir": "current-dir", + "pty": { + "rows": u16::MAX, + "cols": u16::MAX, + "pixel_width": u16::MAX, + "pixel_height": u16::MAX, + }, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: [(String::from("hello"), String::from("world"))] + .into_iter() + .collect(), + current_dir: Some(PathBuf::from("current-dir")), + pty: Some(PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }), + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: Environment::new(), + current_dir: None, + pty: None, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: [(String::from("hello"), String::from("world"))] + .into_iter() + .collect(), + current_dir: Some(PathBuf::from("current-dir")), + pty: Some(PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: Environment::new(), + current_dir: None, + pty: None, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: Environment::new(), + current_dir: None, + pty: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: [(String::from("hello"), String::from("world"))] + .into_iter() + .collect(), + current_dir: Some(PathBuf::from("current-dir")), + pty: Some(PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }), + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::ProcSpawn { + cmd: Cmd::new("echo some text"), + environment: [(String::from("hello"), String::from("world"))] + .into_iter() + .collect(), + current_dir: Some(PathBuf::from("current-dir")), + pty: Some(PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }), + } + ); + } + } + + mod proc_kill { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::ProcKill { id: u32::MAX }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_kill", + "id": u32::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "proc_kill", + "id": u32::MAX, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Request::ProcKill { id: u32::MAX }); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::ProcKill { id: u32::MAX }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::ProcKill { id: u32::MAX }).unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Request::ProcKill { id: u32::MAX }); + } + } + + mod proc_stdin { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::ProcStdin { + id: u32::MAX, + data: vec![0, 1, 2, 3, u8::MAX], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_stdin", + "id": u32::MAX, + "data": [0, 1, 2, 3, u8::MAX], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "proc_stdin", + "id": u32::MAX, + "data": [0, 1, 2, 3, u8::MAX], + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::ProcStdin { + id: u32::MAX, + data: vec![0, 1, 2, 3, u8::MAX], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::ProcStdin { + id: u32::MAX, + data: vec![0, 1, 2, 3, u8::MAX], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::ProcStdin { + id: u32::MAX, + data: vec![0, 1, 2, 3, u8::MAX], + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::ProcStdin { + id: u32::MAX, + data: vec![0, 1, 2, 3, u8::MAX], + } + ); + } + } + + mod proc_resize_pty { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::ProcResizePty { + id: u32::MAX, + size: PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_resize_pty", + "id": u32::MAX, + "size": { + "rows": u16::MAX, + "cols": u16::MAX, + "pixel_width": u16::MAX, + "pixel_height": u16::MAX, + }, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "proc_resize_pty", + "id": u32::MAX, + "size": { + "rows": u16::MAX, + "cols": u16::MAX, + "pixel_width": u16::MAX, + "pixel_height": u16::MAX, + }, + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Request::ProcResizePty { + id: u32::MAX, + size: PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }, + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::ProcResizePty { + id: u32::MAX, + size: PtySize { + rows: u16::MAX, + cols: 
u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }, + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::ProcResizePty { + id: u32::MAX, + size: PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }, + }) + .unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Request::ProcResizePty { + id: u32::MAX, + size: PtySize { + rows: u16::MAX, + cols: u16::MAX, + pixel_width: u16::MAX, + pixel_height: u16::MAX, + }, + } + ); + } + } + + mod system_info { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::SystemInfo {}; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "system_info", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "system_info", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Request::SystemInfo {}); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::SystemInfo {}; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and 
could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::SystemInfo {}).unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Request::SystemInfo {}); + } + } + + mod version { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Request::Version {}; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "version", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "version", + }); + + let payload: Request = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Request::Version {}); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Request::Version {}; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Request::Version {}).unwrap(); + + let payload: Request = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Request::Version {}); + } + } +} diff --git a/distant-protocol/src/response.rs b/distant-protocol/src/response.rs new file mode 100644 index 0000000..a0d9e88 --- /dev/null +++ b/distant-protocol/src/response.rs @@ -0,0 +1,1973 @@ +use std::io; + +use derive_more::IsVariant; +use serde::{Deserialize, Serialize}; +use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString}; + +use crate::common::{ + Change, DirEntry, Error, Metadata, ProcessId, SearchId, SearchQueryMatch, SystemInfo, Version, +}; + +/// Represents the payload of a successful response +#[derive( + Clone, Debug, PartialEq, Eq, AsRefStr, IsVariant, EnumDiscriminants, Serialize, Deserialize, +)] +#[strum_discriminants(derive( + AsRefStr, + strum::Display, + EnumIter, + EnumMessage, + EnumString, + Hash, + PartialOrd, + Ord, + IsVariant, + Serialize, + Deserialize +))] +#[strum_discriminants(name(ResponseKind))] +#[strum_discriminants(strum(serialize_all = "snake_case"))] +#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] +#[strum(serialize_all = "snake_case")] +pub enum Response { + /// General okay with no extra data, returned in cases like + /// creating or removing a directory, copying a file, or renaming + /// a file + Ok, + + /// General-purpose failure that occurred from some request + Error(Error), + + /// Response containing some arbitrary, binary 
data + Blob { + /// Binary data associated with the response + #[serde(with = "serde_bytes")] + data: Vec<u8>, + }, + + /// Response containing some arbitrary, text data + Text { + /// Text data associated with the response + data: String, + }, + + /// Response to reading a directory + DirEntries { + /// Entries contained within the requested directory + entries: Vec<DirEntry>, + + /// Errors encountered while scanning for entries + errors: Vec<Error>, + }, + + /// Response to a filesystem change for some watched file, directory, or symlink + Changed(Change), + + /// Response to checking if a path exists + Exists { value: bool }, + + /// Represents metadata about some filesystem object (file, directory, symlink) on remote machine + Metadata(Metadata), + + /// Represents a search being started + SearchStarted { + /// Arbitrary id associated with search + id: SearchId, + }, + + /// Represents some subset of results for a search query (may not be all of them) + SearchResults { + /// Arbitrary id associated with search + id: SearchId, + + /// Collection of matches from performing a query + matches: Vec<SearchQueryMatch>, + }, + + /// Represents a search being completed + SearchDone { + /// Arbitrary id associated with search + id: SearchId, + }, + + /// Response to starting a new process + ProcSpawned { + /// Arbitrary id associated with running process + id: ProcessId, + }, + + /// Actively-transmitted stdout as part of running process + ProcStdout { + /// Arbitrary id associated with running process + id: ProcessId, + + /// Data read from a process' stdout pipe + #[serde(with = "serde_bytes")] + data: Vec<u8>, + }, + + /// Actively-transmitted stderr as part of running process + ProcStderr { + /// Arbitrary id associated with running process + id: ProcessId, + + /// Data read from a process' stderr pipe + #[serde(with = "serde_bytes")] + data: Vec<u8>, + }, + + /// Response to a process finishing + ProcDone { + /// Arbitrary id associated with running process + id: ProcessId, + + /// Whether or not termination
was successful + success: bool, + + /// Exit code associated with termination, will be missing if terminated by signal + #[serde(default, skip_serializing_if = "Option::is_none")] + code: Option<i32>, + }, + + /// Response to retrieving information about the server and the system it is on + SystemInfo(SystemInfo), + + /// Response to retrieving information about the server's version + Version(Version), +} + +impl From<io::Error> for Response { + fn from(x: io::Error) -> Self { + Self::Error(Error::from(x)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod ok { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Ok; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "ok", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "ok", + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Response::Ok); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::Ok; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify.
+ let buf = rmp_serde::encode::to_vec_named(&Response::Ok).unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Response::Ok); + } + } + + mod error { + use super::*; + use crate::common::ErrorKind; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Error(Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }); + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "error", + "kind": "addr_in_use", + "description": "some description", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "error", + "kind": "addr_in_use", + "description": "some description", + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Error(Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::Error(Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::Error(Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + })) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Error(Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }) + ); + } + } + + mod blob { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Blob { + data: vec![0, 1, 2, u8::MAX], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "blob", + "data": [0, 1, 2, u8::MAX], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "blob", + "data": [0, 1, 2, u8::MAX], + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Blob { + data: vec![0, 1, 2, u8::MAX], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::Blob { + data: vec![0, 1, 2, u8::MAX], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::Blob { + data: vec![0, 1, 2, u8::MAX], + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Blob { + data: vec![0, 1, 2, u8::MAX], + } + ); + } + } + + mod text { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Text { + data: String::from("some text"), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "text", + "data": "some text", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "text", + "data": "some text", + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Text { + data: String::from("some text"), + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::Text { + data: String::from("some text"), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::Text { + data: String::from("some text"), + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Text { + data: String::from("some text"), + } + ); + } + } + + mod dir_entries { + use std::path::PathBuf; + + use super::*; + use crate::common::{ErrorKind, FileType}; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Response::DirEntries { + entries: Vec::new(), + errors: Vec::new(), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "dir_entries", + "entries": [], + "errors": [], + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Response::DirEntries { + entries: vec![DirEntry { + path: PathBuf::from("path"), + file_type: FileType::File, + depth: usize::MAX, + }], + errors: vec![Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "dir_entries", + "entries": [{ + "path": "path", + "file_type": "file", + "depth": usize::MAX, + }], + "errors": [{ + "kind": "addr_in_use", + "description": "some description", + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "dir_entries", + "entries": [], + "errors": [], + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::DirEntries { + entries: Vec::new(), + errors: Vec::new(), + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let value = serde_json::json!({ + "type": "dir_entries", + "entries": [{ + "path": "path", + "file_type": "file", + "depth": usize::MAX, + }], + "errors": [{ + "kind": "addr_in_use", + 
"description": "some description", + }], + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::DirEntries { + entries: vec![DirEntry { + path: PathBuf::from("path"), + file_type: FileType::File, + depth: usize::MAX, + }], + errors: vec![Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }], + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Response::DirEntries { + entries: Vec::new(), + errors: Vec::new(), + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Response::DirEntries { + entries: vec![DirEntry { + path: PathBuf::from("path"), + file_type: FileType::File, + depth: usize::MAX, + }], + errors: vec![Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::DirEntries { + entries: Vec::new(), + errors: Vec::new(), + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::DirEntries { + entries: Vec::new(), + errors: Vec::new(), + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::DirEntries { + entries: vec![DirEntry { + path: PathBuf::from("path"), + file_type: FileType::File, + depth: usize::MAX, + }], + errors: vec![Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }], + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::DirEntries { + entries: vec![DirEntry { + path: PathBuf::from("path"), + file_type: FileType::File, + depth: usize::MAX, + }], + errors: vec![Error { + kind: ErrorKind::AddrInUse, + description: String::from("some description"), + }], + } + ); + } + } + + mod changed { + use super::*; + use crate::common::ChangeKind; + use std::path::PathBuf; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Changed(Change { + kind: ChangeKind::Access, + paths: vec![PathBuf::from("path")], + }); + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "changed", + "kind": "access", + "paths": ["path"], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "changed", + "kind": "access", + "paths": ["path"], + }); + + let 
payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Changed(Change { + kind: ChangeKind::Access, + paths: vec![PathBuf::from("path")], + }) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::Changed(Change { + kind: ChangeKind::Access, + paths: vec![PathBuf::from("path")], + }); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::Changed(Change { + kind: ChangeKind::Access, + paths: vec![PathBuf::from("path")], + })) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Changed(Change { + kind: ChangeKind::Access, + paths: vec![PathBuf::from("path")], + }) + ); + } + } + + mod exists { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Exists { value: true }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "exists", + "value": true, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "exists", + "value": true, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Response::Exists { value: true }); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::Exists { value: true }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::Exists { value: true }).unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Response::Exists { value: true }); + } + } + + mod metadata { + use super::*; + use crate::common::{FileType, UnixMetadata, WindowsMetadata}; + use std::path::PathBuf; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Response::Metadata(Metadata { + canonicalized_path: None, + file_type: FileType::File, + len: 0, + readonly: false, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + }); + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "metadata", + "file_type": "file", + "len": 0, + "readonly": false, + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Response::Metadata(Metadata { + canonicalized_path: Some(PathBuf::from("path")), + file_type: FileType::File, + len: u64::MAX, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }); + + // NOTE: These values are too big to normally serialize, so we have to convert them to + // a string type, which is why the value here also needs to be a string. 
+ let u128_max_str = u128::MAX.to_string(); + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "metadata", + "canonicalized_path": "path", + "file_type": "file", + "len": u64::MAX, + "readonly": true, + "accessed": u128_max_str, + "created": u128_max_str, + "modified": u128_max_str, + "unix": { + "owner_read": true, + "owner_write": false, + "owner_exec": false, + "group_read": true, + "group_write": false, + "group_exec": false, + "other_read": true, + "other_write": false, + "other_exec": false, + }, + "windows": { + "archive": true, + "compressed": false, + "encrypted": true, + "hidden": false, + "integrity_stream": true, + "normal": false, + "not_content_indexed": true, + "no_scrub_data": false, + "offline": true, + "recall_on_data_access": false, + "recall_on_open": true, + "reparse_point": false, + "sparse_file": true, + "system": false, + "temporary": true, + } + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "metadata", + "file_type": "file", + "len": 0, + "readonly": false, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Metadata(Metadata { + canonicalized_path: None, + file_type: FileType::File, + len: 0, + readonly: false, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let u128_max_str = u128::MAX.to_string(); + let value = serde_json::json!({ + "type": "metadata", + "canonicalized_path": "path", + "file_type": "file", + "len": u64::MAX, + "readonly": true, + "accessed": u128_max_str, + "created": u128_max_str, + "modified": u128_max_str, + "unix": { + "owner_read": true, + "owner_write": false, + "owner_exec": false, + "group_read": true, + "group_write": false, + "group_exec": false, + "other_read": true, + "other_write": 
false, + "other_exec": false, + }, + "windows": { + "archive": true, + "compressed": false, + "encrypted": true, + "hidden": false, + "integrity_stream": true, + "normal": false, + "not_content_indexed": true, + "no_scrub_data": false, + "offline": true, + "recall_on_data_access": false, + "recall_on_open": true, + "reparse_point": false, + "sparse_file": true, + "system": false, + "temporary": true, + } + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Metadata(Metadata { + canonicalized_path: Some(PathBuf::from("path")), + file_type: FileType::File, + len: u64::MAX, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }) + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Response::Metadata(Metadata { + canonicalized_path: None, + file_type: FileType::File, + len: 0, + readonly: false, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + }); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Response::Metadata(Metadata { + canonicalized_path: Some(PathBuf::from("path")), + file_type: FileType::File, + len: u64::MAX, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }); + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::Metadata(Metadata { + canonicalized_path: None, + file_type: FileType::File, + len: 0, + readonly: false, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + })) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Metadata(Metadata { + canonicalized_path: None, + file_type: FileType::File, + len: 0, + readonly: false, + accessed: None, + created: None, + modified: None, + unix: None, + windows: None, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::Metadata(Metadata { + canonicalized_path: Some(PathBuf::from("path")), + file_type: FileType::File, + len: u64::MAX, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + })) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Metadata(Metadata { + 
canonicalized_path: Some(PathBuf::from("path")), + file_type: FileType::File, + len: u64::MAX, + readonly: true, + accessed: Some(u128::MAX), + created: Some(u128::MAX), + modified: Some(u128::MAX), + unix: Some(UnixMetadata { + owner_read: true, + owner_write: false, + owner_exec: false, + group_read: true, + group_write: false, + group_exec: false, + other_read: true, + other_write: false, + other_exec: false, + }), + windows: Some(WindowsMetadata { + archive: true, + compressed: false, + encrypted: true, + hidden: false, + integrity_stream: true, + normal: false, + not_content_indexed: true, + no_scrub_data: false, + offline: true, + recall_on_data_access: false, + recall_on_open: true, + reparse_point: false, + sparse_file: true, + system: false, + temporary: true, + }), + }) + ); + } + } + + mod search_started { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::SearchStarted { id: SearchId::MAX }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "search_started", + "id": SearchId::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "search_started", + "id": SearchId::MAX, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Response::SearchStarted { id: SearchId::MAX }); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::SearchStarted { id: SearchId::MAX }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = + rmp_serde::encode::to_vec_named(&Response::SearchStarted { id: SearchId::MAX }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Response::SearchStarted { id: SearchId::MAX }); + } + } + + mod search_results { + use super::*; + use crate::common::{SearchQueryContentsMatch, SearchQueryMatch, SearchQuerySubmatch}; + use std::path::PathBuf; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::SearchResults { + id: SearchId::MAX, + matches: vec![SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: "some lines".into(), + line_number: u64::MAX, + absolute_offset: u64::MAX, + submatches: vec![SearchQuerySubmatch::new("text", u64::MAX, u64::MAX)], + })], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "search_results", + "id": SearchId::MAX, + "matches": [{ + "type": "contents", + "path": "path", + "lines": "some lines", + "line_number": u64::MAX, + "absolute_offset": u64::MAX, + "submatches": [{ + "match": "text", + "start": u64::MAX, + "end": u64::MAX, + }], + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "search_results", + "id": SearchId::MAX, + "matches": [{ + "type": "contents", + "path": "path", + "lines": "some lines", + "line_number": u64::MAX, + "absolute_offset": u64::MAX, + "submatches": [{ + "match": "text", + "start": u64::MAX, + 
"end": u64::MAX, + }], + }], + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::SearchResults { + id: SearchId::MAX, + matches: vec![SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: "some lines".into(), + line_number: u64::MAX, + absolute_offset: u64::MAX, + submatches: vec![SearchQuerySubmatch::new("text", u64::MAX, u64::MAX)], + })], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::SearchResults { + id: SearchId::MAX, + matches: vec![SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: "some lines".into(), + line_number: u64::MAX, + absolute_offset: u64::MAX, + submatches: vec![SearchQuerySubmatch::new("text", u64::MAX, u64::MAX)], + })], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is results. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::SearchResults { + id: SearchId::MAX, + matches: vec![SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: "some lines".into(), + line_number: u64::MAX, + absolute_offset: u64::MAX, + submatches: vec![SearchQuerySubmatch::new("text", u64::MAX, u64::MAX)], + })], + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::SearchResults { + id: SearchId::MAX, + matches: vec![SearchQueryMatch::Contents(SearchQueryContentsMatch { + path: PathBuf::from("path"), + lines: "some lines".into(), + line_number: u64::MAX, + absolute_offset: u64::MAX, + submatches: vec![SearchQuerySubmatch::new("text", u64::MAX, u64::MAX)], + })], + } + ); + } + } + + mod search_done { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::SearchDone { id: SearchId::MAX }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "search_done", + "id": SearchId::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "search_done", + "id": SearchId::MAX, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Response::SearchDone { id: SearchId::MAX }); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::SearchDone { id: SearchId::MAX }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::SearchDone { id: SearchId::MAX }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Response::SearchDone { id: SearchId::MAX }); + } + } + + mod proc_spawned { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::ProcSpawned { id: ProcessId::MAX }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_spawned", + "id": ProcessId::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "proc_spawned", + "id": ProcessId::MAX, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!(payload, Response::ProcSpawned { id: ProcessId::MAX }); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::ProcSpawned { id: ProcessId::MAX }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = + rmp_serde::encode::to_vec_named(&Response::ProcSpawned { id: ProcessId::MAX }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!(payload, Response::ProcSpawned { id: ProcessId::MAX }); + } + } + + mod proc_stdout { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::ProcStdout { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_stdout", + "id": ProcessId::MAX, + "data": vec![0, 1, 2, u8::MAX], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "proc_stdout", + "id": ProcessId::MAX, + "data": vec![0, 1, 2, u8::MAX], + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::ProcStdout { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::ProcStdout { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + }; + + // NOTE: We don't actually check the output here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::ProcStdout { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::ProcStdout { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + } + ); + } + } + + mod proc_stderr { + use super::*; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::ProcStderr { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_stderr", + "id": ProcessId::MAX, + "data": vec![0, 1, 2, u8::MAX], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "proc_stderr", + "id": ProcessId::MAX, + "data": vec![0, 1, 2, u8::MAX], + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::ProcStderr { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + } + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::ProcStderr { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + }; + + // NOTE: We don't actually check the errput here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. 
+ let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::ProcStderr { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::ProcStderr { + id: ProcessId::MAX, + data: vec![0, 1, 2, u8::MAX], + } + ); + } + } + + mod proc_done { + use super::*; + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_json() { + let payload = Response::ProcDone { + id: ProcessId::MAX, + success: false, + code: None, + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_done", + "id": ProcessId::MAX, + "success": false, + }) + ); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_json() { + let payload = Response::ProcDone { + id: ProcessId::MAX, + success: true, + code: Some(i32::MAX), + }; + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "proc_done", + "id": ProcessId::MAX, + "success": true, + "code": i32::MAX, + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_json() { + let value = serde_json::json!({ + "type": "proc_done", + "id": ProcessId::MAX, + "success": false, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::ProcDone { + id: ProcessId::MAX, + success: false, + code: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_json() { + let 
value = serde_json::json!({ + "type": "proc_done", + "id": ProcessId::MAX, + "success": true, + "code": i32::MAX, + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::ProcDone { + id: ProcessId::MAX, + success: true, + code: Some(i32::MAX), + } + ); + } + + #[test] + fn should_be_able_to_serialize_minimal_payload_to_msgpack() { + let payload = Response::ProcDone { + id: ProcessId::MAX, + success: false, + code: None, + }; + + // NOTE: We don't actually check the errput here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_serialize_full_payload_to_msgpack() { + let payload = Response::ProcDone { + id: ProcessId::MAX, + success: true, + code: Some(i32::MAX), + }; + + // NOTE: We don't actually check the errput here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_minimal_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::ProcDone { + id: ProcessId::MAX, + success: false, + code: None, + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::ProcDone { + id: ProcessId::MAX, + success: false, + code: None, + } + ); + } + + #[test] + fn should_be_able_to_deserialize_full_payload_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::ProcDone { + id: ProcessId::MAX, + success: true, + code: Some(i32::MAX), + }) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::ProcDone { + id: ProcessId::MAX, + success: true, + code: Some(i32::MAX), + } + ); + } + } + + mod system_info { + use super::*; + use std::path::PathBuf; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::SystemInfo(SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }); + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "system_info", + "family": "family", + "os": "os", + "arch": "arch", + "current_dir": "current-dir", + "main_separator": '/', + "username": "username", + "shell": "shell", + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "system_info", + "family": "family", + "os": "os", + "arch": "arch", + "current_dir": "current-dir", + 
"main_separator": '/', + "username": "username", + "shell": "shell", + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::SystemInfo(SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = Response::SystemInfo(SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }); + + // NOTE: We don't actually check the errput here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. 
+ let buf = rmp_serde::encode::to_vec_named(&Response::SystemInfo(SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + })) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::SystemInfo(SystemInfo { + family: String::from("family"), + os: String::from("os"), + arch: String::from("arch"), + current_dir: PathBuf::from("current-dir"), + main_separator: '/', + username: String::from("username"), + shell: String::from("shell"), + }) + ); + } + } + + mod version { + use super::*; + use crate::common::{Capabilities, Capability}; + + #[test] + fn should_be_able_to_serialize_to_json() { + let payload = Response::Version(Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: [Capability { + kind: String::from("some kind"), + description: String::from("some description"), + }] + .into_iter() + .collect(), + }); + + let value = serde_json::to_value(payload).unwrap(); + assert_eq!( + value, + serde_json::json!({ + "type": "version", + "server_version": "some version", + "protocol_version": [1, 2, 3], + "capabilities": [{ + "kind": "some kind", + "description": "some description", + }], + }) + ); + } + + #[test] + fn should_be_able_to_deserialize_from_json() { + let value = serde_json::json!({ + "type": "version", + "server_version": "some version", + "protocol_version": [1, 2, 3], + "capabilities": Capabilities::all(), + }); + + let payload: Response = serde_json::from_value(value).unwrap(); + assert_eq!( + payload, + Response::Version(Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: Capabilities::all(), + }) + ); + } + + #[test] + fn should_be_able_to_serialize_to_msgpack() { + let payload = 
Response::Version(Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: Capabilities::all(), + }); + + // NOTE: We don't actually check the errput here because it's an implementation detail + // and could change as we change how serialization is done. This is merely to verify + // that we can serialize since there are times when serde fails to serialize at + // runtime. + let _ = rmp_serde::encode::to_vec_named(&payload).unwrap(); + } + + #[test] + fn should_be_able_to_deserialize_from_msgpack() { + // NOTE: It may seem odd that we are serializing just to deserialize, but this is to + // verify that we are not corrupting or causing issues when serializing on a + // client/server and then trying to deserialize on the other side. This has happened + // enough times with minor changes that we need tests to verify. + let buf = rmp_serde::encode::to_vec_named(&Response::Version(Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: Capabilities::all(), + })) + .unwrap(); + + let payload: Response = rmp_serde::decode::from_slice(&buf).unwrap(); + assert_eq!( + payload, + Response::Version(Version { + server_version: String::from("some version"), + protocol_version: (1, 2, 3), + capabilities: Capabilities::all(), + }) + ); + } + } +} diff --git a/distant-core/src/protocol/utils.rs b/distant-protocol/src/utils.rs similarity index 52% rename from distant-core/src/protocol/utils.rs rename to distant-protocol/src/utils.rs index e98e278..7902654 100644 --- a/distant-core/src/protocol/utils.rs +++ b/distant-protocol/src/utils.rs @@ -1,6 +1,24 @@ use serde::{Deserialize, Serialize}; -pub(crate) fn deserialize_u128_option<'de, D>(deserializer: D) -> Result, D::Error> +/// Used purely for skipping serialization of values that are false by default. 
+#[inline] +pub const fn is_false(value: &bool) -> bool { + !*value +} + +/// Used purely for skipping serialization of values that are 1 by default. +#[inline] +pub const fn is_one(value: &usize) -> bool { + *value == 1 +} + +/// Used to provide a default serde value of 1. +#[inline] +pub const fn one() -> usize { + 1 +} + +pub fn deserialize_u128_option<'de, D>(deserializer: D) -> Result, D::Error> where D: serde::Deserializer<'de>, { @@ -15,7 +33,7 @@ where } } -pub(crate) fn serialize_u128_option( +pub fn serialize_u128_option( val: &Option, s: S, ) -> Result { diff --git a/distant-ssh2/src/api.rs b/distant-ssh2/src/api.rs index f960f3a..fd8b975 100644 --- a/distant-ssh2/src/api.rs +++ b/distant-ssh2/src/api.rs @@ -10,7 +10,7 @@ use async_trait::async_trait; use distant_core::net::server::ConnectionCtx; use distant_core::protocol::{ Capabilities, CapabilityKind, DirEntry, Environment, FileType, Metadata, Permissions, - ProcessId, PtySize, SetPermissionsOptions, SystemInfo, UnixMetadata, + ProcessId, PtySize, SetPermissionsOptions, SystemInfo, UnixMetadata, Version, PROTOCOL_VERSION, }; use distant_core::{DistantApi, DistantCtx}; use log::*; @@ -79,22 +79,6 @@ impl DistantApi for SshDistantApi { Ok(()) } - async fn capabilities(&self, ctx: DistantCtx) -> io::Result { - debug!("[Conn {}] Querying capabilities", ctx.connection_id); - - let mut capabilities = Capabilities::all(); - - // Searching is not supported by ssh implementation - // TODO: Could we have external search using ripgrep's JSON lines API? 
- capabilities.take(CapabilityKind::Search); - capabilities.take(CapabilityKind::CancelSearch); - - // Broken via wezterm-ssh, so not supported right now - capabilities.take(CapabilityKind::SetPermissions); - - Ok(capabilities) - } - async fn read_file( &self, ctx: DistantCtx, @@ -1013,4 +997,24 @@ impl DistantApi for SshDistantApi { shell, }) } + + async fn version(&self, ctx: DistantCtx) -> io::Result { + debug!("[Conn {}] Querying capabilities", ctx.connection_id); + + let mut capabilities = Capabilities::all(); + + // Searching is not supported by ssh implementation + // TODO: Could we have external search using ripgrep's JSON lines API? + capabilities.take(CapabilityKind::Search); + capabilities.take(CapabilityKind::CancelSearch); + + // Broken via wezterm-ssh, so not supported right now + capabilities.take(CapabilityKind::SetPermissions); + + Ok(Version { + server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")), + protocol_version: PROTOCOL_VERSION, + capabilities, + }) + } } diff --git a/distant-ssh2/src/process.rs b/distant-ssh2/src/process.rs index 4fd3243..e9993c2 100644 --- a/distant-ssh2/src/process.rs +++ b/distant-ssh2/src/process.rs @@ -57,7 +57,7 @@ where if environment.is_empty() { None } else { - Some(environment.into_map()) + Some(environment) }, ) .compat() @@ -143,7 +143,7 @@ where if environment.is_empty() { None } else { - Some(environment.into_map()) + Some(environment) }, ) .compat() diff --git a/src/cli/commands/client.rs b/src/cli/commands/client.rs index 39f0873..7504634 100644 --- a/src/cli/commands/client.rs +++ b/src/cli/commands/client.rs @@ -7,7 +7,8 @@ use anyhow::Context; use distant_core::net::common::{ConnectionId, Host, Map, Request, Response}; use distant_core::net::manager::ManagerClient; use distant_core::protocol::{ - self, ChangeKindSet, FileType, Permissions, SearchQuery, SetPermissionsOptions, SystemInfo, + self, Capabilities, ChangeKindSet, FileType, Permissions, SearchQuery, 
SetPermissionsOptions, + SystemInfo, }; use distant_core::{DistantChannel, DistantChannelExt, RemoteCommand, Searcher, Watcher}; use log::*; @@ -48,60 +49,6 @@ async fn read_cache(path: &Path) -> Cache { async fn async_run(cmd: ClientSubcommand) -> CliResult { match cmd { - ClientSubcommand::Capabilities { - cache, - connection, - format, - network, - } => { - debug!("Connecting to manager"); - let mut client = connect_to_manager(format, network).await?; - - let mut cache = read_cache(&cache).await; - let connection_id = - use_or_lookup_connection_id(&mut cache, connection, &mut client).await?; - - debug!("Opening raw channel to connection {}", connection_id); - let channel = client - .open_raw_channel(connection_id) - .await - .with_context(|| { - format!("Failed to open raw channel to connection {connection_id}") - })?; - - debug!("Retrieving capabilities"); - let capabilities = channel - .into_client() - .into_channel() - .capabilities() - .await - .with_context(|| { - format!("Failed to retrieve capabilities using connection {connection_id}") - })?; - - match format { - Format::Shell => { - #[derive(Tabled)] - struct EntryRow { - kind: String, - description: String, - } - - let table = Table::new(capabilities.into_sorted_vec().into_iter().map(|cap| { - EntryRow { - kind: cap.kind, - description: cap.description, - } - })) - .with(Style::ascii()) - .with(Modify::new(Rows::new(..)).with(Alignment::left())) - .to_string(); - - println!("{table}"); - } - Format::Json => println!("{}", serde_json::to_string(&capabilities).unwrap()), - } - } ClientSubcommand::Connect { cache, destination, @@ -402,7 +349,12 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult { cmd.as_deref().unwrap_or(r"$SHELL") ); Shell::new(channel.into_client().into_channel()) - .spawn(cmd, environment, current_dir, MAX_PIPE_CHUNK_SIZE) + .spawn( + cmd, + environment.into_map(), + current_dir, + MAX_PIPE_CHUNK_SIZE, + ) .await?; } ClientSubcommand::Spawn { @@ -449,7 +401,12 @@ async fn 
async_run(cmd: ClientSubcommand) -> CliResult { environment, current_dir, cmd ); Shell::new(channel.into_client().into_channel()) - .spawn(cmd, environment, current_dir, MAX_PIPE_CHUNK_SIZE) + .spawn( + cmd, + environment.into_map(), + current_dir, + MAX_PIPE_CHUNK_SIZE, + ) .await?; } else { debug!( @@ -457,7 +414,7 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult { environment, current_dir, cmd ); let mut proc = RemoteCommand::new() - .environment(environment) + .environment(environment.into_map()) .current_dir(current_dir) .pty(None) .spawn(channel.into_client().into_channel(), &cmd) @@ -548,6 +505,114 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult { .context("Failed to write system information to stdout")?; out.flush().context("Failed to flush stdout")?; } + ClientSubcommand::Version { + cache, + connection, + format, + network, + } => { + debug!("Connecting to manager"); + let mut client = connect_to_manager(format, network).await?; + + let mut cache = read_cache(&cache).await; + let connection_id = + use_or_lookup_connection_id(&mut cache, connection, &mut client).await?; + + debug!("Opening raw channel to connection {}", connection_id); + let channel = client + .open_raw_channel(connection_id) + .await + .with_context(|| { + format!("Failed to open raw channel to connection {connection_id}") + })?; + + debug!("Retrieving version information"); + let version = channel + .into_client() + .into_channel() + .version() + .await + .with_context(|| { + format!("Failed to retrieve version using connection {connection_id}") + })?; + + match format { + Format::Shell => { + let (major, minor, patch) = distant_core::protocol::PROTOCOL_VERSION; + println!( + "Client: {} {} (Protocol {major}.{minor}.{patch})", + env!("CARGO_PKG_NAME"), + env!("CARGO_PKG_VERSION") + ); + + let (major, minor, patch) = version.protocol_version; + println!( + "Server: {} (Protocol {major}.{minor}.{patch})", + version.server_version + ); + + // Build a complete set of 
capabilities to show which ones we support + let client_capabilities = Capabilities::all(); + let server_capabilities = version.capabilities; + let mut capabilities: Vec = client_capabilities + .union(server_capabilities.as_ref()) + .map(|cap| { + let kind = &cap.kind; + if client_capabilities.contains(kind) + && server_capabilities.contains(kind) + { + format!("+{kind}") + } else { + format!("-{kind}") + } + }) + .collect(); + capabilities.sort_unstable(); + + // Figure out the text length of the longest capability + let max_len = capabilities.iter().map(|x| x.len()).max().unwrap_or(0); + + if max_len > 0 { + const MAX_COLS: usize = 4; + + // Determine how wide we have available to determine how many columns + // to use; if we don't have a terminal width, default to something + // + // Maximum columns we want to support is 4 + let cols = match terminal_size::terminal_size() { + // If we have a tty, see how many we can fit including space char + // + // Ensure that we at least return 1 as cols + Some((width, _)) => std::cmp::max(width.0 as usize / (max_len + 1), 1), + + // If we have no tty, default to 4 columns + None => MAX_COLS, + }; + + println!("Capabilities supported (+) or not (-):"); + for chunk in capabilities.chunks(std::cmp::min(cols, MAX_COLS)) { + let cnt = chunk.len(); + match cnt { + 1 => println!("{:max_len$}", chunk[0]), + 2 => println!("{:max_len$} {:max_len$}", chunk[0], chunk[1]), + 3 => println!( + "{:max_len$} {:max_len$} {:max_len$}", + chunk[0], chunk[1], chunk[2] + ), + 4 => println!( + "{:max_len$} {:max_len$} {:max_len$} {:max_len$}", + chunk[0], chunk[1], chunk[2], chunk[3] + ), + _ => unreachable!("Chunk of size {cnt} is not 1 > i <= {MAX_COLS}"), + } + } + } + } + Format::Json => { + println!("{}", serde_json::to_string(&version).unwrap()) + } + } + } ClientSubcommand::FileSystem(ClientFileSystemSubcommand::Copy { cache, connection, diff --git a/src/cli/commands/common/format.rs b/src/cli/commands/common/format.rs index 
8a5f765..6f32e15 100644 --- a/src/cli/commands/common/format.rs +++ b/src/cli/commands/common/format.rs @@ -156,10 +156,10 @@ fn format_shell(state: &mut FormatterState, data: protocol::Response) -> Output "{}{}", match change.kind { ChangeKind::Create => "Following paths were created:\n", - ChangeKind::Remove => "Following paths were removed:\n", - x if x.is_access_kind() => "Following paths were accessed:\n", - x if x.is_modify_kind() => "Following paths were modified:\n", - x if x.is_rename_kind() => "Following paths were renamed:\n", + ChangeKind::Delete => "Following paths were removed:\n", + x if x.is_access() => "Following paths were accessed:\n", + x if x.is_modify() => "Following paths were modified:\n", + x if x.is_rename() => "Following paths were renamed:\n", _ => "Following paths were affected:\n", }, change @@ -375,17 +375,23 @@ fn format_shell(state: &mut FormatterState, data: protocol::Response) -> Output ) .into_bytes(), ), - protocol::Response::Capabilities { supported } => { + protocol::Response::Version(version) => { #[derive(Tabled)] struct EntryRow { kind: String, description: String, } - let table = Table::new(supported.into_sorted_vec().into_iter().map(|cap| EntryRow { - kind: cap.kind, - description: cap.description, - })) + let table = Table::new( + version + .capabilities + .into_sorted_vec() + .into_iter() + .map(|cap| EntryRow { + kind: cap.kind, + description: cap.description, + }), + ) .with(Style::ascii()) .with(Modify::new(Rows::new(..)).with(Alignment::left())) .to_string() diff --git a/src/cli/commands/generate.rs b/src/cli/commands/generate.rs index eef1875..11617a2 100644 --- a/src/cli/commands/generate.rs +++ b/src/cli/commands/generate.rs @@ -3,8 +3,6 @@ use std::{fs, io}; use anyhow::Context; use clap::CommandFactory; use clap_complete::generate as clap_generate; -use distant_core::net::common::{Request, Response}; -use distant_core::protocol; use crate::options::{Config, GenerateSubcommand}; use crate::{CliResult, Options}; 
@@ -20,35 +18,6 @@ async fn async_run(cmd: GenerateSubcommand) -> CliResult { .await .context("Failed to write default config to {file:?}")?, - GenerateSubcommand::Schema { file } => { - let request_schema = - serde_json::to_value(&Request::>::root_schema()) - .context("Failed to serialize request schema")?; - let response_schema = - serde_json::to_value(&Response::>::root_schema()) - .context("Failed to serialize response schema")?; - - let schema = serde_json::json!({ - "request": request_schema, - "response": response_schema, - }); - - if let Some(path) = file { - serde_json::to_writer_pretty( - &mut fs::OpenOptions::new() - .create(true) - .write(true) - .open(&path) - .with_context(|| format!("Failed to open {path:?}"))?, - &schema, - ) - .context("Failed to write to {path:?}")?; - } else { - serde_json::to_writer_pretty(&mut io::stdout(), &schema) - .context("Failed to print to stdout")?; - } - } - GenerateSubcommand::Completion { file, shell } => { let name = "distant"; let mut cmd = Options::command(); diff --git a/src/options.rs b/src/options.rs index b178f20..b9c0000 100644 --- a/src/options.rs +++ b/src/options.rs @@ -7,7 +7,7 @@ use clap_complete::Shell as ClapCompleteShell; use derive_more::IsVariant; use distant_core::net::common::{ConnectionId, Destination, Map, PortRange}; use distant_core::net::server::Shutdown; -use distant_core::protocol::{ChangeKind, Environment}; +use distant_core::protocol::ChangeKind; use service_manager::ServiceManagerKind; use crate::constants; @@ -103,9 +103,6 @@ impl Options { network.merge(config.client.network); *timeout = timeout.take().or(config.client.api.timeout); } - ClientSubcommand::Capabilities { network, .. } => { - network.merge(config.client.network); - } ClientSubcommand::Connect { network, options, .. } => { @@ -153,6 +150,9 @@ impl Options { ClientSubcommand::SystemInfo { network, .. } => { network.merge(config.client.network); } + ClientSubcommand::Version { network, .. 
} => { + network.merge(config.client.network); + } } } DistantSubcommand::Generate(_) => { @@ -263,28 +263,6 @@ pub enum ClientSubcommand { network: NetworkSettings, }, - /// Retrieves capabilities of the remote server - Capabilities { - /// Location to store cached data - #[clap( - long, - value_hint = ValueHint::FilePath, - value_parser, - default_value = CACHE_FILE_PATH_STR.as_str() - )] - cache: PathBuf, - - /// Specify a connection being managed - #[clap(long)] - connection: Option, - - #[clap(flatten)] - network: NetworkSettings, - - #[clap(short, long, default_value_t, value_enum)] - format: Format, - }, - /// Requests that active manager connects to the server at the specified destination Connect { /// Location to store cached data @@ -392,7 +370,7 @@ pub enum ClientSubcommand { /// Environment variables to provide to the shell #[clap(long, default_value_t)] - environment: Environment, + environment: Map, /// Optional command to run instead of $SHELL #[clap(name = "CMD", last = true)] @@ -434,7 +412,7 @@ pub enum ClientSubcommand { /// Environment variables to provide to the shell #[clap(long, default_value_t)] - environment: Environment, + environment: Map, /// Command to run #[clap(name = "CMD", num_args = 1.., last = true)] @@ -458,12 +436,33 @@ pub enum ClientSubcommand { #[clap(flatten)] network: NetworkSettings, }, + + /// Retrieves version information of the remote server + Version { + /// Location to store cached data + #[clap( + long, + value_hint = ValueHint::FilePath, + value_parser, + default_value = CACHE_FILE_PATH_STR.as_str() + )] + cache: PathBuf, + + /// Specify a connection being managed + #[clap(long)] + connection: Option, + + #[clap(flatten)] + network: NetworkSettings, + + #[clap(short, long, default_value_t, value_enum)] + format: Format, + }, } impl ClientSubcommand { pub fn cache_path(&self) -> &Path { match self { - Self::Capabilities { cache, .. } => cache.as_path(), Self::Connect { cache, .. 
} => cache.as_path(), Self::FileSystem(fs) => fs.cache_path(), Self::Launch { cache, .. } => cache.as_path(), @@ -471,12 +470,12 @@ impl ClientSubcommand { Self::Shell { cache, .. } => cache.as_path(), Self::Spawn { cache, .. } => cache.as_path(), Self::SystemInfo { cache, .. } => cache.as_path(), + Self::Version { cache, .. } => cache.as_path(), } } pub fn network_settings(&self) -> &NetworkSettings { match self { - Self::Capabilities { network, .. } => network, Self::Connect { network, .. } => network, Self::FileSystem(fs) => fs.network_settings(), Self::Launch { network, .. } => network, @@ -484,6 +483,7 @@ impl ClientSubcommand { Self::Shell { network, .. } => network, Self::Spawn { network, .. } => network, Self::SystemInfo { network, .. } => network, + Self::Version { network, .. } => network, } } } @@ -894,13 +894,6 @@ pub enum GenerateSubcommand { file: PathBuf, }, - /// Generate JSON schema for server request/response - Schema { - /// If specified, will output to the file at the given path instead of stdout - #[clap(long)] - file: Option, - }, - // Generate completion info for CLI Completion { /// If specified, will output to the file at the given path instead of stdout @@ -1272,7 +1265,7 @@ mod tests { log_file: None, log_level: None, }, - command: DistantSubcommand::Client(ClientSubcommand::Capabilities { + command: DistantSubcommand::Client(ClientSubcommand::Version { cache: PathBuf::new(), connection: None, network: NetworkSettings { @@ -1309,7 +1302,7 @@ mod tests { log_file: Some(PathBuf::from("config-log-file")), log_level: Some(LogLevel::Trace), }, - command: DistantSubcommand::Client(ClientSubcommand::Capabilities { + command: DistantSubcommand::Client(ClientSubcommand::Version { cache: PathBuf::new(), connection: None, network: NetworkSettings { @@ -1330,7 +1323,7 @@ mod tests { log_file: Some(PathBuf::from("cli-log-file")), log_level: Some(LogLevel::Info), }, - command: DistantSubcommand::Client(ClientSubcommand::Capabilities { + command: 
DistantSubcommand::Client(ClientSubcommand::Version { cache: PathBuf::new(), connection: None, network: NetworkSettings { @@ -1367,7 +1360,7 @@ mod tests { log_file: Some(PathBuf::from("cli-log-file")), log_level: Some(LogLevel::Info), }, - command: DistantSubcommand::Client(ClientSubcommand::Capabilities { + command: DistantSubcommand::Client(ClientSubcommand::Version { cache: PathBuf::new(), connection: None, network: NetworkSettings { @@ -1666,7 +1659,7 @@ mod tests { windows_pipe: None, }, current_dir: None, - environment: map!(), + environment: Default::default(), cmd: None, }), }; diff --git a/tests/cli/api/mod.rs b/tests/cli/api/mod.rs index b44cc65..de82004 100644 --- a/tests/cli/api/mod.rs +++ b/tests/cli/api/mod.rs @@ -1,4 +1,3 @@ -mod capabilities; mod copy; mod dir_create; mod dir_read; @@ -15,4 +14,5 @@ mod remove; mod rename; mod search; mod system_info; +mod version; mod watch; diff --git a/tests/cli/api/search.rs b/tests/cli/api/search.rs index 05752e8..a9cbf98 100644 --- a/tests/cli/api/search.rs +++ b/tests/cli/api/search.rs @@ -54,18 +54,12 @@ async fn should_support_json_search_filesystem_using_query( { "type": "contents", "path": root.child("file2.txt").to_string_lossy(), - "lines": { - "type": "text", - "value": "textual\n", - }, + "lines": "textual\n", "line_number": 3, "absolute_offset": 9, "submatches": [ { - "match": { - "type": "text", - "value": "ua", - }, + "match": "ua", "start": 4, "end": 6, } diff --git a/tests/cli/api/capabilities.rs b/tests/cli/api/version.rs similarity index 65% rename from tests/cli/api/capabilities.rs rename to tests/cli/api/version.rs index 149da94..12d01e4 100644 --- a/tests/cli/api/capabilities.rs +++ b/tests/cli/api/version.rs @@ -1,4 +1,4 @@ -use distant_core::protocol::{Capabilities, Capability}; +use distant_core::protocol::{Capabilities, Capability, SemVer, PROTOCOL_VERSION}; use rstest::*; use serde_json::json; use test_log::test; @@ -13,15 +13,19 @@ async fn should_support_json_capabilities(mut 
api_process: CtxCommand().to_string(); let req = json!({ "id": id, - "payload": { "type": "capabilities" }, + "payload": { "type": "version" }, }); let res = api_process.write_and_read_json(req).await.unwrap().unwrap(); assert_eq!(res["origin_id"], id, "JSON: {res}"); - assert_eq!(res["payload"]["type"], "capabilities", "JSON: {res}"); + assert_eq!(res["payload"]["type"], "version", "JSON: {res}"); - let supported: Capabilities = res["payload"]["supported"] + let protocol_version: SemVer = + serde_json::from_value(res["payload"]["protocol_version"].clone()).unwrap(); + assert_eq!(protocol_version, PROTOCOL_VERSION); + + let capabilities: Capabilities = res["payload"]["capabilities"] .as_array() .expect("Field 'supported' was not an array") .iter() @@ -33,5 +37,5 @@ async fn should_support_json_capabilities(mut api_process: CtxCommand predicates::str::RegexPredicate { +pub fn regex_pred(s: &str) -> ::predicates::str::RegexPredicate { predicate::str::is_match(s).unwrap() } diff --git a/tests/cli/utils/predicates.rs b/tests/cli/utils/predicates.rs new file mode 100644 index 0000000..d336b51 --- /dev/null +++ b/tests/cli/utils/predicates.rs @@ -0,0 +1,50 @@ +use predicates::reflection::PredicateReflection; +use predicates::Predicate; +use std::fmt; + +/// Checks if lines of text match the provided, trimming each line +/// of both before comparing. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TrimmedLinesMatchPredicate { + pattern: String, +} + +impl TrimmedLinesMatchPredicate { + pub fn new(pattern: impl Into) -> Self { + Self { + pattern: pattern.into(), + } + } +} + +impl fmt::Display for TrimmedLinesMatchPredicate { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "trimmed_lines expects {}", self.pattern) + } +} + +impl Predicate for TrimmedLinesMatchPredicate { + fn eval(&self, variable: &str) -> bool { + let mut expected = self.pattern.lines(); + let mut actual = variable.lines(); + + // Fail if we don't have the same number of lines + // or of the trimmed result of lines don't match + // + // Otherwise if we finish processing all lines, + // we are a success + loop { + match (expected.next(), actual.next()) { + (Some(expected), Some(actual)) => { + if expected.trim() != actual.trim() { + return false; + } + } + (None, None) => return true, + _ => return false, + } + } + } +} + +impl PredicateReflection for TrimmedLinesMatchPredicate {}