Refactor into unified settings (#172)

pull/184/head
Chip Senkbeil authored 1 year ago, committed by GitHub
parent 093b4d2ec4
commit 4b983b0229

@ -7,7 +7,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.20.0-alpha.4]
## [0.20.0-alpha.5]
### Added
- CLI now offers the following new subcommands
- `distant fs copy` is a refactoring of `distant client action copy`
- `distant fs exists` is a refactoring of `distant client action exists`
- `distant fs read` is a refactoring of `distant client action file-read`,
`distant client action file-read-text`, and `distant client action dir-read`
- `distant fs rename` is a refactoring of `distant client action rename`
- `distant fs write` is a refactoring of `distant client action file-write`,
`distant client action file-write-text`, `distant client action file-append`,
- `distant fs make-dir` is a refactoring of `distant client action dir-create`
- `distant fs metadata` is a refactoring of `distant client action metadata`
- `distant fs remove` is a refactoring of `distant client action remove`
- `distant fs search` is a refactoring of `distant client action search`
- `distant fs watch` is a refactoring of `distant client action watch`
- `distant spawn` is a refactoring of `distant client action proc-spawn`
with `distant client lsp` merged in using the `--lsp` flag
- `distant system-info` is a refactoring of `distant client action system-info`
### Changed
- CLI subcommands refactored
- `distant client select` moved to `distant manager select`
- `distant client action` moved to `distant action`
- `distant client launch` moved to `distant launch`
- `distant client connect` moved to `distant connect`
- `distant client lsp` moved to `distant lsp`
- `distant client repl` moved to `distant api`
- `distant client shell` moved to `distant shell`
### Removed
- `distant-core` crate no longer offers the `clap` feature
## [0.20.0-alpha.4] - 2023-03-31
### Added
@ -20,7 +56,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Updated a variety of dependencies to latest versions
## [0.20.0-alpha.3]
## [0.20.0-alpha.3] - 2022-11-27
### Added
@ -375,7 +411,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
pending upon full channel and no longer locks up
- stdout, stderr, and stdin of `RemoteProcess` no longer cause deadlock
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.4...HEAD
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.5...HEAD
[0.20.0-alpha.5]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.4...v0.20.0-alpha.5
[0.20.0-alpha.4]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.3...v0.20.0-alpha.4
[0.20.0-alpha.3]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.2...v0.20.0-alpha.3
[0.20.0-alpha.2]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.1...v0.20.0-alpha.2

Cargo.lock generated

@ -849,7 +849,7 @@ dependencies = [
[[package]]
name = "distant"
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
dependencies = [
"anyhow",
"assert_cmd",
@ -892,13 +892,12 @@ dependencies = [
[[package]]
name = "distant-core"
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
dependencies = [
"assert_fs",
"async-trait",
"bitflags 2.0.2",
"bytes",
"clap",
"derive_more",
"distant-net",
"env_logger",
@ -932,7 +931,7 @@ dependencies = [
[[package]]
name = "distant-net"
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
dependencies = [
"async-trait",
"bytes",
@ -961,7 +960,7 @@ dependencies = [
[[package]]
name = "distant-ssh2"
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
dependencies = [
"anyhow",
"assert_fs",

@ -3,7 +3,7 @@ name = "distant"
description = "Operate on a remote computer through file and process manipulation"
categories = ["command-line-utilities"]
keywords = ["cli"]
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -32,7 +32,7 @@ clap_complete = "4.2.0"
config = { version = "0.13.3", default-features = false, features = ["toml"] }
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] }
dialoguer = { version = "0.10.3", default-features = false }
distant-core = { version = "=0.20.0-alpha.4", path = "distant-core", features = ["clap", "schemars"] }
distant-core = { version = "=0.20.0-alpha.5", path = "distant-core", features = ["schemars"] }
directories = "5.0.0"
flexi_logger = "0.25.3"
indoc = "2.0.1"
@ -54,7 +54,7 @@ winsplit = "0.1.0"
whoami = "1.4.0"
# Optional native SSH functionality
distant-ssh2 = { version = "=0.20.0-alpha.4", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
distant-ssh2 = { version = "=0.20.0-alpha.5", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
[target.'cfg(unix)'.dependencies]
fork = "0.1.21"

@ -3,7 +3,7 @@ name = "distant-core"
description = "Core library for distant, enabling operation on a remote computer through file and process manipulation"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -19,7 +19,7 @@ async-trait = "0.1.68"
bitflags = "2.0.2"
bytes = "1.4.0"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-net = { version = "=0.20.0-alpha.4", path = "../distant-net" }
distant-net = { version = "=0.20.0-alpha.5", path = "../distant-net" }
futures = "0.3.28"
grep = "0.2.11"
hex = "0.4.3"
@ -43,7 +43,6 @@ whoami = "1.4.0"
winsplit = "0.1.0"
# Optional dependencies based on features
clap = { version = "4.2.1", features = ["derive"], optional = true }
schemars = { version = "0.8.12", optional = true }
[dev-dependencies]

@ -35,8 +35,6 @@ distant-core = "0.19"
Currently, the library supports the following features:
- `clap`: generates [`Clap`](https://github.com/clap-rs) bindings for
`DistantRequestData` (used by cli to expose request actions)
- `schemars`: derives the `schemars::JsonSchema` interface on
`DistantMsg`, `DistantRequestData`, and `DistantResponseData` data types
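With the `clap` feature gone, `schemars` is the crate's remaining optional feature. Below is a minimal sketch of the pattern used throughout this diff; the `ExampleMsg` type and `example_root_schema` helper are hypothetical stand-ins, not part of distant-core.

```rust
use serde::{Deserialize, Serialize};

// Derive JsonSchema only when the optional `schemars` feature is enabled,
// mirroring the cfg_attr pattern applied to the real request/response types.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum ExampleMsg {
    Single(String),
    Batch(Vec<String>),
}

#[cfg(feature = "schemars")]
pub fn example_root_schema() -> schemars::schema::RootSchema {
    // schema_for! produces a RootSchema describing the annotated type
    schemars::schema_for!(ExampleMsg)
}
```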

@ -61,8 +61,8 @@ impl Watcher {
DistantRequestData::Watch {
path: path.to_path_buf(),
recursive,
only: only.into_vec(),
except: except.into_vec(),
only: only.into_sorted_vec(),
except: except.into_sorted_vec(),
},
)))
.await?;

@ -3,9 +3,6 @@ use serde::{Deserialize, Serialize};
use std::{io, path::PathBuf};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
#[cfg(feature = "clap")]
use strum::VariantNames;
mod capabilities;
pub use capabilities::*;
@ -15,9 +12,6 @@ pub use change::*;
mod cmd;
pub use cmd::*;
#[cfg(feature = "clap")]
mod clap_impl;
mod error;
pub use error::*;
@ -45,17 +39,6 @@ pub type ProcessId = u32;
/// Mapping of environment variables
pub type Environment = distant_net::common::Map;
/// Type alias for a vec of bytes
///
/// NOTE: This only exists to support properly parsing a Vec<u8> from an entire string
/// with clap rather than trying to parse a string as a singular u8
pub type ByteVec = Vec<u8>;
#[cfg(feature = "clap")]
fn parse_byte_vec(src: &str) -> Result<ByteVec, std::convert::Infallible> {
Ok(src.as_bytes().to_vec())
}
/// Represents a wrapper around a distant message, supporting single and batch requests
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
@ -143,7 +126,6 @@ impl<T: schemars::JsonSchema> DistantMsg<T> {
/// Represents the payload of a request to be performed on the remote machine
#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "clap", derive(clap::Subcommand))]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
@ -164,14 +146,12 @@ impl<T: schemars::JsonSchema> DistantMsg<T> {
#[strum_discriminants(name(CapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
#[cfg_attr(feature = "clap", clap(rename_all = "kebab-case"))]
pub enum DistantRequestData {
/// Retrieve information about the server's capabilities
#[strum_discriminants(strum(message = "Supports retrieving capabilities"))]
Capabilities {},
/// Reads a file from the specified path on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["cat"]))]
#[strum_discriminants(strum(message = "Supports reading binary file"))]
FileRead {
/// The path to the file on the remote machine
@ -194,8 +174,9 @@ pub enum DistantRequestData {
path: PathBuf,
/// Data for server-side writing of content
#[cfg_attr(feature = "clap", clap(value_parser = parse_byte_vec))]
data: ByteVec,
#[serde(with = "serde_bytes")]
#[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
data: Vec<u8>,
},
/// Writes a file using text instead of bytes, creating it if it does not exist,
@ -216,8 +197,9 @@ pub enum DistantRequestData {
path: PathBuf,
/// Data for server-side writing of content
#[cfg_attr(feature = "clap", clap(value_parser = parse_byte_vec))]
data: ByteVec,
#[serde(with = "serde_bytes")]
#[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
data: Vec<u8>,
},
/// Appends text to a file, creating it if it does not exist, on the remote machine
@ -231,7 +213,6 @@ pub enum DistantRequestData {
},
/// Reads a directory from the specified path on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["ls"]))]
#[strum_discriminants(strum(message = "Supports reading directory"))]
DirRead {
/// The path to the directory on the remote machine
@ -241,12 +222,10 @@ pub enum DistantRequestData {
/// depth and 1 indicating the most immediate children within the
/// directory
#[serde(default = "one")]
#[cfg_attr(feature = "clap", clap(long, default_value = "1"))]
depth: usize,
/// Whether or not to return absolute or relative paths
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
absolute: bool,
/// Whether or not to canonicalize the resulting paths, meaning
@ -256,7 +235,6 @@ pub enum DistantRequestData {
/// Note that the flag absolute must be true to have absolute paths
/// returned, even if canonicalize is flagged as true
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
canonicalize: bool,
/// Whether or not to include the root directory in the retrieved
@ -265,12 +243,10 @@ pub enum DistantRequestData {
/// If included, the root directory will also be a canonicalized,
/// absolute path and will not follow any of the other flags
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
include_root: bool,
},
/// Creates a directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["mkdir"]))]
#[strum_discriminants(strum(message = "Supports creating directory"))]
DirCreate {
/// The path to the directory on the remote machine
@ -278,12 +254,10 @@ pub enum DistantRequestData {
/// Whether or not to create all parent directories
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
all: bool,
},
/// Removes a file or directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["rm"]))]
#[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))]
Remove {
/// The path to the file or directory on the remote machine
@ -292,12 +266,10 @@ pub enum DistantRequestData {
/// Whether or not to remove all contents within directory if is a directory.
/// Does nothing different for files
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
force: bool,
},
/// Copies a file or directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["cp"]))]
#[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))]
Copy {
/// The path to the file or directory on the remote machine
@ -308,7 +280,6 @@ pub enum DistantRequestData {
},
/// Moves/renames a file or directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["mv"]))]
#[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))]
Rename {
/// The path to the file or directory on the remote machine
@ -327,23 +298,14 @@ pub enum DistantRequestData {
/// If true, will recursively watch for changes within directories, otherwise
/// will only watch for changes immediately within directories
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
recursive: bool,
/// Filter to only report back specified changes
#[serde(default)]
#[cfg_attr(
feature = "clap",
clap(long, value_parser = clap::builder::PossibleValuesParser::new(ChangeKind::VARIANTS))
)]
only: Vec<ChangeKind>,
/// Filter to report back changes except these specified changes
#[serde(default)]
#[cfg_attr(
feature = "clap",
clap(long, value_parser = clap::builder::PossibleValuesParser::new(ChangeKind::VARIANTS))
)]
except: Vec<ChangeKind>,
},
@ -373,12 +335,10 @@ pub enum DistantRequestData {
/// returning the canonical, absolute form of a path with all
/// intermediate components normalized and symbolic links resolved
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
canonicalize: bool,
/// Whether or not to follow symlinks to determine absolute file type (dir/file)
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
resolve_file_type: bool,
},
@ -386,7 +346,6 @@ pub enum DistantRequestData {
#[strum_discriminants(strum(message = "Supports searching filesystem using queries"))]
Search {
/// Query to perform against the filesystem
#[cfg_attr(feature = "clap", clap(flatten))]
query: SearchQuery,
},
@ -400,31 +359,25 @@ pub enum DistantRequestData {
},
/// Spawns a new process on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["spawn", "run"]))]
#[strum_discriminants(strum(message = "Supports spawning a process"))]
ProcSpawn {
/// The full command to run including arguments
#[cfg_attr(feature = "clap", clap(flatten))]
cmd: Cmd,
/// Environment to provide to the remote process
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long, default_value_t = Environment::default()))]
environment: Environment,
/// Alternative current directory for the remote process
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
current_dir: Option<PathBuf>,
/// If provided, will spawn process in a pty, otherwise spawns directly
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
pty: Option<PtySize>,
},
/// Kills a process running on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["kill"]))]
#[strum_discriminants(strum(message = "Supports killing a spawned process"))]
ProcKill {
/// Id of the actively-running process
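The `FileWrite`/`FileAppend` hunks above replace the clap-parsed `ByteVec` with a plain `Vec<u8>` serialized via `serde_bytes`. A minimal sketch of that pattern, using a hypothetical `FileWriteSketch` struct rather than the crate's real request enum:

```rust
use serde::{Deserialize, Serialize};
use std::path::PathBuf;

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct FileWriteSketch {
    path: PathBuf,
    // serde_bytes serializes this field as a byte string where the format
    // supports it (e.g. MessagePack) rather than a sequence of integers
    #[serde(with = "serde_bytes")]
    data: Vec<u8>,
}

fn main() {
    let req = FileWriteSketch {
        path: PathBuf::from("example.txt"),
        data: b"hello".to_vec(),
    };

    // Round-trip through JSON to show the field still (de)serializes cleanly;
    // binary formats benefit the most from the byte-string encoding.
    let json = serde_json::to_string(&req).unwrap();
    let back: FileWriteSketch = serde_json::from_str(&json).unwrap();
    assert_eq!(req, back);
}
```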

@ -10,7 +10,7 @@ use std::{
path::PathBuf,
str::FromStr,
};
use strum::{EnumString, EnumVariantNames};
use strum::{EnumString, EnumVariantNames, VariantNames};
/// Change to one or more paths on the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
@ -58,8 +58,6 @@ impl From<NotifyEvent> for Change {
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "clap", clap(rename_all = "snake_case"))]
pub enum ChangeKind {
/// Something about a file or directory was accessed, but
/// no specific details were known
@ -142,6 +140,16 @@ pub enum ChangeKind {
}
impl ChangeKind {
/// Returns a list of all variants as str names
pub const fn variants() -> &'static [&'static str] {
Self::VARIANTS
}
/// Returns a list of all variants as a vec
pub fn all() -> Vec<ChangeKind> {
ChangeKindSet::all().into_sorted_vec()
}
/// Returns true if the change is a kind of access
pub fn is_access_kind(&self) -> bool {
self.is_open_access_kind()
@ -375,9 +383,11 @@ impl ChangeKindSet {
ChangeKind::Rename | ChangeKind::RenameBoth | ChangeKind::RenameFrom | ChangeKind::RenameTo
}
/// Consumes set and returns a vec of the kinds of changes
pub fn into_vec(self) -> Vec<ChangeKind> {
self.0.into_iter().collect()
/// Consumes set and returns a sorted vec of the kinds of changes
pub fn into_sorted_vec(self) -> Vec<ChangeKind> {
let mut v = self.0.into_iter().collect::<Vec<_>>();
v.sort();
v
}
}
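The rename from `into_vec` to `into_sorted_vec` above makes the returned order deterministic, since the underlying collection may not iterate in a stable order. A tiny sketch of the same idea over a plain `HashSet` as a stand-in for the real `ChangeKindSet`:

```rust
use std::collections::HashSet;

// Collect an unordered set into a Vec and sort it so callers always see
// the same order, regardless of the set's internal iteration order.
fn into_sorted_vec<T: Ord>(set: HashSet<T>) -> Vec<T> {
    let mut v: Vec<T> = set.into_iter().collect();
    v.sort();
    v
}

fn main() {
    let set: HashSet<u32> = [3, 1, 2].into_iter().collect();
    assert_eq!(into_sorted_vec(set), vec![1, 2, 3]);
}
```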

@ -1,106 +0,0 @@
use crate::{data::Cmd, DistantMsg, DistantRequestData};
use clap::{
error::{Error, ErrorKind},
Arg, ArgAction, ArgMatches, Args, Command, FromArgMatches, Subcommand,
};
impl FromArgMatches for Cmd {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
let mut matches = matches.clone();
Self::from_arg_matches_mut(&mut matches)
}
fn from_arg_matches_mut(matches: &mut ArgMatches) -> Result<Self, Error> {
let cmd = matches.get_one::<String>("cmd").ok_or_else(|| {
Error::raw(
ErrorKind::MissingRequiredArgument,
"program must be specified",
)
})?;
let args: Vec<String> = matches
.get_many::<String>("arg")
.unwrap_or_default()
.map(ToString::to_string)
.collect();
Ok(Self::new(format!("{cmd} {}", args.join(" "))))
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
let mut matches = matches.clone();
self.update_from_arg_matches_mut(&mut matches)
}
fn update_from_arg_matches_mut(&mut self, _matches: &mut ArgMatches) -> Result<(), Error> {
Ok(())
}
}
impl Args for Cmd {
fn augment_args(cmd: Command) -> Command {
cmd.arg(
Arg::new("cmd")
.required(true)
.value_name("CMD")
.action(ArgAction::Set),
)
.trailing_var_arg(true)
.arg(
Arg::new("arg")
.value_name("ARGS")
.num_args(1..)
.action(ArgAction::Append),
)
}
fn augment_args_for_update(cmd: Command) -> Command {
cmd
}
}
impl FromArgMatches for DistantMsg<DistantRequestData> {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
match matches.subcommand() {
Some(("single", args)) => Ok(Self::Single(DistantRequestData::from_arg_matches(args)?)),
Some((_, _)) => Err(Error::raw(
ErrorKind::InvalidSubcommand,
"Valid subcommand is `single`",
)),
None => Err(Error::raw(
ErrorKind::MissingSubcommand,
"Valid subcommand is `single`",
)),
}
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
match matches.subcommand() {
Some(("single", args)) => {
*self = Self::Single(DistantRequestData::from_arg_matches(args)?)
}
Some((_, _)) => {
return Err(Error::raw(
ErrorKind::InvalidSubcommand,
"Valid subcommand is `single`",
))
}
None => (),
};
Ok(())
}
}
impl Subcommand for DistantMsg<DistantRequestData> {
fn augment_subcommands(cmd: Command) -> Command {
cmd.subcommand(DistantRequestData::augment_subcommands(Command::new(
"single",
)))
.subcommand_required(true)
}
fn augment_subcommands_for_update(cmd: Command) -> Command {
cmd.subcommand(DistantRequestData::augment_subcommands(Command::new(
"single",
)))
.subcommand_required(true)
}
fn has_subcommand(name: &str) -> bool {
matches!(name, "single")
}
}

@ -18,6 +18,13 @@ pub struct Error {
impl std::error::Error for Error {}
impl Error {
/// Produces an [`io::Error`] from this error.
pub fn to_io_error(&self) -> io::Error {
io::Error::new(self.kind.into(), self.description.to_string())
}
}
#[cfg(feature = "schemars")]
impl Error {
pub fn root_schema() -> schemars::schema::RootSchema {
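A short sketch of the `to_io_error` conversion added above, using a hypothetical `ProtocolError` in place of the crate's `Error` type (whose own kind field converts into `io::ErrorKind`):

```rust
use std::io;

// Stand-in for the crate's Error: a kind plus a human-readable description.
struct ProtocolError {
    kind: io::ErrorKind,
    description: String,
}

impl ProtocolError {
    // Mirror of to_io_error: wrap the description in an io::Error of the
    // matching kind so it can flow through std::io-based call sites.
    fn to_io_error(&self) -> io::Error {
        io::Error::new(self.kind, self.description.clone())
    }
}

fn main() {
    let err = ProtocolError {
        kind: io::ErrorKind::NotFound,
        description: "missing file".to_string(),
    };
    println!("{}", err.to_io_error());
}
```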

@ -7,24 +7,19 @@ pub type SearchId = u32;
/// Represents a query to perform against the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::Args))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQuery {
/// Kind of data to examine using condition
#[cfg_attr(feature = "clap", clap(long, value_enum, default_value_t = SearchQueryTarget::Contents))]
pub target: SearchQueryTarget,
/// Condition to meet to be considered a match
#[cfg_attr(feature = "clap", clap(name = "pattern"))]
pub condition: SearchQueryCondition,
/// Paths in which to perform the query
#[cfg_attr(feature = "clap", clap(default_value = "."))]
pub paths: Vec<PathBuf>,
/// Options to apply to the query
#[serde(default)]
#[cfg_attr(feature = "clap", clap(flatten))]
pub options: SearchQueryOptions,
}
@ -46,9 +41,7 @@ impl FromStr for SearchQuery {
/// Kind of data to examine using conditions
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "clap", clap(rename_all = "snake_case"))]
#[serde(rename_all = "snake_case")]
pub enum SearchQueryTarget {
/// Checks path of file, directory, or symlink
@ -176,32 +169,26 @@ impl FromStr for SearchQueryCondition {
/// Options associated with a search query
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::Args))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQueryOptions {
/// Restrict search to only these file types (otherwise all are allowed)
#[cfg_attr(feature = "clap", clap(skip))]
#[serde(default)]
pub allowed_file_types: HashSet<FileType>,
/// Regex to use to filter paths being searched to only those that match the include condition
#[cfg_attr(feature = "clap", clap(long))]
#[serde(default)]
pub include: Option<SearchQueryCondition>,
/// Regex to use to filter paths being searched to only those that do not match the exclude
/// condition
#[cfg_attr(feature = "clap", clap(long))]
#[serde(default)]
pub exclude: Option<SearchQueryCondition>,
/// Search should follow symbolic links
#[cfg_attr(feature = "clap", clap(long))]
#[serde(default)]
pub follow_symbolic_links: bool,
/// Maximum results to return before stopping the query
#[cfg_attr(feature = "clap", clap(long))]
#[serde(default)]
pub limit: Option<u64>,
@ -213,13 +200,11 @@ pub struct SearchQueryOptions {
///
/// Note that this will not simply filter the entries of the iterator, but it will actually
/// avoid descending into directories when the depth is exceeded.
#[cfg_attr(feature = "clap", clap(long))]
#[serde(default)]
pub max_depth: Option<u64>,
/// Amount of results to batch before sending back excluding final submission that will always
/// include the remaining results even if less than pagination request
#[cfg_attr(feature = "clap", clap(long))]
#[serde(default)]
pub pagination: Option<u64>,
}

@ -3,7 +3,7 @@ name = "distant-net"
description = "Network library for distant, providing implementations to support client/server architecture"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"

@ -302,17 +302,32 @@ impl UntypedClient {
Ok(response) => {
if log_enabled!(Level::Trace) {
trace!(
"Client receiving {}",
String::from_utf8_lossy(&response.to_bytes())
.to_string()
"Client receiving (id:{} | origin: {}): {}",
response.id,
response.origin_id,
String::from_utf8_lossy(&response.payload).to_string()
);
}
// For trace-level logging, we need to clone the id and
// origin id before passing the response ownership to
// be delivered elsewhere
let (id, origin_id) = if log_enabled!(Level::Trace) {
(response.id.to_string(), response.origin_id.to_string())
} else {
(String::new(), String::new())
};
// Try to send response to appropriate mailbox
// TODO: This will block if full... is that a problem?
// TODO: How should we handle false response? Did logging in past
post_office
if post_office
.deliver_untyped_response(response.into_owned())
.await;
.await
{
trace!("Client delivered response {id} to {origin_id}");
} else {
trace!("Client dropped response {id} to {origin_id}");
}
}
Err(x) => {
error!("Invalid response: {x}");
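The client hunk above clones the response's id and origin id before ownership moves into delivery, so the trace log can still report whether the mailbox accepted or dropped it. A condensed, synchronous sketch of that pattern with hypothetical stand-in types:

```rust
// Hypothetical stand-ins for the real Response and post-office delivery.
struct Response {
    id: u32,
    origin_id: u32,
}

fn deliver(_response: Response) -> bool {
    // Pretend the mailbox accepted the response.
    true
}

fn handle(response: Response, trace_enabled: bool) {
    // Capture cheap copies of the identifiers before giving up ownership.
    let (id, origin_id) = if trace_enabled {
        (response.id.to_string(), response.origin_id.to_string())
    } else {
        (String::new(), String::new())
    };

    if deliver(response) {
        if trace_enabled {
            println!("delivered response {id} to {origin_id}");
        }
    } else if trace_enabled {
        println!("dropped response {id} to {origin_id}");
    }
}

fn main() {
    handle(Response { id: 1, origin_id: 2 }, true);
}
```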

@ -2,6 +2,7 @@ use crate::common::utils::{deserialize_from_str, serialize_to_str};
use derive_more::{Display, Error, From, IntoIterator};
use serde::{de::Deserializer, ser::Serializer, Deserialize, Serialize};
use std::{
collections::hash_map::Entry,
collections::HashMap,
fmt,
ops::{Deref, DerefMut},
@ -21,6 +22,57 @@ impl Map {
pub fn into_map(self) -> HashMap<String, String> {
self.0
}
/// Merges this map with another map. When there is a conflict
/// where both maps have the same key, the other map's key is
/// used UNLESS the `keep` flag is set to true, where this
/// map's key will be used instead.
///
/// ### Examples
///
/// Keeping the value will result in `x` retaining the `a` key's original value:
///
/// ```rust
/// use distant_net::map;
///
/// let mut x = map!("a" -> "hello", "b" -> "world");
/// let y = map!("a" -> "foo", "c" -> "bar");
///
/// x.merge(y, /* keep */ true);
///
/// assert_eq!(x, map!("a" -> "hello", "b" -> "world", "c" -> "bar"));
/// ```
///
/// Not keeping the value will result in `x` replacing the `a` key's value:
///
/// ```rust
/// use distant_net::map;
///
/// let mut x = map!("a" -> "hello", "b" -> "world");
/// let y = map!("a" -> "foo", "c" -> "bar");
///
/// x.merge(y, /* keep */ false);
///
/// assert_eq!(x, map!("a" -> "foo", "b" -> "world", "c" -> "bar"));
/// ```
pub fn merge(&mut self, other: Map, keep: bool) {
for (key, value) in other {
match self.0.entry(key) {
// If we want to keep the original value, skip replacing it
Entry::Occupied(_) if keep => continue,
// If we want to use the other value, replace it
Entry::Occupied(mut x) => {
x.insert(value);
}
// Otherwise, nothing found, so insert it
Entry::Vacant(x) => {
x.insert(value);
}
}
}
}
}
#[cfg(feature = "schemars")]

@ -20,6 +20,21 @@ pub struct PortRange {
}
impl PortRange {
/// Represents an ephemeral port as defined using the port range of 0.
pub const EPHEMERAL: Self = Self {
start: 0,
end: None,
};
/// Creates a port range targeting a single `port`.
#[inline]
pub fn single(port: u16) -> Self {
Self {
start: port,
end: None,
}
}
/// Builds a collection of `SocketAddr` instances from the port range and given ip address
pub fn make_socket_addrs(&self, addr: impl Into<IpAddr>) -> Vec<SocketAddr> {
let mut socket_addrs = Vec::new();
@ -31,14 +46,17 @@ impl PortRange {
socket_addrs
}
/// Returns true if port range represents the ephemeral port.
#[inline]
pub fn is_ephemeral(&self) -> bool {
self == &Self::EPHEMERAL
}
}
impl From<u16> for PortRange {
fn from(port: u16) -> Self {
Self {
start: port,
end: None,
}
Self::single(port)
}
}
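A compact sketch of the new `EPHEMERAL` and `single` helpers above, re-implemented on a hypothetical mirror of `PortRange` (not the crate's type) to show how port 0 stands in for an OS-assigned ephemeral port:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Hypothetical mirror of PortRange: a start port and optional end port.
struct PortRange {
    start: u16,
    end: Option<u16>,
}

impl PortRange {
    // Port 0 asks the OS to assign an ephemeral port at bind time.
    const EPHEMERAL: Self = Self { start: 0, end: None };

    fn single(port: u16) -> Self {
        Self { start: port, end: None }
    }

    fn make_socket_addrs(&self, addr: impl Into<IpAddr>) -> Vec<SocketAddr> {
        let ip = addr.into();
        let end = self.end.unwrap_or(self.start);
        (self.start..=end).map(|port| SocketAddr::new(ip, port)).collect()
    }
}

fn main() {
    let ephemeral = PortRange::EPHEMERAL.make_socket_addrs(Ipv4Addr::LOCALHOST);
    assert_eq!(ephemeral, vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)]);

    let single = PortRange::single(8080).make_socket_addrs(Ipv4Addr::LOCALHOST);
    assert_eq!(single.len(), 1);
}
```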

@ -2,7 +2,7 @@
name = "distant-ssh2"
description = "Library to enable native ssh-2 protocol for use with distant sessions"
categories = ["network-programming"]
version = "0.20.0-alpha.4"
version = "0.20.0-alpha.5"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -20,7 +20,7 @@ async-compat = "0.2.1"
async-once-cell = "0.4.4"
async-trait = "0.1.68"
derive_more = { version = "0.99.17", default-features = false, features = ["display", "error"] }
distant-core = { version = "=0.20.0-alpha.4", path = "../distant-core" }
distant-core = { version = "=0.20.0-alpha.5", path = "../distant-core" }
futures = "0.3.28"
hex = "0.4.3"
log = "0.4.17"

@ -1,44 +1,21 @@
use crate::{
config::{CommonConfig, Config},
paths, CliResult,
};
use clap::Parser;
use std::{ffi::OsString, path::PathBuf};
use crate::options::DistantSubcommand;
use crate::{CliResult, Options};
use std::ffi::OsString;
mod cache;
mod client;
mod commands;
mod manager;
mod spawner;
mod common;
pub(crate) use cache::Cache;
pub(crate) use client::Client;
use commands::DistantSubcommand;
pub(crate) use manager::Manager;
pub(crate) use common::Cache;
pub(crate) use common::Client;
pub(crate) use common::Manager;
#[cfg_attr(unix, allow(unused_imports))]
pub(crate) use spawner::Spawner;
pub(crate) use common::Spawner;
/// Represents the primary CLI entrypoint
#[derive(Debug)]
pub struct Cli {
common: CommonConfig,
command: DistantSubcommand,
config: Config,
}
#[derive(Debug, Parser)]
#[clap(author, version, about)]
#[clap(name = "distant")]
struct Opt {
#[clap(flatten)]
common: CommonConfig,
/// Configuration file to load instead of the default paths
#[clap(short = 'c', long = "config", global = true, value_parser)]
config_path: Option<PathBuf>,
#[clap(subcommand)]
command: DistantSubcommand,
options: Options,
}
impl Cli {
@ -53,54 +30,8 @@ impl Cli {
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
// NOTE: We should NOT provide context here as printing help and version are both
// reported this way and providing context puts them under the "caused by" section
let Opt {
mut common,
config_path,
command,
} = Opt::try_parse_from(args)?;
// Try to load a configuration file, defaulting if no config file is found
let config = Config::load_multi(config_path)?;
// Extract the common config from our config file
let config_common = match &command {
DistantSubcommand::Client(_) => config.client.common.clone(),
DistantSubcommand::Generate(_) => config.generate.common.clone(),
DistantSubcommand::Manager(_) => config.manager.common.clone(),
DistantSubcommand::Server(_) => config.server.common.clone(),
};
// Blend common configs together
common.log_file = common.log_file.or(config_common.log_file);
common.log_level = common.log_level.or(config_common.log_level);
// Assign the appropriate log file based on client/manager/server
if common.log_file.is_none() {
// NOTE: We assume that any of these commands will log to the user-specific path
// and that services that run manager will explicitly override the
// log file path
common.log_file = Some(match &command {
DistantSubcommand::Client(_) => paths::user::CLIENT_LOG_FILE_PATH.to_path_buf(),
DistantSubcommand::Server(_) => paths::user::SERVER_LOG_FILE_PATH.to_path_buf(),
DistantSubcommand::Generate(_) => paths::user::GENERATE_LOG_FILE_PATH.to_path_buf(),
// If we are listening as a manager, then we want to log to a manager-specific file
DistantSubcommand::Manager(cmd) if cmd.is_listen() => {
paths::user::MANAGER_LOG_FILE_PATH.to_path_buf()
}
// Otherwise, if we are performing some operation as a client talking to the
// manager, then we want to log to the client file
DistantSubcommand::Manager(_) => paths::user::CLIENT_LOG_FILE_PATH.to_path_buf(),
});
}
Ok(Cli {
common,
command,
config,
options: Options::load_from(args)?,
})
}
@ -117,7 +48,8 @@ impl Cli {
for module in modules {
builder.module(
module,
self.common
self.options
.logging
.log_level
.unwrap_or_default()
.to_log_level_filter(),
@ -130,7 +62,7 @@ impl Cli {
// Assign our log output to a file
// NOTE: We can unwrap here as we assign the log file earlier
let logger = logger.log_to_file(
FileSpec::try_from(self.common.log_file.as_ref().unwrap())
FileSpec::try_from(self.options.logging.log_file.as_ref().unwrap())
.expect("Failed to create log file spec"),
);
@ -139,7 +71,7 @@ impl Cli {
#[cfg(windows)]
pub fn is_manager_listen_command(&self) -> bool {
match &self.command {
match &self.options.command {
DistantSubcommand::Manager(cmd) => cmd.is_listen(),
_ => false,
}
@ -147,11 +79,11 @@ impl Cli {
/// Runs the CLI
pub fn run(self) -> CliResult {
match self.command {
DistantSubcommand::Client(cmd) => cmd.run(self.config.client),
DistantSubcommand::Generate(cmd) => cmd.run(self.config.generate),
DistantSubcommand::Manager(cmd) => cmd.run(self.config.manager),
DistantSubcommand::Server(cmd) => cmd.run(self.config.server),
match self.options.command {
DistantSubcommand::Client(cmd) => commands::client::run(cmd),
DistantSubcommand::Generate(cmd) => commands::generate::run(cmd),
DistantSubcommand::Manager(cmd) => commands::manager::run(cmd),
DistantSubcommand::Server(cmd) => commands::server::run(cmd),
}
}
}

@ -1,26 +1,5 @@
use clap::Subcommand;
mod client;
mod generate;
mod manager;
mod server;
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Subcommand)]
pub enum DistantSubcommand {
/// Perform client commands
#[clap(subcommand)]
Client(client::ClientSubcommand),
/// Perform manager commands
#[clap(subcommand)]
Manager(manager::ManagerSubcommand),
/// Perform server commands
#[clap(subcommand)]
Server(server::ServerSubcommand),
/// Perform generation commands
#[clap(subcommand)]
Generate(generate::GenerateSubcommand),
}
pub mod client;
mod common;
pub mod generate;
pub mod manager;
pub mod server;

File diff suppressed because it is too large

@ -1,4 +1,5 @@
use super::{link::RemoteProcessLink, CliError, CliResult};
use super::super::common::RemoteProcessLink;
use super::{CliError, CliResult};
use anyhow::Context;
use distant_core::{data::PtySize, DistantChannel, RemoteLspCommand};
use std::path::PathBuf;
@ -17,6 +18,7 @@ impl Lsp {
cmd: impl Into<String>,
current_dir: Option<PathBuf>,
pty: bool,
max_chunk_size: usize,
) -> CliResult {
let cmd = cmd.into();
let mut proc = RemoteLspCommand::new()
@ -37,6 +39,7 @@ impl Lsp {
proc.stdin.take(),
proc.stdout.take().unwrap(),
proc.stderr.take().unwrap(),
max_chunk_size,
);
let status = proc.wait().await.context("Failed to wait for process")?;

@ -1,4 +1,5 @@
use super::{link::RemoteProcessLink, CliError, CliResult};
use super::super::common::RemoteProcessLink;
use super::{CliError, CliResult};
use anyhow::Context;
use distant_core::{
data::{Environment, PtySize},
@ -27,6 +28,7 @@ impl Shell {
cmd: impl Into<Option<String>>,
mut environment: Environment,
current_dir: Option<PathBuf>,
max_chunk_size: usize,
) -> CliResult {
// Automatically add TERM=xterm-256color if not specified
if !environment.contains_key("TERM") {
@ -116,6 +118,7 @@ impl Shell {
None,
proc.stdout.take().unwrap(),
proc.stderr.take().unwrap(),
max_chunk_size,
);
// Continually loop to check for terminal resize changes while the process is still running

@ -0,0 +1,8 @@
mod buf;
mod format;
mod link;
pub mod stdin;
pub use buf::*;
pub use format::*;
pub use link::*;

@ -1,4 +1,4 @@
use clap::ValueEnum;
use crate::options::Format;
use distant_core::{
data::{
ChangeKind, DistantMsg, DistantResponseData, Error, FileType, Metadata,
@ -14,30 +14,6 @@ use std::{
};
use tabled::{object::Rows, style::Style, Alignment, Disable, Modify, Table, Tabled};
#[derive(Copy, Clone, Debug, PartialEq, Eq, ValueEnum)]
#[clap(rename_all = "snake_case")]
pub enum Format {
/// Sends and receives data in JSON format
Json,
/// Commands are traditional shell commands and output responses are
/// inline with what is expected of a program's output in a shell
Shell,
}
impl Format {
/// Returns true if json format
pub fn is_json(self) -> bool {
matches!(self, Self::Json)
}
}
impl Default for Format {
fn default() -> Self {
Self::Shell
}
}
#[derive(Default)]
struct FormatterState {
/// Last seen path during search

@ -1,5 +1,4 @@
use super::stdin;
use crate::constants::MAX_PIPE_CHUNK_SIZE;
use distant_core::{
RemoteLspStderr, RemoteLspStdin, RemoteLspStdout, RemoteStderr, RemoteStdin, RemoteStdout,
};
@ -20,11 +19,11 @@ pub struct RemoteProcessLink {
}
macro_rules! from_pipes {
($stdin:expr, $stdout:expr, $stderr:expr) => {{
($stdin:expr, $stdout:expr, $stderr:expr, $buffer:expr) => {{
let mut stdin_thread = None;
let mut stdin_task = None;
if let Some(mut stdin_handle) = $stdin {
let (thread, mut rx) = stdin::spawn_channel(MAX_PIPE_CHUNK_SIZE);
let (thread, mut rx) = stdin::spawn_channel($buffer);
let task = tokio::spawn(async move {
loop {
if let Some(input) = rx.recv().await {
@ -77,22 +76,30 @@ macro_rules! from_pipes {
}
impl RemoteProcessLink {
/// Creates a new process link from the pipes of a remote process
/// Creates a new process link from the pipes of a remote process.
///
/// `max_pipe_chunk_size` represents the maximum size (in bytes) of data that will be read from
/// stdin at one time to forward to the remote process.
pub fn from_remote_pipes(
stdin: Option<RemoteStdin>,
mut stdout: RemoteStdout,
mut stderr: RemoteStderr,
max_pipe_chunk_size: usize,
) -> Self {
from_pipes!(stdin, stdout, stderr)
from_pipes!(stdin, stdout, stderr, max_pipe_chunk_size)
}
/// Creates a new process link from the pipes of a remote LSP server process
/// Creates a new process link from the pipes of a remote LSP server process.
///
/// `max_pipe_chunk_size` represents the maximum size (in bytes) of data that will be read from
/// stdin at one time to forward to the remote process.
pub fn from_remote_lsp_pipes(
stdin: Option<RemoteLspStdin>,
mut stdout: RemoteLspStdout,
mut stderr: RemoteLspStderr,
max_pipe_chunk_size: usize,
) -> Self {
from_pipes!(stdin, stdout, stderr)
from_pipes!(stdin, stdout, stderr, max_pipe_chunk_size)
}
/// Shuts down the link, aborting any running tasks, and swallowing join errors
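The new `max_pipe_chunk_size` parameter above bounds how much stdin is read and forwarded at once. A simplified, synchronous sketch of that idea follows; the real link does this with async tasks and channels, so the function name and signature here are illustrative only.

```rust
use std::io::{self, Read};

// Read stdin at most `max_chunk_size` bytes at a time and hand each chunk
// to a sink, roughly what the stdin-forwarding side of the link does.
fn forward_stdin_in_chunks(max_chunk_size: usize, mut send: impl FnMut(&[u8])) -> io::Result<()> {
    let mut buf = vec![0u8; max_chunk_size];
    let mut stdin = io::stdin();
    loop {
        let n = stdin.read(&mut buf)?;
        if n == 0 {
            break; // EOF: nothing more to forward
        }
        send(&buf[..n]);
    }
    Ok(())
}

fn main() -> io::Result<()> {
    // Forward stdin in 8 KiB chunks, printing each chunk's size.
    forward_stdin_in_chunks(8192, |chunk| println!("forwarding {} bytes", chunk.len()))
}
```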

@ -1,107 +1,72 @@
use crate::{
cli::Opt,
config::{Config, GenerateConfig},
CliResult,
};
use crate::options::{Config, GenerateSubcommand};
use crate::{CliResult, Options};
use anyhow::Context;
use clap::{CommandFactory, Subcommand};
use clap_complete::{generate as clap_generate, Shell};
use distant_core::{
net::common::{Request, Response},
DistantMsg, DistantRequestData, DistantResponseData,
};
use std::{fs, io, path::PathBuf};
use clap::CommandFactory;
use clap_complete::generate as clap_generate;
use distant_core::net::common::{Request, Response};
use distant_core::{DistantMsg, DistantRequestData, DistantResponseData};
use std::{fs, io};
#[derive(Debug, Subcommand)]
pub enum GenerateSubcommand {
/// Generate configuration file with base settings
Config {
/// Path to where the configuration file should be created
file: PathBuf,
},
/// Generate JSON schema for server request/response
Schema {
/// If specified, will output to the file at the given path instead of stdout
#[clap(long)]
file: Option<PathBuf>,
},
// Generate completion info for CLI
Completion {
/// If specified, will output to the file at the given path instead of stdout
#[clap(long)]
file: Option<PathBuf>,
/// Specific shell to target for the generated output
#[clap(value_enum, value_parser)]
shell: Shell,
},
pub fn run(cmd: GenerateSubcommand) -> CliResult {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async_run(cmd))
}
impl GenerateSubcommand {
pub fn run(self, _config: GenerateConfig) -> CliResult {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(Self::async_run(self))
}
async fn async_run(self) -> CliResult {
match self {
Self::Config { file } => tokio::fs::write(file, Config::default_raw_str())
.await
.context("Failed to write default config to {file:?}")?,
async fn async_run(cmd: GenerateSubcommand) -> CliResult {
match cmd {
GenerateSubcommand::Config { file } => tokio::fs::write(file, Config::default_raw_str())
.await
.context("Failed to write default config to {file:?}")?,
Self::Schema { file } => {
let request_schema =
serde_json::to_value(&Request::<DistantMsg<DistantRequestData>>::root_schema())
.context("Failed to serialize request schema")?;
let response_schema = serde_json::to_value(&Response::<
DistantMsg<DistantResponseData>,
>::root_schema())
.context("Failed to serialize response schema")?;
GenerateSubcommand::Schema { file } => {
let request_schema =
serde_json::to_value(&Request::<DistantMsg<DistantRequestData>>::root_schema())
.context("Failed to serialize request schema")?;
let response_schema =
serde_json::to_value(&Response::<DistantMsg<DistantResponseData>>::root_schema())
.context("Failed to serialize response schema")?;
let schema = serde_json::json!({
"request": request_schema,
"response": response_schema,
});
let schema = serde_json::json!({
"request": request_schema,
"response": response_schema,
});
if let Some(path) = file {
serde_json::to_writer_pretty(
&mut fs::OpenOptions::new()
.create(true)
.write(true)
.open(&path)
.with_context(|| format!("Failed to open {path:?}"))?,
&schema,
)
.context("Failed to write to {path:?}")?;
} else {
serde_json::to_writer_pretty(&mut io::stdout(), &schema)
.context("Failed to print to stdout")?;
}
if let Some(path) = file {
serde_json::to_writer_pretty(
&mut fs::OpenOptions::new()
.create(true)
.write(true)
.open(&path)
.with_context(|| format!("Failed to open {path:?}"))?,
&schema,
)
.context("Failed to write to {path:?}")?;
} else {
serde_json::to_writer_pretty(&mut io::stdout(), &schema)
.context("Failed to print to stdout")?;
}
}
Self::Completion { file, shell } => {
let name = "distant";
let mut cmd = Opt::command();
GenerateSubcommand::Completion { file, shell } => {
let name = "distant";
let mut cmd = Options::command();
if let Some(path) = file {
clap_generate(
shell,
&mut cmd,
name,
&mut fs::OpenOptions::new()
.create(true)
.write(true)
.open(&path)
.with_context(|| format!("Failed to open {path:?}"))?,
)
} else {
clap_generate(shell, &mut cmd, name, &mut io::stdout())
}
if let Some(path) = file {
clap_generate(
shell,
&mut cmd,
name,
&mut fs::OpenOptions::new()
.create(true)
.write(true)
.open(&path)
.with_context(|| format!("Failed to open {path:?}"))?,
)
} else {
clap_generate(shell, &mut cmd, name, &mut io::stdout())
}
}
Ok(())
}
Ok(())
}

@ -1,18 +1,19 @@
use crate::{
cli::{Cache, Client, Manager},
config::{AccessControl, ManagerConfig, NetworkConfig},
paths::user::CACHE_FILE_PATH_STR,
CliResult,
};
use crate::cli::common::{MsgReceiver, MsgSender};
use crate::cli::{Cache, Client, Manager};
use crate::options::{Format, ManagerServiceSubcommand, ManagerSubcommand, NetworkSettings};
use crate::{CliError, CliResult};
use anyhow::Context;
use clap::{Subcommand, ValueHint};
use dialoguer::{console::Term, theme::ColorfulTheme, Select};
use distant_core::net::common::ConnectionId;
use distant_core::net::manager::{Config as NetManagerConfig, ConnectHandler, LaunchHandler};
use distant_core::net::manager::{
Config as NetManagerConfig, ConnectHandler, LaunchHandler, ManagerClient,
};
use log::*;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use service_manager::{
ServiceInstallCtx, ServiceLabel, ServiceLevel, ServiceManager, ServiceManagerKind,
ServiceStartCtx, ServiceStopCtx, ServiceUninstallCtx,
ServiceInstallCtx, ServiceLabel, ServiceLevel, ServiceManager, ServiceStartCtx, ServiceStopCtx,
ServiceUninstallCtx,
};
use std::{collections::HashMap, ffi::OsString, path::PathBuf};
use tabled::{Table, Tabled};
@ -26,442 +27,513 @@ static SERVICE_LABEL: Lazy<ServiceLabel> = Lazy::new(|| ServiceLabel {
mod handlers;
#[derive(Debug, Subcommand)]
pub enum ManagerSubcommand {
/// Interact with a manager being run by a service management platform
#[clap(subcommand)]
Service(ManagerServiceSubcommand),
/// Listen for incoming requests as a manager
Listen {
/// Type of access to apply to created unix socket or windows pipe
#[clap(long, value_enum)]
access: Option<AccessControl>,
/// If specified, will fork the process to run as a standalone daemon
#[clap(long)]
daemon: bool,
/// If specified, will listen on a user-local unix socket or local windows named pipe
#[clap(long)]
user: bool,
#[clap(flatten)]
network: NetworkConfig,
},
/// Retrieve a list of capabilities that the manager supports
Capabilities {
#[clap(flatten)]
network: NetworkConfig,
},
/// Retrieve information about a specific connection
Info {
id: ConnectionId,
#[clap(flatten)]
network: NetworkConfig,
},
/// List information about all connections
List {
#[clap(flatten)]
network: NetworkConfig,
/// Location to store cached data
#[clap(
long,
value_hint = ValueHint::FilePath,
value_parser,
default_value = CACHE_FILE_PATH_STR.as_str()
)]
cache: PathBuf,
},
/// Kill a specific connection
Kill {
#[clap(flatten)]
network: NetworkConfig,
id: ConnectionId,
},
pub fn run(cmd: ManagerSubcommand) -> CliResult {
match &cmd {
ManagerSubcommand::Listen { daemon, .. } if *daemon => run_daemon(cmd),
_ => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async_run(cmd))
}
}
}
#[derive(Debug, Subcommand)]
pub enum ManagerServiceSubcommand {
/// Start the manager as a service
Start {
/// Type of service manager used to run this service, defaulting to platform native
#[clap(long, value_enum)]
kind: Option<ServiceManagerKind>,
/// If specified, starts as a user-level service
#[clap(long)]
user: bool,
},
/// Stop the manager as a service
Stop {
#[clap(long, value_enum)]
kind: Option<ServiceManagerKind>,
/// If specified, stops a user-level service
#[clap(long)]
user: bool,
},
/// Install the manager as a service
Install {
#[clap(long, value_enum)]
kind: Option<ServiceManagerKind>,
/// If specified, installs as a user-level service
#[clap(long)]
user: bool,
},
/// Uninstall the manager as a service
Uninstall {
#[clap(long, value_enum)]
kind: Option<ServiceManagerKind>,
/// If specified, uninstalls a user-level service
#[clap(long)]
user: bool,
},
#[cfg(windows)]
fn run_daemon(_cmd: ManagerSubcommand) -> CliResult {
use crate::cli::Spawner;
let pid = Spawner::spawn_running_background(Vec::new())
.context("Failed to spawn background process")?;
println!("[distant manager detached, pid = {}]", pid);
Ok(())
}
impl ManagerSubcommand {
/// Returns true if the manager subcommand is listen
pub fn is_listen(&self) -> bool {
matches!(self, Self::Listen { .. })
}
#[cfg(unix)]
fn run_daemon(cmd: ManagerSubcommand) -> CliResult {
use fork::{daemon, Fork};
pub fn run(self, config: ManagerConfig) -> CliResult {
match &self {
Self::Listen { daemon, .. } if *daemon => Self::run_daemon(self, config),
_ => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(Self::async_run(self, config))
debug!("Forking process");
match daemon(true, true) {
Ok(Fork::Child) => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async { async_run(cmd).await })?;
Ok(())
}
Ok(Fork::Parent(pid)) => {
println!("[distant manager detached, pid = {pid}]");
if fork::close_fd().is_err() {
Err(CliError::Error(anyhow::anyhow!("Fork failed to close fd")))
} else {
Ok(())
}
}
Err(_) => Err(CliError::Error(anyhow::anyhow!("Fork failed"))),
}
}
#[cfg(windows)]
fn run_daemon(self, _config: ManagerConfig) -> CliResult {
use crate::cli::Spawner;
let pid = Spawner::spawn_running_background(Vec::new())
.context("Failed to spawn background process")?;
println!("[distant manager detached, pid = {}]", pid);
Ok(())
}
#[cfg(unix)]
fn run_daemon(self, config: ManagerConfig) -> CliResult {
use crate::CliError;
use fork::{daemon, Fork};
async fn async_run(cmd: ManagerSubcommand) -> CliResult {
match cmd {
ManagerSubcommand::Service(ManagerServiceSubcommand::Start { kind, user }) => {
debug!("Starting manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
debug!("Forking process");
match daemon(true, true) {
Ok(Fork::Child) => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async { Self::async_run(self, config).await })?;
Ok(())
}
Ok(Fork::Parent(pid)) => {
println!("[distant manager detached, pid = {pid}]");
if fork::close_fd().is_err() {
Err(CliError::Error(anyhow::anyhow!("Fork failed to close fd")))
} else {
Ok(())
}
if user {
manager
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
}
Err(_) => Err(CliError::Error(anyhow::anyhow!("Fork failed"))),
manager
.start(ServiceStartCtx {
label: SERVICE_LABEL.clone(),
})
.context("Failed to start service")?;
Ok(())
}
}
ManagerSubcommand::Service(ManagerServiceSubcommand::Stop { kind, user }) => {
debug!("Stopping manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
async fn async_run(self, config: ManagerConfig) -> CliResult {
match self {
Self::Service(ManagerServiceSubcommand::Start { kind, user }) => {
debug!("Starting manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
if user {
manager
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
}
if user {
manager
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
}
manager
.stop(ServiceStopCtx {
label: SERVICE_LABEL.clone(),
})
.context("Failed to stop service")?;
Ok(())
}
ManagerSubcommand::Service(ManagerServiceSubcommand::Install { kind, user }) => {
debug!("Installing manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
let mut args = vec![OsString::from("manager"), OsString::from("listen")];
if user {
args.push(OsString::from("--user"));
manager
.start(ServiceStartCtx {
label: SERVICE_LABEL.clone(),
})
.context("Failed to start service")?;
Ok(())
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
}
Self::Service(ManagerServiceSubcommand::Stop { kind, user }) => {
debug!("Stopping manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
if user {
manager
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
}
manager
.install(ServiceInstallCtx {
label: SERVICE_LABEL.clone(),
// distant manager listen
program: std::env::current_exe()
.ok()
.unwrap_or_else(|| PathBuf::from("distant")),
args,
})
.context("Failed to install service")?;
Ok(())
}
ManagerSubcommand::Service(ManagerServiceSubcommand::Uninstall { kind, user }) => {
debug!("Uninstalling manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
if user {
manager
.stop(ServiceStopCtx {
label: SERVICE_LABEL.clone(),
})
.context("Failed to stop service")?;
Ok(())
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
}
Self::Service(ManagerServiceSubcommand::Install { kind, user }) => {
debug!("Installing manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
let mut args = vec![OsString::from("manager"), OsString::from("listen")];
if user {
args.push(OsString::from("--user"));
manager
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
manager
.uninstall(ServiceUninstallCtx {
label: SERVICE_LABEL.clone(),
})
.context("Failed to uninstall service")?;
Ok(())
}
ManagerSubcommand::Listen {
access,
daemon: _daemon,
network,
user,
} => {
let access = access.unwrap_or_default();
info!(
"Starting manager (network = {})",
if cfg!(windows) && network.windows_pipe.is_some() {
format!("custom:windows:{}", network.windows_pipe.as_ref().unwrap())
} else if cfg!(unix) && network.unix_socket.is_some() {
format!("custom:unix:{:?}", network.unix_socket.as_ref().unwrap())
} else if user {
"user".to_string()
} else {
"global".to_string()
}
);
let manager_ref = Manager {
access,
config: NetManagerConfig {
user,
launch_handlers: {
let mut handlers: HashMap<String, Box<dyn LaunchHandler>> = HashMap::new();
handlers.insert(
"manager".to_string(),
Box::new(handlers::ManagerLaunchHandler::new()),
);
#[cfg(any(feature = "libssh", feature = "ssh2"))]
handlers.insert("ssh".to_string(), Box::new(handlers::SshLaunchHandler));
handlers
},
connect_handlers: {
let mut handlers: HashMap<String, Box<dyn ConnectHandler>> = HashMap::new();
manager
.install(ServiceInstallCtx {
label: SERVICE_LABEL.clone(),
handlers.insert(
"distant".to_string(),
Box::new(handlers::DistantConnectHandler),
);
// distant manager listen
program: std::env::current_exe()
.ok()
.unwrap_or_else(|| PathBuf::from("distant")),
args,
})
.context("Failed to install service")?;
#[cfg(any(feature = "libssh", feature = "ssh2"))]
handlers.insert("ssh".to_string(), Box::new(handlers::SshConnectHandler));
Ok(())
handlers
},
..Default::default()
},
network,
}
Self::Service(ManagerServiceSubcommand::Uninstall { kind, user }) => {
debug!("Uninstalling manager service via {:?}", kind);
let mut manager = <dyn ServiceManager>::target_or_native(kind)
.context("Failed to detect native service manager")?;
if user {
manager
.set_level(ServiceLevel::User)
.context("Failed to set service manager to user level")?;
.listen()
.await
.context("Failed to start manager")?;
// Let our server run to completion
manager_ref
.as_ref()
.polling_wait()
.await
.context("Failed to wait on manager")?;
info!("Manager is shutting down");
Ok(())
}
ManagerSubcommand::Capabilities { format, network } => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
debug!("Getting list of capabilities");
let caps = client
.capabilities()
.await
.context("Failed to get list of capabilities")?;
debug!("Got capabilities: {caps:?}");
match format {
Format::Json => {
println!(
"{}",
serde_json::to_string(&caps)
.context("Failed to format capabilities as json")?
);
}
manager
.uninstall(ServiceUninstallCtx {
label: SERVICE_LABEL.clone(),
})
.context("Failed to uninstall service")?;
Format::Shell => {
#[derive(Tabled)]
struct CapabilityRow {
kind: String,
description: String,
}
Ok(())
println!(
"{}",
Table::new(caps.into_sorted_vec().into_iter().map(|cap| {
CapabilityRow {
kind: cap.kind,
description: cap.description,
}
}))
);
}
}
Self::Listen {
access,
network,
user,
..
} => {
let access = access.or(config.access).unwrap_or_default();
let network = network.merge(config.network);
info!(
"Starting manager (network = {})",
if (cfg!(windows) && network.windows_pipe.is_some())
|| (cfg!(unix) && network.unix_socket.is_some())
{
"custom"
} else if user {
"user"
} else {
"global"
Ok(())
}
ManagerSubcommand::Info {
format,
id,
network,
} => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
debug!("Getting info about connection {}", id);
let info = client
.info(id)
.await
.context("Failed to get info about connection")?;
debug!("Got info: {info:?}");
match format {
Format::Json => {
println!(
"{}",
serde_json::to_string(&info)
.context("Failed to format connection info as json")?
);
}
Format::Shell => {
#[derive(Tabled)]
struct InfoRow {
id: ConnectionId,
scheme: String,
host: String,
port: String,
options: String,
}
);
let manager_ref = Manager {
access,
config: NetManagerConfig {
user,
launch_handlers: {
let mut handlers: HashMap<String, Box<dyn LaunchHandler>> =
HashMap::new();
handlers.insert(
"manager".to_string(),
Box::new(handlers::ManagerLaunchHandler::new()),
);
#[cfg(any(feature = "libssh", feature = "ssh2"))]
handlers
.insert("ssh".to_string(), Box::new(handlers::SshLaunchHandler));
handlers
},
connect_handlers: {
let mut handlers: HashMap<String, Box<dyn ConnectHandler>> =
HashMap::new();
handlers.insert(
"distant".to_string(),
Box::new(handlers::DistantConnectHandler),
);
#[cfg(any(feature = "libssh", feature = "ssh2"))]
handlers
.insert("ssh".to_string(), Box::new(handlers::SshConnectHandler));
handlers
},
..Default::default()
},
network,
println!(
"{}",
Table::new(vec![InfoRow {
id: info.id,
scheme: info.destination.scheme.unwrap_or_default(),
host: info.destination.host.to_string(),
port: info
.destination
.port
.map(|x| x.to_string())
.unwrap_or_default(),
options: info.options.to_string()
}])
);
}
.listen()
}
Ok(())
}
ManagerSubcommand::List {
cache,
format,
network,
} => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
debug!("Getting list of connections");
let list = client
.list()
.await
.context("Failed to start manager")?;
.context("Failed to get list of connections")?;
debug!("Got list: {list:?}");
// Let our server run to completion
manager_ref
.as_ref()
.polling_wait()
.await
.context("Failed to wait on manager")?;
info!("Manager is shutting down");
debug!("Looking up selected connection");
let selected = Cache::read_from_disk_or_default(cache)
.await
.context("Failed to look up selected connection")?
.data
.selected;
debug!("Using selected: {selected}");
match format {
Format::Json => {
println!(
"{}",
serde_json::to_string(&list)
.context("Failed to format connection list as json")?
);
}
Format::Shell => {
#[derive(Tabled)]
struct ListRow {
selected: bool,
id: ConnectionId,
scheme: String,
host: String,
port: String,
}
Ok(())
}
Self::Capabilities { network } => {
let network = network.merge(config.network);
debug!("Getting list of capabilities");
let caps = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?
.capabilities()
.await
.context("Failed to get list of capabilities")?;
debug!("Got capabilities: {caps:?}");
#[derive(Tabled)]
struct CapabilityRow {
kind: String,
description: String,
println!(
"{}",
Table::new(list.into_iter().map(|(id, destination)| {
ListRow {
selected: *selected == id,
id,
scheme: destination.scheme.unwrap_or_default(),
host: destination.host.to_string(),
port: destination.port.map(|x| x.to_string()).unwrap_or_default(),
}
}))
);
}
}
println!(
"{}",
Table::new(caps.into_sorted_vec().into_iter().map(|cap| {
CapabilityRow {
kind: cap.kind,
description: cap.description,
}
}))
);
Ok(())
}
ManagerSubcommand::Kill {
format,
id,
network,
} => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
debug!("Killing connection {}", id);
client
.kill(id)
.await
.with_context(|| format!("Failed to kill connection to server {id}"))?;
Ok(())
debug!("Connection killed");
match format {
Format::Json => println!("{}", json!({"type": "ok"})),
Format::Shell => (),
}
Self::Info { network, id } => {
let network = network.merge(config.network);
debug!("Getting info about connection {}", id);
let info = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?
.info(id)
.await
.context("Failed to get info about connection")?;
debug!("Got info: {info:?}");
#[derive(Tabled)]
struct InfoRow {
id: ConnectionId,
scheme: String,
host: String,
port: String,
options: String,
}
println!(
"{}",
Table::new(vec![InfoRow {
id: info.id,
scheme: info.destination.scheme.unwrap_or_default(),
host: info.destination.host.to_string(),
port: info
.destination
.port
.map(|x| x.to_string())
.unwrap_or_default(),
options: info.options.to_string()
}])
);
Ok(())
}
ManagerSubcommand::Select {
cache,
connection,
format,
network,
} => {
let mut cache = Cache::read_from_disk_or_default(cache)
.await
.context("Failed to look up cache")?;
Ok(())
}
Self::List { network, cache } => {
let network = network.merge(config.network);
debug!("Getting list of connections");
let list = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?
.list()
.await
.context("Failed to get list of connections")?;
debug!("Got list: {list:?}");
debug!("Looking up selected connection");
let selected = Cache::read_from_disk_or_default(cache)
.await
.context("Failed to look up selected connection")?
.data
.selected;
debug!("Using selected: {selected}");
#[derive(Tabled)]
struct ListRow {
selected: bool,
id: ConnectionId,
scheme: String,
host: String,
port: String,
match connection {
Some(id) => {
*cache.data.selected = id;
cache.write_to_disk().await?;
Ok(())
}
None => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
let list = client
.list()
.await
.context("Failed to get a list of managed connections")?;
if list.is_empty() {
return Err(CliError::Error(anyhow::anyhow!(
"No connection available in manager"
)));
}
println!(
"{}",
Table::new(list.into_iter().map(|(id, destination)| {
ListRow {
selected: *selected == id,
id,
scheme: destination.scheme.unwrap_or_default(),
host: destination.host.to_string(),
port: destination.port.map(|x| x.to_string()).unwrap_or_default(),
// Figure out the current selection
let current = list
.iter()
.enumerate()
.find_map(|(i, (id, _))| {
if *cache.data.selected == *id {
Some(i)
} else {
None
}
})
.unwrap_or_default();
trace!("Building selection prompt of {} choices", list.len());
let items: Vec<String> = list
.iter()
.map(|(_, destination)| {
format!(
"{}{}{}",
destination
.scheme
.as_ref()
.map(|scheme| format!(r"{scheme}://"))
.unwrap_or_default(),
destination.host,
destination
.port
.map(|port| format!(":{port}"))
.unwrap_or_default()
)
})
.collect();
// Prompt for a selection, with None meaning no change
let selected = match format {
Format::Shell => {
trace!("Rendering prompt");
Select::with_theme(&ColorfulTheme::default())
.items(&items)
.default(current)
.interact_on_opt(&Term::stderr())
.context("Failed to render prompt")?
}
}))
);
Ok(())
}
Self::Kill { network, id } => {
let network = network.merge(config.network);
debug!("Killing connection {}", id);
Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?
.kill(id)
.await
.with_context(|| format!("Failed to kill connection to server {id}"))?;
debug!("Connection killed");
Ok(())
Format::Json => {
// Print out choices
MsgSender::from_stdout()
.send_blocking(&json!({
"type": "select",
"choices": items,
"current": current,
}))
.context("Failed to send JSON choices")?;
// Wait for a response
let msg = MsgReceiver::from_stdin()
.recv_blocking::<Value>()
.context("Failed to receive JSON selection")?;
// Verify the response type is "selected"
match msg.get("type") {
Some(value) if value == "selected" => msg
.get("choice")
.and_then(|value| value.as_u64())
.map(|choice| choice as usize),
Some(value) => {
return Err(CliError::Error(anyhow::anyhow!(
"Unexpected 'type' field value: {value}"
)))
}
None => {
return Err(CliError::Error(anyhow::anyhow!(
"Missing 'type' field"
)))
}
}
}
};
match selected {
Some(index) => {
trace!("Selected choice {}", index);
if let Some((id, _)) = list.iter().nth(index) {
debug!("Updating selected connection id in cache to {}", id);
*cache.data.selected = *id;
cache.write_to_disk().await?;
}
Ok(())
}
None => {
debug!("No change in selection of default connection id");
Ok(())
}
}
}
}
}
}
}
async fn connect_to_manager(
format: Format,
network: NetworkSettings,
) -> anyhow::Result<ManagerClient> {
debug!("Connecting to manager");
Ok(match format {
Format::Shell => Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?,
Format::Json => Client::new(network)
.using_json_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?,
})
}
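As a reference for the JSON mode of the select handler above: the prompt exchange reduces to a `select` message emitted on stdout and a `selected` reply read back from stdin. Below is a minimal sketch of just those message shapes using `serde_json`; the `MsgSender`/`MsgReceiver` framing around them is assumed and not shown.

use serde_json::{json, Value};

fn main() -> serde_json::Result<()> {
    // Shape of the message the CLI emits when a selection is required
    let prompt = json!({
        "type": "select",
        "choices": ["ssh://example.com", "ssh://other.host:2222"],
        "current": 0,
    });
    println!("{prompt}");

    // Shape of the reply a driving process is expected to write back
    let reply: Value = serde_json::from_str(r#"{"type": "selected", "choice": 1}"#)?;

    // Mirrors the validation in the handler: check "type", then pull "choice" as u64
    let choice = match reply.get("type") {
        Some(v) if v == "selected" => reply.get("choice").and_then(Value::as_u64),
        _ => None,
    };
    assert_eq!(choice, Some(1));
    Ok(())
}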

@ -1,4 +1,4 @@
use crate::config::ClientLaunchConfig;
use crate::options::ClientLaunchConfig;
use async_trait::async_trait;
use distant_core::net::client::{Client, ClientConfig, ReconnectStrategy, UntypedClient};
use distant_core::net::common::authentication::msg::*;

@ -1,9 +1,6 @@
use crate::{
config::{BindAddress, ServerConfig, ServerListenConfig},
CliError, CliResult,
};
use crate::options::ServerSubcommand;
use crate::{CliError, CliResult};
use anyhow::Context;
use clap::Subcommand;
use distant_core::net::common::authentication::Verifier;
use distant_core::net::common::{Host, SecretKey32};
use distant_core::net::server::{Server, ServerConfig as NetServerConfig, ServerRef};
@ -11,239 +8,200 @@ use distant_core::{DistantApiServerHandler, DistantSingleKeyCredentials};
use log::*;
use std::io::{self, Read, Write};
#[derive(Debug, Subcommand)]
pub enum ServerSubcommand {
/// Listen for incoming requests as a server
Listen {
#[clap(flatten)]
config: ServerListenConfig,
/// If specified, will fork the process to run as a standalone daemon
#[clap(long)]
daemon: bool,
/// If specified, the server will not generate a key but instead listen on stdin for the next
/// 32 bytes that it will use as the key instead. Receiving less than 32 bytes before stdin
/// is closed is considered an error and any bytes after the first 32 are not used for the key
#[clap(long)]
key_from_stdin: bool,
/// If specified, will send output to the specified named pipe (internal usage)
#[cfg(windows)]
#[clap(long, help = None, long_help = None)]
output_to_local_pipe: Option<std::ffi::OsString>,
},
}
impl ServerSubcommand {
pub fn run(self, config: ServerConfig) -> CliResult {
match &self {
Self::Listen { daemon, .. } if *daemon => Self::run_daemon(self, config),
Self::Listen { .. } => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(Self::async_run(self, config, false))
}
pub fn run(cmd: ServerSubcommand) -> CliResult {
match &cmd {
ServerSubcommand::Listen { daemon, .. } if *daemon => run_daemon(cmd),
ServerSubcommand::Listen { .. } => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async_run(cmd, false))
}
}
}
#[cfg(windows)]
fn run_daemon(self, _config: ServerConfig) -> CliResult {
use crate::cli::Spawner;
use distant_core::net::common::{Listener, TransportExt, WindowsPipeListener};
use std::ffi::OsString;
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async {
let name = format!("distant_{}_{}", std::process::id(), rand::random::<u16>());
let mut listener = WindowsPipeListener::bind_local(name.as_str())
.with_context(|| "Failed to bind to local named pipe {name:?}")?;
let pid = Spawner::spawn_running_background(vec![
OsString::from("--output-to-local-pipe"),
OsString::from(name),
])
.context("Failed to spawn background process")?;
println!("[distant server detached, pid = {}]", pid);
// Wait to receive a connection from the above process
let transport = listener.accept().await.context(
"Failed to receive connection from background process to send credentials",
)?;
// Get the credentials and print them
let mut s = String::new();
let n = transport
.read_to_string(&mut s)
.await
.context("Failed to receive credentials")?;
if n == 0 {
anyhow::bail!("No credentials received from spawned server");
}
let credentials = s[..n]
.trim()
.parse::<DistantSingleKeyCredentials>()
.context("Failed to parse server credentials")?;
println!("\r");
println!("{}", credentials);
println!("\r");
io::stdout()
.flush()
.context("Failed to print server credentials")?;
Ok(())
})
.map_err(CliError::Error)
}
#[cfg(windows)]
fn run_daemon(_cmd: ServerSubcommand) -> CliResult {
use crate::cli::Spawner;
use distant_core::net::common::{Listener, TransportExt, WindowsPipeListener};
use std::ffi::OsString;
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async {
let name = format!("distant_{}_{}", std::process::id(), rand::random::<u16>());
let mut listener = WindowsPipeListener::bind_local(name.as_str())
.with_context(|| "Failed to bind to local named pipe {name:?}")?;
let pid = Spawner::spawn_running_background(vec![
OsString::from("--output-to-local-pipe"),
OsString::from(name),
])
.context("Failed to spawn background process")?;
println!("[distant server detached, pid = {}]", pid);
// Wait to receive a connection from the above process
let transport = listener
.accept()
.await
.context("Failed to receive connection from background process to send credentials")?;
// Get the credentials and print them
let mut s = String::new();
let n = transport
.read_to_string(&mut s)
.await
.context("Failed to receive credentials")?;
if n == 0 {
anyhow::bail!("No credentials received from spawned server");
}
let credentials = s[..n]
.trim()
.parse::<DistantSingleKeyCredentials>()
.context("Failed to parse server credentials")?;
println!("\r");
println!("{}", credentials);
println!("\r");
io::stdout()
.flush()
.context("Failed to print server credentials")?;
Ok(())
})
.map_err(CliError::Error)
}
#[cfg(unix)]
fn run_daemon(self, config: ServerConfig) -> CliResult {
use fork::{daemon, Fork};
#[cfg(unix)]
fn run_daemon(cmd: ServerSubcommand) -> CliResult {
use fork::{daemon, Fork};
// NOTE: We keep the stdin, stdout, stderr open so we can print out the pid with the parent
debug!("Forking process");
match daemon(true, true) {
Ok(Fork::Child) => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async { Self::async_run(self, config, true).await })?;
// NOTE: We keep the stdin, stdout, stderr open so we can print out the pid with the parent
debug!("Forking process");
match daemon(true, true) {
Ok(Fork::Child) => {
let rt = tokio::runtime::Runtime::new().context("Failed to start up runtime")?;
rt.block_on(async { async_run(cmd, true).await })?;
Ok(())
}
Ok(Fork::Parent(pid)) => {
println!("[distant server detached, pid = {pid}]");
if fork::close_fd().is_err() {
Err(CliError::Error(anyhow::anyhow!("Fork failed to close fd")))
} else {
Ok(())
}
Ok(Fork::Parent(pid)) => {
println!("[distant server detached, pid = {pid}]");
if fork::close_fd().is_err() {
Err(CliError::Error(anyhow::anyhow!("Fork failed to close fd")))
} else {
Ok(())
}
}
Err(_) => Err(CliError::Error(anyhow::anyhow!("Fork failed"))),
}
Err(_) => Err(CliError::Error(anyhow::anyhow!("Fork failed"))),
}
}
async fn async_run(self, config: ServerConfig, _is_forked: bool) -> CliResult {
match self {
Self::Listen {
config: listen_config,
key_from_stdin,
#[cfg(windows)]
output_to_local_pipe,
..
} => {
macro_rules! get {
(@flag $field:ident) => {{
config.listen.$field || listen_config.$field
}};
($field:ident) => {{
config.listen.$field.or(listen_config.$field)
}};
}
let host = get!(host).unwrap_or(BindAddress::Any);
trace!("Starting server using unresolved host '{host}'");
let addr = host.resolve(get!(@flag use_ipv6)).await?;
// If specified, change the current working directory of this program
if let Some(path) = get!(current_dir) {
debug!("Setting current directory to {:?}", path);
std::env::set_current_dir(path)
.context("Failed to set new current directory")?;
}
// Bind & start our server
let key = if key_from_stdin {
debug!("Reading secret key from stdin");
let mut buf = [0u8; 32];
io::stdin()
.read_exact(&mut buf)
.context("Failed to read secret key from stdin")?;
SecretKey32::from(buf)
} else {
SecretKey32::default()
};
debug!(
"Starting local API server, binding to {} {}",
addr,
match get!(port) {
Some(range) => format!("with port in range {range}"),
None => "using an ephemeral port".to_string(),
}
);
let handler = DistantApiServerHandler::local()
.context("Failed to create local distant api")?;
let server = Server::tcp()
.config(NetServerConfig {
shutdown: get!(shutdown).unwrap_or_default(),
..Default::default()
})
.handler(handler)
.verifier(Verifier::static_key(key.clone()))
.start(addr, get!(port).unwrap_or_else(|| 0.into()))
.await
.with_context(|| {
format!(
"Failed to start server @ {} with {}",
addr,
get!(port)
.map(|p| format!("port in range {p}"))
.unwrap_or_else(|| String::from("ephemeral port"))
)
})?;
let credentials = DistantSingleKeyCredentials {
host: Host::from(addr),
port: server.port(),
key,
username: None,
};
info!(
"Server listening at {}:{}",
credentials.host, credentials.port
);
// Print information about port, key, etc.
// NOTE: Following mosh approach of printing to make sure there's no garbage floating around
#[cfg(not(windows))]
{
println!("\r");
println!("{credentials}");
println!("\r");
io::stdout()
.flush()
.context("Failed to print credentials")?;
}
async fn async_run(cmd: ServerSubcommand, _is_forked: bool) -> CliResult {
match cmd {
#[allow(unused_variables)]
ServerSubcommand::Listen {
host,
port,
use_ipv6,
shutdown,
current_dir,
daemon: _,
key_from_stdin,
output_to_local_pipe,
} => {
let host = host.into_inner();
trace!("Starting server using unresolved host '{host}'");
let addr = host.resolve(use_ipv6).await?;
// If specified, change the current working directory of this program
if let Some(path) = current_dir {
debug!("Setting current directory to {:?}", path);
std::env::set_current_dir(path).context("Failed to set new current directory")?;
}
#[cfg(windows)]
if let Some(name) = output_to_local_pipe {
use distant_core::net::common::{TransportExt, WindowsPipeTransport};
let transport = WindowsPipeTransport::connect_local(&name)
.await
.with_context(|| {
format!("Failed to connect to local pipe named {name:?}")
})?;
transport
.write_all(credentials.to_string().as_bytes())
.await
.context("Failed to send credentials through pipe")?;
// Bind & start our server
let key = if key_from_stdin {
debug!("Reading secret key from stdin");
let mut buf = [0u8; 32];
io::stdin()
.read_exact(&mut buf)
.context("Failed to read secret key from stdin")?;
SecretKey32::from(buf)
} else {
SecretKey32::default()
};
let port = port.into_inner();
debug!(
"Starting local API server, binding to {} {}",
addr,
if port.is_ephemeral() {
format!("with port in range {port}")
} else {
println!("\r");
println!("{}", credentials);
println!("\r");
io::stdout()
.flush()
.context("Failed to print credentials")?;
"using an ephemeral port".to_string()
}
);
let handler =
DistantApiServerHandler::local().context("Failed to create local distant api")?;
let server = Server::tcp()
.config(NetServerConfig {
shutdown: shutdown.into_inner(),
..Default::default()
})
.handler(handler)
.verifier(Verifier::static_key(key.clone()))
.start(addr, port)
.await
.with_context(|| format!("Failed to start server @ {addr} with {port}"))?;
let credentials = DistantSingleKeyCredentials {
host: Host::from(addr),
port: server.port(),
key,
username: None,
};
info!(
"Server listening at {}:{}",
credentials.host, credentials.port
);
// Print information about port, key, etc.
// NOTE: Following mosh approach of printing to make sure there's no garbage floating around
#[cfg(not(windows))]
{
println!("\r");
println!("{credentials}");
println!("\r");
io::stdout()
.flush()
.context("Failed to print credentials")?;
}
// For the child, we want to fully disconnect it from pipes, which we do now
#[cfg(unix)]
if _is_forked && fork::close_fd().is_err() {
return Err(CliError::Error(anyhow::anyhow!("Fork failed to close fd")));
}
#[cfg(windows)]
if let Some(name) = output_to_local_pipe {
use distant_core::net::common::{TransportExt, WindowsPipeTransport};
let transport = WindowsPipeTransport::connect_local(&name)
.await
.with_context(|| format!("Failed to connect to local pipe named {name:?}"))?;
transport
.write_all(credentials.to_string().as_bytes())
.await
.context("Failed to send credentials through pipe")?;
} else {
println!("\r");
println!("{}", credentials);
println!("\r");
io::stdout()
.flush()
.context("Failed to print credentials")?;
}
// Let our server run to completion
server.wait().await.context("Failed to wait on server")?;
info!("Server is shutting down");
// For the child, we want to fully disconnect it from pipes, which we do now
#[cfg(unix)]
if _is_forked && fork::close_fd().is_err() {
return Err(CliError::Error(anyhow::anyhow!("Fork failed to close fd")));
}
}
Ok(())
// Let our server run to completion
server.wait().await.context("Failed to wait on server")?;
info!("Server is shutting down");
}
}
Ok(())
}
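A usage note on `--key-from-stdin`: the server consumes exactly 32 raw bytes from stdin as its secret key before binding, so a parent process can supply the key itself. The standalone sketch below drives that flow; it assumes a built `distant` binary on `PATH`, the `rand` crate for key material, and that the listen command is reachable as `distant server listen` (the exact subcommand path is assumed, not taken from this diff).

use std::io::{BufRead, BufReader, Write};
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Generate the 32-byte key locally; the server reads exactly 32 bytes and no more.
    let key: [u8; 32] = rand::random();

    let mut child = Command::new("distant")
        .args(["server", "listen", "--key-from-stdin"])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;

    // Feed the key over the child's stdin.
    child.stdin.take().expect("stdin was piped").write_all(&key)?;

    // Once bound, the server prints its credentials (host, port, key) to stdout;
    // grab the first non-empty line, then stop the child for this demo.
    let stdout = BufReader::new(child.stdout.take().expect("stdout was piped"));
    for line in stdout.lines() {
        let line = line?;
        if !line.trim().is_empty() {
            println!("credentials: {line}");
            break;
        }
    }
    child.kill()?;
    child.wait()?;
    Ok(())
}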

@ -0,0 +1,11 @@
mod cache;
mod client;
mod manager;
mod msg;
mod spawner;
pub use cache::*;
pub use client::*;
pub use manager::*;
pub use msg::*;
pub use spawner::*;

@ -1,4 +1,4 @@
use crate::paths::user::CACHE_FILE_PATH;
use crate::constants::user::CACHE_FILE_PATH;
use anyhow::Context;
use distant_core::net::common::ConnectionId;
use serde::{Deserialize, Serialize};

@ -1,4 +1,5 @@
use crate::config::NetworkConfig;
use crate::cli::common::{MsgReceiver, MsgSender};
use crate::options::NetworkSettings;
use async_trait::async_trait;
use distant_core::net::client::{Client as NetClient, ClientConfig, ReconnectStrategy};
use distant_core::net::common::authentication::msg::*;
@ -10,16 +11,13 @@ use log::*;
use std::io;
use std::time::Duration;
mod msg;
pub use msg::*;
pub struct Client<T> {
network: NetworkConfig,
network: NetworkSettings,
auth_handler: T,
}
impl Client<()> {
pub fn new(network: NetworkConfig) -> Self {
pub fn new(network: NetworkSettings) -> Self {
Self {
network,
auth_handler: (),
@ -45,7 +43,7 @@ impl<T> Client<T> {
impl<T: AuthHandler + Clone> Client<T> {
/// Connect to the manager listening on the socket or windows pipe based on
/// the [`NetworkConfig`] provided to the client earlier. Will return a new instance
/// the [`NetworkSettings`] provided to the client earlier. Will return a new instance
/// of the [`ManagerClient`] upon successful connection
pub async fn connect(self) -> anyhow::Result<ManagerClient> {
let client = self.connect_impl().await?;

@ -1,6 +1,6 @@
use crate::{
config::{AccessControl, NetworkConfig},
paths::{global as global_paths, user as user_paths},
constants::{global as global_paths, user as user_paths},
options::{AccessControl, NetworkSettings},
};
use anyhow::Context;
use distant_core::net::common::authentication::Verifier;
@ -11,7 +11,7 @@ use log::*;
pub struct Manager {
pub access: AccessControl,
pub config: ManagerConfig,
pub network: NetworkConfig,
pub network: NetworkSettings,
}
impl Manager {

@ -1,8 +0,0 @@
use clap::Args;
use serde::{Deserialize, Serialize};
#[derive(Args, Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct ClientActionConfig {
/// Represents the maximum time (in seconds) to wait for a network request before timing out
pub timeout: Option<f32>,
}

@ -1,83 +0,0 @@
use crate::config::BindAddress;
use clap::Args;
use distant_core::net::common::Map;
use serde::{Deserialize, Serialize};
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClientLaunchConfig {
#[clap(flatten)]
#[serde(flatten)]
pub distant: ClientLaunchDistantConfig,
/// Additional options to provide, typically forwarded to the handler within the manager
/// facilitating the launch of a distant server. Options are key-value pairs separated by
/// comma.
///
/// E.g. `key="value",key2="value2"`
#[clap(long, default_value_t)]
pub options: Map,
}
impl From<Map> for ClientLaunchConfig {
fn from(mut map: Map) -> Self {
Self {
distant: ClientLaunchDistantConfig {
bin: map.remove("distant.bin"),
bind_server: map
.remove("distant.bind_server")
.and_then(|x| x.parse::<BindAddress>().ok()),
args: map.remove("distant.args"),
},
options: map,
}
}
}
impl From<ClientLaunchConfig> for Map {
fn from(config: ClientLaunchConfig) -> Self {
let mut this = Self::new();
if let Some(x) = config.distant.bin {
this.insert("distant.bin".to_string(), x);
}
if let Some(x) = config.distant.bind_server {
this.insert("distant.bind_server".to_string(), x.to_string());
}
if let Some(x) = config.distant.args {
this.insert("distant.args".to_string(), x);
}
this.extend(config.options);
this
}
}
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClientLaunchDistantConfig {
/// Path to distant program on remote machine to execute via ssh;
/// by default, this program needs to be available within PATH as
/// specified when compiling ssh (not your login shell)
#[clap(name = "distant", long)]
pub bin: Option<String>,
/// Control the IP address that the server binds to.
///
/// The default is `ssh', in which case the server will reply from the IP address that the SSH
/// connection came from (as found in the SSH_CONNECTION environment variable). This is
/// useful for multihomed servers.
///
/// With --bind-server=any, the server will reply on the default interface and will not bind to
/// a particular IP address. This can be useful if the connection is made through sslh or
/// another tool that makes the SSH connection appear to come from localhost.
///
/// With --bind-server=IP, the server will attempt to bind to the specified IP address.
#[clap(name = "distant-bind-server", long, value_name = "ssh|any|IP")]
pub bind_server: Option<BindAddress>,
/// Additional arguments to provide to the server
#[clap(name = "distant-args", long, allow_hyphen_values(true))]
pub args: Option<String>,
}

@ -1,8 +0,0 @@
use clap::Args;
use serde::{Deserialize, Serialize};
#[derive(Args, Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct ClientReplConfig {
/// Represents the maximum time (in seconds) to wait for a network request before timing out
pub timeout: Option<f32>,
}

@ -1,19 +0,0 @@
use super::{AccessControl, CommonConfig, NetworkConfig};
use clap::Args;
use serde::{Deserialize, Serialize};
/// Represents configuration settings for the distant manager
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ManagerConfig {
/// Type of access to apply to created unix socket or windows pipe
#[clap(long, value_enum)]
pub access: Option<AccessControl>,
#[clap(flatten)]
#[serde(flatten)]
pub common: CommonConfig,
#[clap(flatten)]
#[serde(flatten)]
pub network: NetworkConfig,
}

@ -1,5 +1,121 @@
use directories::ProjectDirs;
use once_cell::sync::Lazy;
use std::path::PathBuf;
/// Represents the maximum size (in bytes) that data will be read from pipes
/// per individual `read` call
///
/// Current setting is 16k size
pub const MAX_PIPE_CHUNK_SIZE: usize = 16384;
/// Internal name to use for socket files.
const SOCKET_FILE_STR: &str = "distant.sock";
/// User-oriented paths.
pub mod user {
use super::*;
/// Root project directory used to calculate other paths
static PROJECT_DIR: Lazy<ProjectDirs> = Lazy::new(|| {
ProjectDirs::from("", "", "distant").expect("Could not determine valid $HOME path")
});
/// Path to configuration settings for distant client/manager/server
pub static CONFIG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.config_dir().join("config.toml"));
/// Path to cache file used for arbitrary CLI data
pub static CACHE_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("cache.toml"));
pub static CACHE_FILE_PATH_STR: Lazy<String> =
Lazy::new(|| CACHE_FILE_PATH.to_string_lossy().to_string());
/// Path to log file for distant client
pub static CLIENT_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("client.log"));
/// Path to log file for distant manager
pub static MANAGER_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("manager.log"));
/// Path to log file for distant server
pub static SERVER_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("server.log"));
/// Path to log file for distant generate
pub static GENERATE_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("generate.log"));
/// For Linux & BSD, this uses the runtime path. For Mac, this uses the tmp path
///
/// * `/run/user/1001/distant/{user}.distant.sock` on Linux
/// * `/var/run/{user}.distant.sock` on BSD
/// * `/tmp/{user}.distant.sock` on MacOS
pub static UNIX_SOCKET_PATH: Lazy<PathBuf> = Lazy::new(|| {
// Form of {user}.distant.sock
let mut file_name = whoami::username_os();
file_name.push(".");
file_name.push(SOCKET_FILE_STR);
PROJECT_DIR
.runtime_dir()
.map(std::path::Path::to_path_buf)
.unwrap_or_else(std::env::temp_dir)
.join(file_name)
});
/// Name of the pipe used by Windows in the form of `{user}.distant`
pub static WINDOWS_PIPE_NAME: Lazy<String> =
Lazy::new(|| format!("{}.distant", whoami::username()));
}
/// Global paths.
pub mod global {
use super::*;
/// Windows ProgramData directory from the %ProgramData% environment variable
#[cfg(windows)]
static PROGRAM_DATA_DIR: Lazy<PathBuf> = Lazy::new(|| {
PathBuf::from(std::env::var("ProgramData").expect("Could not determine %ProgramData%"))
});
/// Configuration directory for windows: `%ProgramData%\distant`.
#[cfg(windows)]
static CONFIG_DIR: Lazy<PathBuf> = Lazy::new(|| PROGRAM_DATA_DIR.join("distant"));
/// Configuration directory for unix: `/etc/distant`.
#[cfg(unix)]
static CONFIG_DIR: Lazy<PathBuf> = Lazy::new(|| PathBuf::from("/etc").join("distant"));
/// Path to configuration settings for distant client/manager/server.
pub static CONFIG_FILE_PATH: Lazy<PathBuf> = Lazy::new(|| CONFIG_DIR.join("config.toml"));
/// For Linux & BSD, this uses the runtime path. For Mac, this uses the tmp path
///
/// * `/run/distant.sock` on Linux
/// * `/var/run/distant.sock` on BSD
/// * `/tmp/distant.sock` on MacOS
/// * `@TERMUX_PREFIX@/var/run/distant.sock` on Android (Termux)
pub static UNIX_SOCKET_PATH: Lazy<PathBuf> = Lazy::new(|| {
if cfg!(target_os = "macos") {
std::env::temp_dir().join(SOCKET_FILE_STR)
} else if cfg!(any(
target_os = "freebsd",
target_os = "dragonfly",
target_os = "openbsd",
target_os = "netbsd"
)) {
PathBuf::from("/var").join("run").join(SOCKET_FILE_STR)
} else if cfg!(target_os = "android") {
PathBuf::from("@TERMUX_PREFIX@/var")
.join("run")
.join(SOCKET_FILE_STR)
} else {
PathBuf::from("/run").join(SOCKET_FILE_STR)
}
});
/// Name of the pipe used by Windows.
pub static WINDOWS_PIPE_NAME: Lazy<String> = Lazy::new(|| "distant".to_string());
}

@ -2,15 +2,14 @@ use derive_more::{Display, Error, From};
use std::process::{ExitCode, Termination};
mod cli;
pub mod config;
mod constants;
mod paths;
mod options;
#[cfg(windows)]
pub mod win_service;
pub use self::config::Config;
pub use cli::Cli;
pub use options::Options;
/// Wrapper around a [`CliResult`] that provides [`Termination`] support
pub struct MainResult(CliResult);

File diff suppressed because it is too large

@ -0,0 +1,13 @@
mod address;
mod cmd;
mod logging;
mod network;
mod search;
mod value;
pub use address::*;
pub use cmd::*;
pub use logging::*;
pub use network::*;
pub use search::*;
pub use value::*;

@ -1,109 +1,11 @@
use anyhow::Context;
use clap::Args;
use distant_core::net::common::{Host, HostParseError, Map, PortRange};
use distant_core::net::server::Shutdown;
use distant_core::net::common::{Host, HostParseError};
use serde::{Deserialize, Serialize};
use std::{
env, fmt,
net::{IpAddr, Ipv4Addr, Ipv6Addr},
path::PathBuf,
str::FromStr,
};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::str::FromStr;
use std::{env, fmt};
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerListenConfig {
/// Control the IP address that the distant binds to
///
/// There are three options here:
///
/// 1. `ssh`: the server will reply from the IP address that the SSH
/// connection came from (as found in the SSH_CONNECTION environment variable). This is
/// useful for multihomed servers.
///
/// 2. `any`: the server will reply on the default interface and will not bind to
/// a particular IP address. This can be useful if the connection is made through ssh or
/// another tool that makes the SSH connection appear to come from localhost.
///
/// 3. `IP`: the server will attempt to bind to the specified IP address.
#[clap(long, value_name = "ssh|any|IP")]
pub host: Option<BindAddress>,
/// Set the port(s) that the server will attempt to bind to
///
/// This can be in the form of PORT1 or PORT1:PORTN to provide a range of ports.
/// With `--port 0`, the server will let the operating system pick an available TCP port.
///
/// Please note that this option does not affect the server-side port used by SSH
#[clap(long, value_name = "PORT[:PORT2]")]
pub port: Option<PortRange>,
/// If specified, will bind to the ipv6 interface if host is "any" instead of ipv4
#[clap(short = '6', long)]
pub use_ipv6: bool,
/// Logic to apply to server when determining when to shutdown automatically
///
/// 1. "never" means the server will never automatically shut down
/// 2. "after=<N>" means the server will shut down after N seconds
/// 3. "lonely=<N>" means the server will shut down after N seconds with no connections
///
/// Default is to never shut down
#[clap(long)]
pub shutdown: Option<Shutdown>,
/// Changes the current working directory (cwd) to the specified directory
#[clap(long)]
pub current_dir: Option<PathBuf>,
}
impl From<Map> for ServerListenConfig {
fn from(mut map: Map) -> Self {
Self {
host: map
.remove("host")
.and_then(|x| x.parse::<BindAddress>().ok()),
port: map.remove("port").and_then(|x| x.parse::<PortRange>().ok()),
use_ipv6: map
.remove("use_ipv6")
.and_then(|x| x.parse::<bool>().ok())
.unwrap_or_default(),
shutdown: map
.remove("shutdown")
.and_then(|x| x.parse::<Shutdown>().ok()),
current_dir: map
.remove("current_dir")
.and_then(|x| x.parse::<PathBuf>().ok()),
}
}
}
impl From<ServerListenConfig> for Map {
fn from(config: ServerListenConfig) -> Self {
let mut this = Self::new();
if let Some(x) = config.host {
this.insert("host".to_string(), x.to_string());
}
if let Some(x) = config.port {
this.insert("port".to_string(), x.to_string());
}
this.insert("use_ipv6".to_string(), config.use_ipv6.to_string());
if let Some(x) = config.shutdown {
this.insert("shutdown".to_string(), x.to_string());
}
if let Some(x) = config.current_dir {
this.insert("current_dir".to_string(), x.to_string_lossy().to_string());
}
this
}
}
/// Represents options for binding a server to an IP address
/// Represents options for binding a server to an IP address.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BindAddress {
/// Should read address from `SSH_CONNECTION` environment variable, which contains four
@ -115,11 +17,11 @@ pub enum BindAddress {
/// * server port number
Ssh,
/// Should bind to `0.0.0.0` or `::` depending on ipv6 flag
/// Should bind to `0.0.0.0` or `::` depending on ipv6 flag.
Any,
/// Should bind to the specified host, which could be `example.com`, `localhost`, or an IP
/// address like `203.0.113.1` or `2001:DB8::1`
/// address like `203.0.113.1` or `2001:DB8::1`.
Host(Host),
}
@ -149,6 +51,7 @@ impl FromStr for BindAddress {
}
impl Serialize for BindAddress {
/// Will store the address as a string.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
@ -158,6 +61,7 @@ impl Serialize for BindAddress {
}
impl<'de> Deserialize<'de> for BindAddress {
/// Will parse a string into an address.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
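As a stand-in illustration of the documented `ssh|any|IP` contract for parsing `BindAddress`: the literal keywords map to the `Ssh` and `Any` variants, and anything else is treated as a host. The sketch below is not the project's implementation (the real type wraps distant's `Host` and accepts hostnames as well); it only mirrors the branching for plain IP addresses.

use std::net::IpAddr;
use std::str::FromStr;

// Stand-in mirroring the documented `ssh|any|IP` parsing contract of `BindAddress`.
#[derive(Debug, PartialEq)]
enum Bind {
    Ssh,
    Any,
    Ip(IpAddr),
}

impl FromStr for Bind {
    type Err = std::net::AddrParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.trim() {
            "ssh" => Ok(Self::Ssh),
            "any" => Ok(Self::Any),
            other => other.parse().map(Self::Ip),
        }
    }
}

fn main() {
    assert_eq!("ssh".parse::<Bind>().unwrap(), Bind::Ssh);
    assert_eq!("any".parse::<Bind>().unwrap(), Bind::Any);
    assert_eq!(
        "203.0.113.1".parse::<Bind>().unwrap(),
        Bind::Ip("203.0.113.1".parse().unwrap())
    );
}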

@ -0,0 +1,146 @@
use clap::Args;
use std::fmt;
use std::str::FromStr;
/// Represents some command with arguments to execute.
///
/// NOTE: Must be derived with `#[clap(flatten)]` to properly take effect.
#[derive(Args, Clone, Debug, PartialEq, Eq)]
pub struct Cmd {
/// The command to execute.
#[clap(name = "CMD")]
cmd: String,
/// Arguments to provide to the command.
#[clap(name = "ARGS")]
args: Vec<String>,
}
impl Cmd {
/// Creates a new command from the given `cmd`.
pub fn new<C, I, A>(cmd: C, args: I) -> Self
where
C: Into<String>,
I: Iterator<Item = A>,
A: Into<String>,
{
Self {
cmd: cmd.into(),
args: args.map(Into::into).collect(),
}
}
}
impl From<Cmd> for String {
fn from(cmd: Cmd) -> Self {
cmd.to_string()
}
}
impl fmt::Display for Cmd {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.cmd)?;
for arg in self.args.iter() {
write!(f, " {arg}")?;
}
Ok(())
}
}
impl<'a> From<&'a str> for Cmd {
/// Parses `s` into [`Cmd`], or panics if unable to parse.
fn from(s: &'a str) -> Self {
s.parse().expect("Failed to parse into cmd")
}
}
impl FromStr for Cmd {
type Err = Box<dyn std::error::Error>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let tokens = if cfg!(unix) {
shell_words::split(s)?
} else if cfg!(windows) {
winsplit::split(s)
} else {
unreachable!(
"FromStr<Cmd>: Unsupported operating system outside Unix and Windows families!"
);
};
// If we get nothing, then we want an empty command
if tokens.is_empty() {
return Ok(Self {
cmd: String::new(),
args: Vec::new(),
});
}
let mut it = tokens.into_iter();
Ok(Self {
cmd: it.next().unwrap(),
args: it.collect(),
})
}
}
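For a concrete picture of what the Unix branch of `Cmd::from_str` produces, here is a minimal sketch built on the same `shell_words::split` call (requires the `shell_words` crate; on Windows the implementation above uses `winsplit::split` instead).

fn main() -> Result<(), shell_words::ParseError> {
    // Same tokenization the Unix branch performs: first token is the program, rest are args.
    let tokens = shell_words::split(r#"echo "hello world" --flag"#)?;

    let mut it = tokens.into_iter();
    let cmd = it.next().unwrap_or_default();
    let args: Vec<String> = it.collect();

    assert_eq!(cmd, "echo");
    assert_eq!(args, vec!["hello world".to_string(), "--flag".to_string()]);
    Ok(())
}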
/*
impl FromArgMatches for Cmd {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
let mut matches = matches.clone();
Self::from_arg_matches_mut(&mut matches)
}
fn from_arg_matches_mut(matches: &mut ArgMatches) -> Result<Self, Error> {
let cmd = matches.get_one::<String>("cmd").ok_or_else(|| {
Error::raw(
ErrorKind::MissingRequiredArgument,
"program must be specified",
)
})?;
let args: Vec<String> = matches
.get_many::<String>("arg")
.unwrap_or_default()
.map(ToString::to_string)
.collect();
Ok(Self::new(format!("{cmd} {}", args.join(" "))))
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
let mut matches = matches.clone();
self.update_from_arg_matches_mut(&mut matches)
}
fn update_from_arg_matches_mut(&mut self, _matches: &mut ArgMatches) -> Result<(), Error> {
Ok(())
}
}
impl Args for Cmd {
fn augment_args(cmd: Command) -> Command {
cmd.arg(
Arg::new("cmd")
.required(true)
.value_name("CMD")
.help("")
.action(ArgAction::Set),
)
.trailing_var_arg(true)
.arg(
Arg::new("arg")
.value_name("ARGS")
.num_args(1..)
.action(ArgAction::Append),
)
}
fn augment_args_for_update(cmd: Command) -> Command {
Self::augment_args(cmd)
}
} */
/* #[cfg(test)]
mod tests {
use super::*;
#[test]
fn verify_cmd() {
Cmd::augment_args(Command::new("distant")).debug_assert();
}
} */

@ -2,6 +2,25 @@ use clap::{Args, ValueEnum};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Contains settings associated with logging.
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct LoggingSettings {
/// Log level to use throughout the application
#[clap(long, global = true, value_enum)]
pub log_level: Option<LogLevel>,
/// Path to file to use for logging
#[clap(long, global = true)]
pub log_file: Option<PathBuf>,
}
impl LoggingSettings {
pub fn log_level_or_default(&self) -> LogLevel {
self.log_level.as_ref().copied().unwrap_or_default()
}
}
/// Represents the level associated with logging.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, ValueEnum)]
#[clap(rename_all = "snake_case")]
#[serde(rename_all = "snake_case")]
@ -32,21 +51,3 @@ impl Default for LogLevel {
Self::Info
}
}
/// Contains options that are common across subcommands
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct CommonConfig {
/// Log level to use throughout the application
#[clap(long, global = true, value_enum)]
pub log_level: Option<LogLevel>,
/// Path to file to use for logging
#[clap(long, global = true)]
pub log_file: Option<PathBuf>,
}
impl CommonConfig {
pub fn log_level_or_default(&self) -> LogLevel {
self.log_level.as_ref().copied().unwrap_or_default()
}
}

@ -1,3 +1,4 @@
use crate::constants;
use clap::Args;
use serde::{Deserialize, Serialize};
@ -36,7 +37,7 @@ impl Default for AccessControl {
/// Represents common networking configuration
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct NetworkConfig {
pub struct NetworkSettings {
/// Override the path to the Unix socket used by the manager (unix-only)
#[clap(long)]
pub unix_socket: Option<std::path::PathBuf>,
@ -46,12 +47,12 @@ pub struct NetworkConfig {
pub windows_pipe: Option<String>,
}
impl NetworkConfig {
pub fn merge(self, other: Self) -> Self {
Self {
unix_socket: self.unix_socket.or(other.unix_socket),
windows_pipe: self.windows_pipe.or(other.windows_pipe),
}
impl NetworkSettings {
/// Merge these settings with the `other` settings. These settings take priority
/// over the `other` settings.
pub fn merge(&mut self, other: Self) {
self.unix_socket = self.unix_socket.take().or(other.unix_socket);
self.windows_pipe = self.windows_pipe.take().or(other.windows_pipe);
}
/// Returns option containing reference to unix path if configured
@ -70,8 +71,8 @@ impl NetworkConfig {
match self.unix_socket.as_deref() {
Some(path) => vec![path],
None => vec![
crate::paths::user::UNIX_SOCKET_PATH.as_path(),
crate::paths::global::UNIX_SOCKET_PATH.as_path(),
constants::user::UNIX_SOCKET_PATH.as_path(),
constants::global::UNIX_SOCKET_PATH.as_path(),
],
}
}
@ -82,8 +83,8 @@ impl NetworkConfig {
match self.windows_pipe.as_deref() {
Some(name) => vec![name],
None => vec![
crate::paths::user::WINDOWS_PIPE_NAME.as_str(),
crate::paths::global::WINDOWS_PIPE_NAME.as_str(),
constants::user::WINDOWS_PIPE_NAME.as_str(),
constants::global::WINDOWS_PIPE_NAME.as_str(),
],
}
}
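A quick illustration of the new in-place `merge` semantics: fields already set on the receiver win, and `other` only fills the gaps (for example, explicit CLI flags merged over values loaded from a config file). The struct below is a stand-in with the same shape as `NetworkSettings`, not the real type.

use std::path::PathBuf;

// Stand-in showing the merge priority: values on `self` win, `other` fills gaps.
#[derive(Debug, PartialEq)]
struct Net {
    unix_socket: Option<PathBuf>,
    windows_pipe: Option<String>,
}

impl Net {
    fn merge(&mut self, other: Self) {
        self.unix_socket = self.unix_socket.take().or(other.unix_socket);
        self.windows_pipe = self.windows_pipe.take().or(other.windows_pipe);
    }
}

fn main() {
    // CLI flags (explicit) merged with values loaded from the config file.
    let mut cli = Net {
        unix_socket: Some(PathBuf::from("/tmp/cli.distant.sock")),
        windows_pipe: None,
    };
    let config = Net {
        unix_socket: Some(PathBuf::from("/tmp/config.distant.sock")),
        windows_pipe: Some("config-pipe".to_string()),
    };

    cli.merge(config);
    assert_eq!(cli.unix_socket, Some(PathBuf::from("/tmp/cli.distant.sock")));
    assert_eq!(cli.windows_pipe, Some("config-pipe".to_string()));
}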

@ -0,0 +1,81 @@
use clap::{Args, ValueEnum};
use distant_core::data::FileType;
use distant_core::data::{SearchQueryOptions, SearchQueryTarget};
use std::collections::HashSet;
pub use distant_core::data::SearchQueryCondition as CliSearchQueryCondition;
/// Options to customize the search results.
#[derive(Args, Clone, Debug, Default, PartialEq, Eq)]
pub struct CliSearchQueryOptions {
/// Restrict search to only these file types (otherwise all are allowed)
#[clap(skip)]
pub allowed_file_types: HashSet<FileType>,
/// Regex to use to filter paths being searched to only those that match the include condition
#[clap(long)]
pub include: Option<CliSearchQueryCondition>,
/// Regex to use to filter paths being searched to only those that do not match the exclude
/// condition
#[clap(long)]
pub exclude: Option<CliSearchQueryCondition>,
/// Search should follow symbolic links
#[clap(long)]
pub follow_symbolic_links: bool,
/// Maximum results to return before stopping the query
#[clap(long)]
pub limit: Option<u64>,
/// Maximum depth (directories) to search
///
/// The smallest depth is 0 and always corresponds to the path given to the new function on
/// this type. Its direct descendants have depth 1, and their descendants have depth 2, and so
/// on.
///
/// Note that this will not simply filter the entries of the iterator, but it will actually
/// avoid descending into directories when the depth is exceeded.
#[clap(long)]
pub max_depth: Option<u64>,
/// Number of results to batch before sending back, excluding the final submission, which will
/// always include the remaining results even if fewer than the pagination size
#[clap(long)]
pub pagination: Option<u64>,
}
impl From<CliSearchQueryOptions> for SearchQueryOptions {
fn from(x: CliSearchQueryOptions) -> Self {
Self {
allowed_file_types: x.allowed_file_types,
include: x.include,
exclude: x.exclude,
follow_symbolic_links: x.follow_symbolic_links,
limit: x.limit,
max_depth: x.max_depth,
pagination: x.pagination,
}
}
}
/// Kind of data to examine using conditions
#[derive(Copy, Clone, Debug, PartialEq, Eq, ValueEnum)]
#[clap(rename_all = "snake_case")]
pub enum CliSearchQueryTarget {
/// Checks path of file, directory, or symlink
Path,
/// Checks contents of files
Contents,
}
impl From<CliSearchQueryTarget> for SearchQueryTarget {
fn from(x: CliSearchQueryTarget) -> Self {
match x {
CliSearchQueryTarget::Contents => Self::Contents,
CliSearchQueryTarget::Path => Self::Path,
}
}
}

@ -0,0 +1,124 @@
use derive_more::{Display, IsVariant};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::ops::{Deref, DerefMut};
use std::str::FromStr;
/// Represents a value for some CLI option or config. This exists to support optional values that
/// have a default value so we can distinguish if a CLI value was a default or explicitly defined.
#[derive(Copy, Clone, Debug, Display, IsVariant)]
pub enum Value<T> {
/// Value is a default representation.
Default(T),
/// Value is explicitly defined by the user.
Explicit(T),
}
impl<T> Value<T> {
pub fn into_inner(self) -> T {
match self {
Self::Default(x) => x,
Self::Explicit(x) => x,
}
}
}
impl<T> AsRef<T> for Value<T> {
fn as_ref(&self) -> &T {
match self {
Value::Default(x) => x,
Value::Explicit(x) => x,
}
}
}
impl<T> AsMut<T> for Value<T> {
fn as_mut(&mut self) -> &mut T {
match self {
Value::Default(x) => x,
Value::Explicit(x) => x,
}
}
}
impl<T> Deref for Value<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
AsRef::as_ref(self)
}
}
impl<T> DerefMut for Value<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
AsMut::as_mut(self)
}
}
/*
impl<T> Into<T> for Value<T> {
fn into(self) -> T {
match self {
Self::Default(x) => x,
Self::Explicit(x) => x,
}
}
} */
impl<T> PartialEq for Value<T>
where
T: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
AsRef::as_ref(self) == AsRef::as_ref(other)
}
}
impl<T> PartialEq<T> for Value<T>
where
T: PartialEq,
{
fn eq(&self, other: &T) -> bool {
AsRef::as_ref(self) == other
}
}
impl<T> FromStr for Value<T>
where
T: FromStr,
{
type Err = T::Err;
/// Parses `s` into [Value], placing the result into the explicit variant.
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self::Explicit(T::from_str(s)?))
}
}
impl<T> Serialize for Value<T>
where
T: Serialize,
{
/// Serializes the underlying data within [Value]. The origin of the value (default vs
/// explicit) is not stored, since values serialized into config files are all treated as explicitly set.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
T::serialize(self, serializer)
}
}
impl<'de, T> Deserialize<'de> for Value<T>
where
T: Deserialize<'de>,
{
/// Deserializes into an explicit variant of [Value]. It is assumed that any value coming from
/// a format like a config.toml is explicitly defined and not a default, even though we have a
/// default config.toml available.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(Self::Explicit(T::deserialize(deserializer)?))
}
}
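To make the Default/Explicit split concrete, here is a stand-in sketch of the kind of precedence decision it enables when combining CLI values with config-file values. The `resolve` helper is hypothetical and purely illustrative; only the enum shape mirrors `Value<T>` above.

// Stand-in for `Value<T>`: a config-file value only wins over a CLI value
// when the CLI value was merely a default.
#[derive(Debug)]
enum Value<T> {
    Default(T),
    Explicit(T),
}

impl<T> Value<T> {
    fn into_inner(self) -> T {
        match self {
            Self::Default(x) | Self::Explicit(x) => x,
        }
    }

    fn is_default(&self) -> bool {
        matches!(self, Self::Default(_))
    }
}

// Hypothetical precedence rule for illustration only.
fn resolve(cli: Value<u16>, config: Option<u16>) -> u16 {
    match config {
        Some(from_config) if cli.is_default() => from_config,
        _ => cli.into_inner(),
    }
}

fn main() {
    // Option left at its default (here 0) while the config file sets 8080: config wins.
    assert_eq!(resolve(Value::Default(0), Some(8080)), 8080);
    // Option passed explicitly on the CLI: it wins even though the config file sets 8080.
    assert_eq!(resolve(Value::Explicit(9000), Some(8080)), 9000);
}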

@ -1,25 +1,18 @@
use crate::paths;
use super::common;
use crate::constants;
use anyhow::Context;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::{
io,
path::{Path, PathBuf},
};
use toml_edit::Document;
use std::path::PathBuf;
mod client;
mod common;
mod generate;
mod manager;
mod network;
mod server;
pub use client::*;
pub use common::*;
pub use generate::*;
pub use manager::*;
pub use network::*;
pub use server::*;
const DEFAULT_RAW_STR: &str = include_str!("config.toml");
@ -53,8 +46,8 @@ impl Config {
}
None => {
let paths = vec![
paths::global::CONFIG_FILE_PATH.as_path(),
paths::user::CONFIG_FILE_PATH.as_path(),
constants::global::CONFIG_FILE_PATH.as_path(),
constants::user::CONFIG_FILE_PATH.as_path(),
];
match (paths[0].exists(), paths[1].exists()) {
@ -77,60 +70,13 @@ impl Config {
}
/// Loads the specified `path` as a [`Config`]
pub async fn load(path: impl AsRef<Path>) -> anyhow::Result<Self> {
#[cfg(test)]
pub async fn load(path: impl AsRef<std::path::Path>) -> anyhow::Result<Self> {
let bytes = tokio::fs::read(path.as_ref())
.await
.with_context(|| format!("Failed to read config file {:?}", path.as_ref()))?;
toml_edit::de::from_slice(&bytes).context("Failed to parse config")
}
/// Like `edit` but will succeed without invoking `f` if the path is not found
pub async fn edit_if_exists(
path: impl AsRef<Path>,
f: impl FnOnce(&mut Document) -> io::Result<()>,
) -> io::Result<()> {
Self::edit(path, f).await.or_else(|x| {
if x.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(x)
}
})
}
/// Loads the specified `path` as a [`Document`], performs changes to the document using `f`,
/// and overwrites the `path` with the updated [`Document`]
pub async fn edit(
path: impl AsRef<Path>,
f: impl FnOnce(&mut Document) -> io::Result<()>,
) -> io::Result<()> {
let mut document = tokio::fs::read_to_string(path.as_ref())
.await?
.parse::<Document>()
.map_err(|x| io::Error::new(io::ErrorKind::InvalidData, x))?;
f(&mut document)?;
tokio::fs::write(path, document.to_string()).await
}
/// Saves the [`Config`] to the specified `path` only if the path points to no file
pub async fn save_if_not_found(&self, path: impl AsRef<Path>) -> io::Result<()> {
use tokio::io::AsyncWriteExt;
let text = toml_edit::ser::to_string_pretty(self)
.map_err(|x| io::Error::new(io::ErrorKind::InvalidData, x))?;
tokio::fs::OpenOptions::new()
.create_new(true)
.open(path)
.await?
.write_all(text.as_bytes())
.await
}
/// Saves the [`Config`] to the specified `path`, overwriting the file if it exists
pub async fn save(&self, path: impl AsRef<Path>) -> io::Result<()> {
let text = toml_edit::ser::to_string_pretty(self)
.map_err(|x| io::Error::new(io::ErrorKind::InvalidData, x))?;
tokio::fs::write(path, text).await
}
}
impl Default for Config {
@ -147,6 +93,7 @@ impl Default for Config {
#[cfg(test)]
mod tests {
use super::*;
use common::*;
use distant_core::net::common::{Host, Map, PortRange};
use distant_core::net::map;
use distant_core::net::server::Shutdown;
@ -161,11 +108,7 @@ mod tests {
config,
Config {
client: ClientConfig {
action: ClientActionConfig { timeout: Some(0.) },
common: CommonConfig {
log_level: Some(LogLevel::Info),
log_file: None
},
api: ClientApiConfig { timeout: Some(0.) },
connect: ClientConnectConfig {
options: Map::new()
},
@ -177,34 +120,33 @@ mod tests {
},
options: Map::new(),
},
network: NetworkConfig {
logging: LoggingSettings {
log_level: Some(LogLevel::Info),
log_file: None
},
network: NetworkSettings {
unix_socket: None,
windows_pipe: None
},
repl: ClientReplConfig { timeout: Some(0.) },
},
generate: GenerateConfig {
common: CommonConfig {
logging: LoggingSettings {
log_level: Some(LogLevel::Info),
log_file: None
},
},
manager: ManagerConfig {
access: Some(AccessControl::Owner),
common: CommonConfig {
logging: LoggingSettings {
log_level: Some(LogLevel::Info),
log_file: None
},
network: NetworkConfig {
network: NetworkSettings {
unix_socket: None,
windows_pipe: None
},
},
server: ServerConfig {
common: CommonConfig {
log_level: Some(LogLevel::Info),
log_file: None
},
listen: ServerListenConfig {
host: Some(BindAddress::Any),
port: Some(0.into()),
@ -212,13 +154,17 @@ mod tests {
shutdown: Some(Shutdown::Never),
current_dir: None,
},
logging: LoggingSettings {
log_level: Some(LogLevel::Info),
log_file: None
},
},
}
);
}
#[test(tokio::test)]
async fn default_should_parse_config_from_specified_file() {
async fn load_should_parse_config_from_specified_file() {
use assert_fs::prelude::*;
let config_file = assert_fs::NamedTempFile::new("config.toml").unwrap();
config_file
@ -230,8 +176,8 @@ log_level = "trace"
unix_socket = "client-unix-socket"
windows_pipe = "client-windows-pipe"
[client.action]
timeout = 123
[client.api]
timeout = 456
[client.connect]
options = "key=\"value\",key2=\"value2\""
@ -242,9 +188,6 @@ bind_server = "any"
args = "a b c"
options = "key3=\"value3\",key4=\"value4\""
[client.repl]
timeout = 456
[generate]
log_file = "generate-log-file"
log_level = "debug"
@ -275,12 +218,8 @@ current_dir = "server-current-dir"
config,
Config {
client: ClientConfig {
action: ClientActionConfig {
timeout: Some(123.)
},
common: CommonConfig {
log_level: Some(LogLevel::Trace),
log_file: Some(PathBuf::from("client-log-file")),
api: ClientApiConfig {
timeout: Some(456.)
},
connect: ClientConnectConfig {
options: map!("key" -> "value", "key2" -> "value2"),
@ -293,36 +232,33 @@ current_dir = "server-current-dir"
},
options: map!("key3" -> "value3", "key4" -> "value4"),
},
network: NetworkConfig {
logging: LoggingSettings {
log_level: Some(LogLevel::Trace),
log_file: Some(PathBuf::from("client-log-file")),
},
network: NetworkSettings {
unix_socket: Some(PathBuf::from("client-unix-socket")),
windows_pipe: Some(String::from("client-windows-pipe"))
},
repl: ClientReplConfig {
timeout: Some(456.)
},
},
generate: GenerateConfig {
common: CommonConfig {
logging: LoggingSettings {
log_level: Some(LogLevel::Debug),
log_file: Some(PathBuf::from("generate-log-file"))
},
},
manager: ManagerConfig {
access: Some(AccessControl::Anyone),
common: CommonConfig {
logging: LoggingSettings {
log_level: Some(LogLevel::Warn),
log_file: Some(PathBuf::from("manager-log-file"))
},
network: NetworkConfig {
network: NetworkSettings {
unix_socket: Some(PathBuf::from("manager-unix-socket")),
windows_pipe: Some(String::from("manager-windows-pipe")),
},
},
server: ServerConfig {
common: CommonConfig {
log_level: Some(LogLevel::Error),
log_file: Some(PathBuf::from("server-log-file")),
},
listen: ServerListenConfig {
host: Some(BindAddress::Host(Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1)))),
port: Some(PortRange {
@ -333,6 +269,10 @@ current_dir = "server-current-dir"
shutdown: Some(Shutdown::After(Duration::from_secs(123))),
current_dir: Some(PathBuf::from("server-current-dir")),
},
logging: LoggingSettings {
log_level: Some(LogLevel::Error),
log_file: Some(PathBuf::from("server-log-file")),
},
},
}
);

@ -15,6 +15,13 @@
# The default setting is info
log_level = "info"
# Configuration related to the client's api command
[client.api]
# Maximum time (in seconds) to wait for a network request before timing out
# where 0 indicates no timeout will occur
timeout = 0
# Alternative unix domain socket to connect to when using a manager (Unix only)
# unix_socket = "path/to/socket"
@ -22,13 +29,6 @@ log_level = "info"
# manager (Windows only)
# windows_pipe = "some_name"
# Configuration related to the client's action command
[client.action]
# Maximum time (in seconds) to wait for a network request before timing out
# where 0 indicates no timeout will occur
timeout = 0
# Configuration related to the client's connect command
[client.connect]
@ -72,13 +72,6 @@ args = ""
# E.g. `key="value",key2="value2"`
options = ""
# Configuration related to the client's repl command
[client.repl]
# Maximum time (in seconds) to wait for a network request before timing out
# where 0 indicates no timeout will occur
timeout = 0
###############################################################################
# All configuration specific to the distant generate option will be found under
# this heading
@ -115,7 +108,7 @@ log_level = "info"
#
# * "owner": equates to `0o600` on Unix (read & write for owner).
# * "group": equates to `0o660` on Unix (read & write for owner and group).
# # "anyone": equates to `0o666` on Unix (read & write for owner, group, and other).
# * "anyone": equates to `0o666` on Unix (read & write for owner, group, and other).
access = "owner"
# Alternative unix domain socket to listen on (Unix only)

@ -1,27 +1,24 @@
use super::{CommonConfig, NetworkConfig};
use super::common::{self, LoggingSettings, NetworkSettings};
use serde::{Deserialize, Serialize};
mod action;
mod api;
mod connect;
mod launch;
mod repl;
pub use action::*;
pub use api::*;
pub use connect::*;
pub use launch::*;
pub use repl::*;
/// Represents configuration settings for the distant client
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct ClientConfig {
#[serde(flatten)]
pub common: CommonConfig,
pub logging: LoggingSettings,
pub action: ClientActionConfig,
#[serde(flatten)]
pub network: NetworkSettings,
pub api: ClientApiConfig,
pub connect: ClientConnectConfig,
pub launch: ClientLaunchConfig,
pub repl: ClientReplConfig,
#[serde(flatten)]
pub network: NetworkConfig,
}
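Because `LoggingSettings` and `NetworkSettings` are flattened, their keys live directly under `[client]` in config.toml rather than in nested tables, which is why the default config shows `log_level` and `unix_socket` at that level. Below is a minimal stand-in demonstrating the layout with `serde` + `toml` (illustrative types only, not the project's).

use serde::Deserialize;

// Flattened settings: `log_level` sits directly under `[client]`.
#[derive(Debug, Deserialize)]
struct Logging {
    log_level: Option<String>,
}

// Non-flattened settings: `timeout` sits under its own `[api]` table.
#[derive(Debug, Deserialize)]
struct Api {
    timeout: Option<f32>,
}

#[derive(Debug, Deserialize)]
struct Client {
    #[serde(flatten)]
    logging: Logging,
    api: Api,
}

fn main() {
    let client: Client = toml::from_str(
        r#"
            log_level = "info"

            [api]
            timeout = 0
        "#,
    )
    .unwrap();

    assert_eq!(client.logging.log_level.as_deref(), Some("info"));
    assert_eq!(client.api.timeout, Some(0.0));
}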

@ -0,0 +1,6 @@
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct ClientApiConfig {
pub timeout: Option<f32>,
}

@ -1,14 +1,8 @@
use clap::Args;
use distant_core::net::common::Map;
use serde::{Deserialize, Serialize};
#[derive(Args, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClientConnectConfig {
/// Additional options to provide, typically forwarded to the handler within the manager
/// facilitating the connection. Options are key-value pairs separated by comma.
///
/// E.g. `key="value",key2="value2"`
#[clap(long, default_value_t)]
pub options: Map,
}

@ -0,0 +1,54 @@
use super::common::BindAddress;
use distant_core::net::common::Map;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClientLaunchConfig {
#[serde(flatten)]
pub distant: ClientLaunchDistantConfig,
pub options: Map,
}
impl From<Map> for ClientLaunchConfig {
fn from(mut map: Map) -> Self {
Self {
distant: ClientLaunchDistantConfig {
bin: map.remove("distant.bin"),
bind_server: map
.remove("distant.bind_server")
.and_then(|x| x.parse::<BindAddress>().ok()),
args: map.remove("distant.args"),
},
options: map,
}
}
}
impl From<ClientLaunchConfig> for Map {
fn from(config: ClientLaunchConfig) -> Self {
let mut this = Self::new();
if let Some(x) = config.distant.bin {
this.insert("distant.bin".to_string(), x);
}
if let Some(x) = config.distant.bind_server {
this.insert("distant.bind_server".to_string(), x.to_string());
}
if let Some(x) = config.distant.args {
this.insert("distant.args".to_string(), x);
}
this.extend(config.options);
this
}
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClientLaunchDistantConfig {
pub bin: Option<String>,
pub bind_server: Option<BindAddress>,
pub args: Option<String>,
}

@ -1,9 +1,9 @@
use super::CommonConfig;
use super::common::LoggingSettings;
use serde::{Deserialize, Serialize};
/// Represents configuration settings for the distant generate
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct GenerateConfig {
#[serde(flatten)]
pub common: CommonConfig,
pub logging: LoggingSettings,
}

@ -0,0 +1,14 @@
use super::common::{AccessControl, LoggingSettings, NetworkSettings};
use serde::{Deserialize, Serialize};
/// Represents configuration settings for the distant manager
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ManagerConfig {
#[serde(flatten)]
pub logging: LoggingSettings,
#[serde(flatten)]
pub network: NetworkSettings,
pub access: Option<AccessControl>,
}

@ -1,4 +1,4 @@
use super::CommonConfig;
use super::common::LoggingSettings;
use serde::{Deserialize, Serialize};
mod listen;
@ -8,7 +8,7 @@ pub use listen::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerConfig {
#[serde(flatten)]
pub common: CommonConfig,
pub logging: LoggingSettings,
pub listen: ServerListenConfig,
}

@ -0,0 +1,61 @@
use crate::options::BindAddress;
use distant_core::net::common::{Map, PortRange};
use distant_core::net::server::Shutdown;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerListenConfig {
pub host: Option<BindAddress>,
pub port: Option<PortRange>,
pub use_ipv6: bool,
pub shutdown: Option<Shutdown>,
pub current_dir: Option<PathBuf>,
}
impl From<Map> for ServerListenConfig {
fn from(mut map: Map) -> Self {
Self {
host: map
.remove("host")
.and_then(|x| x.parse::<BindAddress>().ok()),
port: map.remove("port").and_then(|x| x.parse::<PortRange>().ok()),
use_ipv6: map
.remove("use_ipv6")
.and_then(|x| x.parse::<bool>().ok())
.unwrap_or_default(),
shutdown: map
.remove("shutdown")
.and_then(|x| x.parse::<Shutdown>().ok()),
current_dir: map
.remove("current_dir")
.and_then(|x| x.parse::<PathBuf>().ok()),
}
}
}
impl From<ServerListenConfig> for Map {
fn from(config: ServerListenConfig) -> Self {
let mut this = Self::new();
if let Some(x) = config.host {
this.insert("host".to_string(), x.to_string());
}
if let Some(x) = config.port {
this.insert("port".to_string(), x.to_string());
}
this.insert("use_ipv6".to_string(), config.use_ipv6.to_string());
if let Some(x) = config.shutdown {
this.insert("shutdown".to_string(), x.to_string());
}
if let Some(x) = config.current_dir {
this.insert("current_dir".to_string(), x.to_string_lossy().to_string());
}
this
}
}
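// Illustrative sketch (not part of this changeset): building the listen config
// from a Map as above. Absent keys fall back to None/false; the values shown
// here are examples, not defaults.
fn example_listen_config() {
    let mut map = Map::new();
    map.insert("use_ipv6".to_string(), "true".to_string());
    map.insert("current_dir".to_string(), "/tmp".to_string());

    let config = ServerListenConfig::from(map);
    assert!(config.use_ipv6);
    assert_eq!(config.current_dir, Some(PathBuf::from("/tmp")));
    assert!(config.host.is_none());
}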

@ -1,107 +0,0 @@
use directories::ProjectDirs;
use once_cell::sync::Lazy;
use std::path::PathBuf;
const SOCKET_FILE_STR: &str = "distant.sock";
/// User-oriented paths
pub mod user {
use super::*;
/// Root project directory used to calculate other paths
static PROJECT_DIR: Lazy<ProjectDirs> = Lazy::new(|| {
ProjectDirs::from("", "", "distant").expect("Could not determine valid $HOME path")
});
/// Path to configuration settings for distant client/manager/server
pub static CONFIG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.config_dir().join("config.toml"));
/// Path to cache file used for arbitrary CLI data
pub static CACHE_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("cache.toml"));
pub static CACHE_FILE_PATH_STR: Lazy<String> =
Lazy::new(|| CACHE_FILE_PATH.to_string_lossy().to_string());
/// Path to log file for distant client
pub static CLIENT_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("client.log"));
/// Path to log file for distant manager
pub static MANAGER_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("manager.log"));
/// Path to log file for distant server
pub static SERVER_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("server.log"));
/// Path to log file for distant generate
pub static GENERATE_LOG_FILE_PATH: Lazy<PathBuf> =
Lazy::new(|| PROJECT_DIR.cache_dir().join("generate.log"));
/// For Linux & BSD, this uses the runtime path. For Mac, this uses the tmp path
///
/// * `/run/user/1001/distant/{user}.distant.sock` on Linux
/// * `/var/run/{user}.distant.sock` on BSD
/// * `/tmp/{user}.distant.sock` on MacOS
pub static UNIX_SOCKET_PATH: Lazy<PathBuf> = Lazy::new(|| {
// Form of {user}.distant.sock
let mut file_name = whoami::username_os();
file_name.push(".");
file_name.push(SOCKET_FILE_STR);
PROJECT_DIR
.runtime_dir()
.map(std::path::Path::to_path_buf)
.unwrap_or_else(std::env::temp_dir)
.join(file_name)
});
/// Name of the pipe used by Windows in the form of `{user}.distant`
pub static WINDOWS_PIPE_NAME: Lazy<String> =
Lazy::new(|| format!("{}.distant", whoami::username()));
}
/// Global paths
pub mod global {
use super::*;
/// Windows ProgramData directory from the %ProgramData% environment variable
#[cfg(windows)]
static PROGRAM_DATA_DIR: Lazy<PathBuf> = Lazy::new(|| {
PathBuf::from(std::env::var("ProgramData").expect("Could not determine %ProgramData%"))
});
#[cfg(windows)]
static CONFIG_DIR: Lazy<PathBuf> = Lazy::new(|| PROGRAM_DATA_DIR.join("distant"));
#[cfg(unix)]
static CONFIG_DIR: Lazy<PathBuf> = Lazy::new(|| PathBuf::from("/etc").join("distant"));
/// Path to configuration settings for distant client/manager/server
pub static CONFIG_FILE_PATH: Lazy<PathBuf> = Lazy::new(|| CONFIG_DIR.join("config.toml"));
/// For Linux & BSD, this uses the runtime path. For Mac, this uses the tmp path
///
/// * `/run/distant.sock` on Linux
/// * `/var/run/distant.sock` on BSD
/// * `/tmp/distant.sock` on MacOS
pub static UNIX_SOCKET_PATH: Lazy<PathBuf> = Lazy::new(|| {
if cfg!(target_os = "macos") {
std::env::temp_dir().join(SOCKET_FILE_STR)
} else if cfg!(any(
target_os = "freebsd",
target_os = "dragonfly",
target_os = "openbsd",
target_os = "netbsd"
)) {
PathBuf::from("/var").join("run").join(SOCKET_FILE_STR)
} else {
PathBuf::from("/run").join(SOCKET_FILE_STR)
}
});
/// Name of the pipe used by Windows
pub static WINDOWS_PIPE_NAME: Lazy<String> = Lazy::new(|| "distant".to_string());
}
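// Illustrative sketch of how this (now removed) module was consumed: callers
// dereferenced the Lazy statics to obtain concrete paths, e.g.
fn example_paths() {
    let config_file: &std::path::Path = user::CONFIG_FILE_PATH.as_path();
    let socket: &std::path::Path = global::UNIX_SOCKET_PATH.as_path();
    let _ = (config_file, socket);
}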

@ -1,65 +0,0 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
some text
on multiple lines
that is a file's contents
"#;
const APPENDED_FILE_CONTENTS: &str = r#"
even more
file contents
"#;
#[rstest]
#[test_log::test]
fn should_report_ok_when_done(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant action file-append {path} -- {contents}
action_cmd
.args([
"file-append",
file.to_str().unwrap(),
"--",
APPENDED_FILE_CONTENTS,
])
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(format!("{}{}", FILE_CONTENTS, APPENDED_FILE_CONTENTS));
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
// distant action file-append {path} -- {contents}
action_cmd
.args([
"file-append",
file.to_str().unwrap(),
"--",
APPENDED_FILE_CONTENTS,
])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
// Because we're talking to a local server, we can verify locally
file.assert(predicates::path::missing());
}

@ -1,65 +0,0 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
some text
on multiple lines
that is a file's contents
"#;
const APPENDED_FILE_CONTENTS: &str = r#"
even more
file contents
"#;
#[rstest]
#[test_log::test]
fn should_report_ok_when_done(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant action file-append-text {path} -- {contents}
action_cmd
.args([
"file-append-text",
file.to_str().unwrap(),
"--",
APPENDED_FILE_CONTENTS,
])
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(format!("{}{}", FILE_CONTENTS, APPENDED_FILE_CONTENTS));
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
// distant action file-append-text {path} -- {contents}
action_cmd
.args([
"file-append-text",
file.to_str().unwrap(),
"--",
APPENDED_FILE_CONTENTS,
])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
// Because we're talking to a local server, we can verify locally
file.assert(predicates::path::missing());
}

@ -1,41 +0,0 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
some text
on multiple lines
that is a file's contents
"#;
#[rstest]
#[test_log::test]
fn should_print_out_file_contents(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant action file-read {path}
action_cmd
.args(["file-read", file.to_str().unwrap()])
.assert()
.success()
.stdout(format!("{}\n", FILE_CONTENTS))
.stderr("");
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-file");
// distant action file-read {path}
action_cmd
.args(["file-read", file.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
}

@ -1,41 +0,0 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
some text
on multiple lines
that is a file's contents
"#;
#[rstest]
#[test_log::test]
fn should_print_out_file_contents(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant action file-read-text {path}
action_cmd
.args(["file-read-text", file.to_str().unwrap()])
.assert()
.success()
.stdout(format!("{}\n", FILE_CONTENTS))
.stderr("");
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-file");
// distant action file-read-text {path}
action_cmd
.args(["file-read-text", file.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
}

@ -1,49 +0,0 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
some text
on multiple lines
that is a file's contents
"#;
#[rstest]
#[test_log::test]
fn should_report_ok_when_done(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
// distant action file-write {path} -- {contents}
action_cmd
.args(["file-write", file.to_str().unwrap(), "--", FILE_CONTENTS])
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(FILE_CONTENTS);
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
// distant action file-write {path} -- {contents}
action_cmd
.args(["file-write", file.to_str().unwrap(), "--", FILE_CONTENTS])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
// Because we're talking to a local server, we can verify locally
file.assert(predicates::path::missing());
}

@ -1,59 +0,0 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
some text
on multiple lines
that is a file's contents
"#;
#[rstest]
#[test_log::test]
fn should_report_ok_when_done(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
// distant action file-write-text {path} -- {contents}
action_cmd
.args([
"file-write-text",
file.to_str().unwrap(),
"--",
FILE_CONTENTS,
])
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(FILE_CONTENTS);
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
// distant action file-write {path} -- {contents}
action_cmd
.args([
"file-write-text",
file.to_str().unwrap(),
"--",
FILE_CONTENTS,
])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
// Because we're talking to a local server, we can verify locally
file.assert(predicates::path::missing());
}

@ -6,8 +6,8 @@ use test_log::test;
#[rstest]
#[test(tokio::test)]
async fn should_support_json_capabilities(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_capabilities(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let id = rand::random::<u64>().to_string();
let req = json!({
@ -15,7 +15,7 @@ async fn should_support_json_capabilities(mut json_repl: CtxCommand<Repl>) {
"payload": { "type": "capabilities" },
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "capabilities", "JSON: {res}");

@ -13,8 +13,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_copying_file(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_copying_file(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -33,7 +33,7 @@ async fn should_support_json_copying_file(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -50,8 +50,8 @@ async fn should_support_json_copying_file(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_copying_nonempty_directory(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_copying_nonempty_directory(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -74,7 +74,7 @@ async fn should_support_json_copying_nonempty_directory(mut json_repl: CtxComman
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -91,8 +91,8 @@ async fn should_support_json_copying_nonempty_directory(mut json_repl: CtxComman
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -109,7 +109,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -7,8 +7,8 @@ use test_log::test;
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("dir");
@ -23,7 +23,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -41,9 +41,9 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_creating_missing_parent_directories_if_specified(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("dir1").child("dir2");
@ -58,7 +58,7 @@ async fn should_support_json_creating_missing_parent_directories_if_specified(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -75,8 +75,8 @@ async fn should_support_json_creating_missing_parent_directories_if_specified(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("missing-dir").child("dir");
@ -91,7 +91,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -71,8 +71,8 @@ fn make_directory() -> assert_fs::TempDir {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = make_directory();
@ -89,7 +89,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -111,9 +111,9 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_returning_absolute_paths_if_specified(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = make_directory();
@ -134,7 +134,7 @@ async fn should_support_json_returning_absolute_paths_if_specified(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -156,9 +156,9 @@ async fn should_support_json_returning_absolute_paths_if_specified(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_returning_all_files_and_directories_if_depth_is_0(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = make_directory();
@ -175,7 +175,7 @@ async fn should_support_json_returning_all_files_and_directories_if_depth_is_0(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -207,9 +207,9 @@ async fn should_support_json_returning_all_files_and_directories_if_depth_is_0(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_including_root_directory_if_specified(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = make_directory();
@ -230,7 +230,7 @@ async fn should_support_json_including_root_directory_if_specified(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -252,8 +252,8 @@ async fn should_support_json_including_root_directory_if_specified(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = make_directory();
let dir = temp.child("missing-dir");
@ -271,7 +271,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -6,8 +6,8 @@ use test_log::test;
#[rstest]
#[test(tokio::test)]
async fn should_support_json_true_if_exists(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_true_if_exists(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -24,7 +24,7 @@ async fn should_support_json_true_if_exists(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -39,8 +39,8 @@ async fn should_support_json_true_if_exists(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_false_if_not_exists(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_false_if_not_exists(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -56,7 +56,7 @@ async fn should_support_json_false_if_not_exists(mut json_repl: CtxCommand<Repl>
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(

@ -17,8 +17,8 @@ file contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
@ -34,7 +34,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -54,8 +54,8 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
@ -70,7 +70,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -17,8 +17,8 @@ file contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
@ -34,7 +34,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -54,8 +54,8 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
@ -70,7 +70,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -12,8 +12,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
@ -28,7 +28,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -43,8 +43,8 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-file");
@ -58,7 +58,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -12,8 +12,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
@ -28,7 +28,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -43,8 +43,8 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-file");
@ -58,7 +58,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -12,8 +12,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
@ -28,7 +28,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -48,8 +48,8 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
@ -64,7 +64,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -12,8 +12,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
@ -28,7 +28,7 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -48,8 +48,8 @@ async fn should_support_json_output(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
@ -64,7 +64,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -12,8 +12,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_metadata_for_file(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_metadata_for_file(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -31,7 +31,7 @@ async fn should_support_json_metadata_for_file(mut json_repl: CtxCommand<Repl>)
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "metadata", "JSON: {res}");
@ -46,8 +46,8 @@ async fn should_support_json_metadata_for_file(mut json_repl: CtxCommand<Repl>)
#[rstest]
#[test(tokio::test)]
async fn should_support_json_metadata_for_directory(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_metadata_for_directory(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -65,7 +65,7 @@ async fn should_support_json_metadata_for_directory(mut json_repl: CtxCommand<Re
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "metadata", "JSON: {res}");
@ -81,9 +81,9 @@ async fn should_support_json_metadata_for_directory(mut json_repl: CtxCommand<Re
#[rstest]
#[test(tokio::test)]
async fn should_support_json_metadata_for_including_a_canonicalized_path(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -104,7 +104,7 @@ async fn should_support_json_metadata_for_including_a_canonicalized_path(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "metadata", "JSON: {res}");
@ -120,9 +120,9 @@ async fn should_support_json_metadata_for_including_a_canonicalized_path(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_metadata_for_resolving_file_type_of_symlink(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -143,7 +143,7 @@ async fn should_support_json_metadata_for_resolving_file_type_of_symlink(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "metadata", "JSON: {res}");
@ -152,8 +152,8 @@ async fn should_support_json_metadata_for_resolving_file_type_of_symlink(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -171,7 +171,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -72,9 +72,9 @@ fn check_value_as_str(value: &serde_json::Value, other: &str) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_to_execute_program_and_return_exit_status(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let cmd = make_cmd(vec![ECHO_ARGS_TO_STDOUT.to_str().unwrap()]);
@ -88,7 +88,7 @@ async fn should_support_json_to_execute_program_and_return_exit_status(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_spawned", "JSON: {res}");
@ -96,8 +96,8 @@ async fn should_support_json_to_execute_program_and_return_exit_status(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_to_capture_and_print_stdout(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_to_capture_and_print_stdout(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let cmd = make_cmd(vec![ECHO_ARGS_TO_STDOUT.to_str().unwrap(), "some output"]);
@ -112,20 +112,20 @@ async fn should_support_json_to_capture_and_print_stdout(mut json_repl: CtxComma
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_spawned", "JSON: {res}");
// Wait for output to show up (for stdout)
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_stdout", "JSON: {res}");
check_value_as_str(&res["payload"]["data"], "some output");
// Now we wait for the process to complete
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_done", "JSON: {res}");
@ -134,8 +134,8 @@ async fn should_support_json_to_capture_and_print_stdout(mut json_repl: CtxComma
#[rstest]
#[test(tokio::test)]
async fn should_support_json_to_capture_and_print_stderr(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_to_capture_and_print_stderr(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let cmd = make_cmd(vec![ECHO_ARGS_TO_STDERR.to_str().unwrap(), "some output"]);
@ -150,20 +150,20 @@ async fn should_support_json_to_capture_and_print_stderr(mut json_repl: CtxComma
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_spawned", "JSON: {res}");
// Wait for output to show up (for stderr)
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_stderr", "JSON: {res}");
check_value_as_str(&res["payload"]["data"], "some output");
// Now we wait for the process to complete
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_done", "JSON: {res}");
@ -172,8 +172,10 @@ async fn should_support_json_to_capture_and_print_stderr(mut json_repl: CtxComma
#[rstest]
#[test(tokio::test)]
async fn should_support_json_to_forward_stdin_to_remote_process(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_to_forward_stdin_to_remote_process(
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut api_process).await;
let cmd = make_cmd(vec![ECHO_STDIN_TO_STDOUT.to_str().unwrap()]);
@ -188,7 +190,7 @@ async fn should_support_json_to_forward_stdin_to_remote_process(mut json_repl: C
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_spawned", "JSON: {res}");
@ -207,12 +209,12 @@ async fn should_support_json_to_forward_stdin_to_remote_process(mut json_repl: C
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "ok", "JSON: {res}");
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], origin_id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "proc_stdout", "JSON: {res}");
@ -220,7 +222,7 @@ async fn should_support_json_to_forward_stdin_to_remote_process(mut json_repl: C
// Now kill the process and wait for it to complete
let id = rand::random::<u64>().to_string();
let res = json_repl
let res = api_process
.write_and_read_json(json!({
"id": id,
"payload": {
@ -237,7 +239,7 @@ async fn should_support_json_to_forward_stdin_to_remote_process(mut json_repl: C
//
// NOTE: The above situation occurs on Windows, but I've not seen it happen on Mac/Linux.
if res["payload"]["type"] == "ok" {
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(
res["payload"]["type"], "proc_done",
"Did not receive proc_done from killed process: {res}"
@ -249,8 +251,8 @@ async fn should_support_json_to_forward_stdin_to_remote_process(mut json_repl: C
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let id = rand::random::<u64>().to_string();
let req = json!({
@ -262,7 +264,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -7,8 +7,8 @@ use test_log::test;
#[rstest]
#[test(tokio::test)]
async fn should_support_json_removing_file(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_removing_file(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -25,7 +25,7 @@ async fn should_support_json_removing_file(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -41,8 +41,8 @@ async fn should_support_json_removing_file(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_removing_empty_directory(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_removing_empty_directory(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -60,7 +60,7 @@ async fn should_support_json_removing_empty_directory(mut json_repl: CtxCommand<
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -77,9 +77,9 @@ async fn should_support_json_removing_empty_directory(mut json_repl: CtxCommand<
#[rstest]
#[test(tokio::test)]
async fn should_support_json_removing_nonempty_directory_if_force_specified(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -97,7 +97,7 @@ async fn should_support_json_removing_nonempty_directory_if_force_specified(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -113,8 +113,8 @@ async fn should_support_json_removing_nonempty_directory_if_force_specified(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -133,7 +133,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -13,8 +13,8 @@ that is a file's contents
#[rstest]
#[test(tokio::test)]
async fn should_support_json_renaming_file(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_renaming_file(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -33,7 +33,7 @@ async fn should_support_json_renaming_file(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -50,8 +50,8 @@ async fn should_support_json_renaming_file(mut json_repl: CtxCommand<Repl>) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_renaming_nonempty_directory(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_renaming_nonempty_directory(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -74,7 +74,7 @@ async fn should_support_json_renaming_nonempty_directory(mut json_repl: CtxComma
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -94,8 +94,8 @@ async fn should_support_json_renaming_nonempty_directory(mut json_repl: CtxComma
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -112,7 +112,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "error", "JSON: {res}");

@ -6,8 +6,10 @@ use test_log::test;
#[rstest]
#[test(tokio::test)]
async fn should_support_json_search_filesystem_using_query(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_search_filesystem_using_query(
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut api_process).await;
let root = assert_fs::TempDir::new().unwrap();
root.child("file1.txt").write_str("some file text").unwrap();
@ -30,7 +32,7 @@ async fn should_support_json_search_filesystem_using_query(mut json_repl: CtxCom
});
// Submit search request and get back started confirmation
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
// Get id from started confirmation
assert_eq!(res["origin_id"], id, "JSON: {res}");
@ -40,7 +42,7 @@ async fn should_support_json_search_filesystem_using_query(mut json_repl: CtxCom
.expect("id missing or not number");
// Get search results back
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
res["payload"],
@ -74,7 +76,7 @@ async fn should_support_json_search_filesystem_using_query(mut json_repl: CtxCom
);
// Get search completion confirmation
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
res["payload"],

@ -6,8 +6,8 @@ use test_log::test;
#[rstest]
#[test(tokio::test)]
async fn should_support_json_system_info(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_system_info(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let id = rand::random::<u64>().to_string();
let req = json!({
@ -15,7 +15,7 @@ async fn should_support_json_system_info(mut json_repl: CtxCommand<Repl>) {
"payload": { "type": "system_info" },
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(

@ -19,8 +19,8 @@ async fn wait_millis(millis: u64) {
#[rstest]
#[test(tokio::test)]
async fn should_support_json_watching_single_file(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_watching_single_file(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -37,7 +37,7 @@ async fn should_support_json_watching_single_file(mut json_repl: CtxCommand<Repl
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -56,7 +56,7 @@ async fn should_support_json_watching_single_file(mut json_repl: CtxCommand<Repl
// Get the response and verify the change
// NOTE: Don't bother checking the kind as it can vary by platform
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "changed", "JSON: {res}");
@ -69,8 +69,10 @@ async fn should_support_json_watching_single_file(mut json_repl: CtxCommand<Repl
#[rstest]
#[test(tokio::test)]
async fn should_support_json_watching_directory_recursively(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_watching_directory_recursively(
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -91,7 +93,7 @@ async fn should_support_json_watching_directory_recursively(mut json_repl: CtxCo
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(
@ -112,7 +114,7 @@ async fn should_support_json_watching_directory_recursively(mut json_repl: CtxCo
// Get the response and verify the change
// NOTE: Don't bother checking the kind as it can vary by platform
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "changed", "JSON: {res}");
@ -128,7 +130,7 @@ async fn should_support_json_watching_directory_recursively(mut json_repl: CtxCo
// Get the response and verify the change
// NOTE: Don't bother checking the kind as it can vary by platform
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "changed", "JSON: {res}");
@ -142,9 +144,9 @@ async fn should_support_json_watching_directory_recursively(mut json_repl: CtxCo
#[rstest]
#[test(tokio::test)]
async fn should_support_json_reporting_changes_using_correct_request_id(
mut json_repl: CtxCommand<Repl>,
mut api_process: CtxCommand<ApiProcess>,
) {
validate_authentication(&mut json_repl).await;
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
@ -164,7 +166,7 @@ async fn should_support_json_reporting_changes_using_correct_request_id(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id_1, "JSON: {res}");
assert_eq!(
@ -185,7 +187,7 @@ async fn should_support_json_reporting_changes_using_correct_request_id(
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id_2, "JSON: {res}");
assert_eq!(
@ -204,7 +206,7 @@ async fn should_support_json_reporting_changes_using_correct_request_id(
// Get the response and verify the change
// NOTE: Don't bother checking the kind as it can vary by platform
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id_1, "JSON: {res}");
assert_eq!(res["payload"]["type"], "changed", "JSON: {res}");
@ -219,7 +221,7 @@ async fn should_support_json_reporting_changes_using_correct_request_id(
// Sleep a bit to give time to get all changes happening
wait_a_bit().await;
if json_repl
if api_process
.try_read_line_from_stdout()
.expect("stdout closed unexpectedly")
.is_none()
@ -236,7 +238,7 @@ async fn should_support_json_reporting_changes_using_correct_request_id(
// Get the response and verify the change
// NOTE: Don't bother checking the kind as it can vary by platform
let res = json_repl.read_json_from_stdout().await.unwrap().unwrap();
let res = api_process.read_json_from_stdout().await.unwrap().unwrap();
assert_eq!(res["origin_id"], id_2, "JSON: {res}");
assert_eq!(res["payload"]["type"], "changed", "JSON: {res}");
@ -249,8 +251,8 @@ async fn should_support_json_reporting_changes_using_correct_request_id(
#[rstest]
#[test(tokio::test)]
async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
validate_authentication(&mut json_repl).await;
async fn should_support_json_output_for_error(mut api_process: CtxCommand<ApiProcess>) {
validate_authentication(&mut api_process).await;
let temp = assert_fs::TempDir::new().unwrap();
let path = temp.to_path_buf().join("missing");
@ -265,7 +267,7 @@ async fn should_support_json_output_for_error(mut json_repl: CtxCommand<Repl>) {
},
});
let res = json_repl.write_and_read_json(req).await.unwrap().unwrap();
let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
// Ensure we got an acknowledgement of watching that failed
assert_eq!(res["origin_id"], id, "JSON: {res}");

@ -1,5 +1,4 @@
use crate::cli::fixtures::*;
use assert_cmd::Command;
use indoc::indoc;
use rstest::*;
@ -57,10 +56,8 @@ const EXPECTED_TABLE: &str = indoc! {"
#[rstest]
#[test_log::test]
fn should_output_capabilities(mut action_cmd: CtxCommand<Command>) {
// distant action capabilities
action_cmd
.arg("capabilities")
fn should_output_capabilities(ctx: DistantManagerCtx) {
ctx.cmd("capabilities")
.assert()
.success()
.stdout(EXPECTED_TABLE)

@ -1,5 +1,4 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use crate::cli::fixtures::*;
use assert_fs::prelude::*;
use predicates::prelude::*;
use rstest::*;
@ -12,7 +11,7 @@ that is a file's contents
#[rstest]
#[test_log::test]
fn should_support_copying_file(mut action_cmd: CtxCommand<Command>) {
fn should_support_copying_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let src = temp.child("file");
@ -20,9 +19,9 @@ fn should_support_copying_file(mut action_cmd: CtxCommand<Command>) {
let dst = temp.child("file2");
// distant action copy {src} {dst}
action_cmd
.args(["copy", src.to_str().unwrap(), dst.to_str().unwrap()])
// distant fs copy {src} {dst}
ctx.new_assert_cmd(["fs", "copy"])
.args([src.to_str().unwrap(), dst.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -34,7 +33,7 @@ fn should_support_copying_file(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
fn should_support_copying_nonempty_directory(mut action_cmd: CtxCommand<Command>) {
fn should_support_copying_nonempty_directory(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Make a non-empty directory
@ -46,9 +45,9 @@ fn should_support_copying_nonempty_directory(mut action_cmd: CtxCommand<Command>
let dst = temp.child("dir2");
let dst_file = dst.child("file");
// distant action copy {src} {dst}
action_cmd
.args(["copy", src.to_str().unwrap(), dst.to_str().unwrap()])
// distant fs copy {src} {dst}
ctx.new_assert_cmd(["fs", "copy"])
.args([src.to_str().unwrap(), dst.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -60,19 +59,19 @@ fn should_support_copying_nonempty_directory(mut action_cmd: CtxCommand<Command>
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let src = temp.child("dir");
let dst = temp.child("dir2");
// distant action copy {src} {dst}
action_cmd
.args(["copy", src.to_str().unwrap(), dst.to_str().unwrap()])
// distant fs copy {src} {dst}
ctx.new_assert_cmd(["fs", "copy"])
.args([src.to_str().unwrap(), dst.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
.stderr(predicates::str::is_empty().not());
src.assert(predicate::path::missing());
dst.assert(predicate::path::missing());
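
Throughout these files the exact `FAILURE_LINE` assertion is replaced by a looser check that stderr is merely non-empty, presumably because the refactored CLI no longer emits one fixed failure line. A small sketch of that predicate, using the `predicates` crate these tests already import:

```rust
use predicates::prelude::*;
use predicates::Predicate;

fn main() {
    // Accept any non-empty stderr instead of one exact failure line.
    let non_empty = predicates::str::is_empty().not();
    assert!(non_empty.eval("error: path does not exist"));
    assert!(!non_empty.eval(""));
}
```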

@ -1,20 +1,19 @@
use crate::cli::fixtures::*;
use assert_cmd::Command;
use assert_fs::prelude::*;
use rstest::*;
#[rstest]
#[test_log::test]
fn should_output_true_if_exists(mut action_cmd: CtxCommand<Command>) {
fn should_output_true_if_exists(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Create file
let file = temp.child("file");
file.touch().unwrap();
// distant action exists {path}
action_cmd
.args(["exists", file.to_str().unwrap()])
// distant fs exists {path}
ctx.new_assert_cmd(["fs", "exists"])
.arg(file.to_str().unwrap())
.assert()
.success()
.stdout("true\n")
@ -23,15 +22,15 @@ fn should_output_true_if_exists(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
fn should_output_false_if_not_exists(mut action_cmd: CtxCommand<Command>) {
fn should_output_false_if_not_exists(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Don't create file
let file = temp.child("file");
// distant action exists {path}
action_cmd
.args(["exists", file.to_str().unwrap()])
// distant fs exists {path}
ctx.new_assert_cmd(["fs", "exists"])
.arg(file.to_str().unwrap())
.assert()
.success()
.stdout("false\n")

@ -1,18 +1,17 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use crate::cli::fixtures::*;
use assert_fs::prelude::*;
use predicates::prelude::*;
use rstest::*;
#[rstest]
#[test_log::test]
fn should_report_ok_when_done(mut action_cmd: CtxCommand<Command>) {
fn should_report_ok_when_done(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("dir");
// distant action dir-create {path}
action_cmd
.args(["dir-create", dir.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "make-dir"])
.args([dir.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -24,15 +23,13 @@ fn should_report_ok_when_done(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
fn should_support_creating_missing_parent_directories_if_specified(
mut action_cmd: CtxCommand<Command>,
) {
fn should_support_creating_missing_parent_directories_if_specified(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("dir1").child("dir2");
// distant action dir-create {path}
action_cmd
.args(["dir-create", "--all", dir.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "make-dir"])
.args(["--all", dir.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -44,17 +41,17 @@ fn should_support_creating_missing_parent_directories_if_specified(
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("missing-dir").child("dir");
// distant action dir-create {path}
action_cmd
.args(["dir-create", dir.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "make-dir"])
.args([dir.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
.stderr(predicates::str::is_empty().not());
dir.assert(predicate::path::missing());
}

@ -1,9 +1,6 @@
use crate::cli::{
fixtures::*,
utils::{regex_pred, FAILURE_LINE},
};
use assert_cmd::Command;
use crate::cli::{fixtures::*, utils::regex_pred};
use assert_fs::prelude::*;
use predicates::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = r#"
@ -14,15 +11,15 @@ that is a file's contents
#[rstest]
#[test_log::test]
fn should_output_metadata_for_file(mut action_cmd: CtxCommand<Command>) {
fn should_output_metadata_for_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("file");
file.write_str(FILE_CONTENTS).unwrap();
// distant action metadata {path}
action_cmd
.args(["metadata", file.to_str().unwrap()])
// distant fs metadata {path}
ctx.new_assert_cmd(["fs", "metadata"])
.arg(file.to_str().unwrap())
.assert()
.success()
.stdout(regex_pred(concat!(
@ -38,15 +35,15 @@ fn should_output_metadata_for_file(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
fn should_output_metadata_for_directory(mut action_cmd: CtxCommand<Command>) {
fn should_output_metadata_for_directory(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("dir");
dir.create_dir_all().unwrap();
// distant action metadata {path}
action_cmd
.args(["metadata", dir.to_str().unwrap()])
// distant fs metadata {path}
ctx.new_assert_cmd(["fs", "metadata"])
.arg(dir.to_str().unwrap())
.assert()
.success()
.stdout(regex_pred(concat!(
@ -64,7 +61,7 @@ fn should_output_metadata_for_directory(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
#[cfg_attr(windows, ignore)]
fn should_support_including_a_canonicalized_path(mut action_cmd: CtxCommand<Command>) {
fn should_support_including_a_canonicalized_path(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("file");
@ -73,9 +70,9 @@ fn should_support_including_a_canonicalized_path(mut action_cmd: CtxCommand<Comm
let link = temp.child("link");
link.symlink_to_file(file.path()).unwrap();
// distant action metadata --canonicalize {path}
action_cmd
.args(["metadata", "--canonicalize", link.to_str().unwrap()])
// distant fs metadata --canonicalize {path}
ctx.new_assert_cmd(["fs", "metadata"])
.args(["--canonicalize", link.to_str().unwrap()])
.assert()
.success()
.stdout(regex_pred(&format!(
@ -95,7 +92,7 @@ fn should_support_including_a_canonicalized_path(mut action_cmd: CtxCommand<Comm
#[rstest]
#[test_log::test]
fn should_support_resolving_file_type_of_symlink(mut action_cmd: CtxCommand<Command>) {
fn should_support_resolving_file_type_of_symlink(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("file");
@ -104,9 +101,9 @@ fn should_support_resolving_file_type_of_symlink(mut action_cmd: CtxCommand<Comm
let link = temp.child("link");
link.symlink_to_file(file.path()).unwrap();
// distant action metadata --canonicalize {path}
action_cmd
.args(["metadata", "--resolve-file-type", link.to_str().unwrap()])
// distant fs metadata --resolve-file-type {path}
ctx.new_assert_cmd(["fs", "metadata"])
.args(["--resolve-file-type", link.to_str().unwrap()])
.assert()
.success()
.stdout(regex_pred(concat!(
@ -122,17 +119,17 @@ fn should_support_resolving_file_type_of_symlink(mut action_cmd: CtxCommand<Comm
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Don't create file
let file = temp.child("file");
// distant action metadata {path}
action_cmd
.args(["metadata", file.to_str().unwrap()])
// distant fs metadata {path}
ctx.new_assert_cmd(["fs", "metadata"])
.arg(file.to_str().unwrap())
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
.stderr(predicates::str::is_empty().not());
}

@ -1,9 +1,6 @@
use crate::cli::{
fixtures::*,
utils::{regex_pred, FAILURE_LINE},
};
use assert_cmd::Command;
use crate::cli::{fixtures::*, utils::regex_pred};
use assert_fs::prelude::*;
use predicates::prelude::*;
use rstest::*;
use std::path::Path;
@ -89,7 +86,7 @@ fn regex_line(ty: &str, path: &str) -> String {
#[rstest]
#[test_log::test]
fn should_print_immediate_files_and_directories_by_default(mut action_cmd: CtxCommand<Command>) {
fn should_print_immediate_files_and_directories_by_default(ctx: DistantManagerCtx) {
let temp = make_directory();
let expected = regex_pred(&regex_stdout(vec![
@ -99,9 +96,9 @@ fn should_print_immediate_files_and_directories_by_default(mut action_cmd: CtxCo
("", "file2"),
]));
// distant action dir-read {path}
action_cmd
.args(["dir-read", temp.to_str().unwrap()])
// distant fs read {path}
ctx.new_assert_cmd(["fs", "read"])
.args([temp.to_str().unwrap()])
.assert()
.success()
.stdout(expected)
@ -112,7 +109,7 @@ fn should_print_immediate_files_and_directories_by_default(mut action_cmd: CtxCo
#[rstest]
#[test_log::test]
#[cfg_attr(windows, ignore)]
fn should_use_absolute_paths_if_specified(mut action_cmd: CtxCommand<Command>) {
fn should_use_absolute_paths_if_specified(ctx: DistantManagerCtx) {
let temp = make_directory();
// NOTE: Our root path is always canonicalized, so the absolute path
@ -126,9 +123,9 @@ fn should_use_absolute_paths_if_specified(mut action_cmd: CtxCommand<Command>) {
("", root_path.join("file2").to_str().unwrap()),
]));
// distant action dir-read --absolute {path}
action_cmd
.args(["dir-read", "--absolute", temp.to_str().unwrap()])
// distant fs read --absolute {path}
ctx.new_assert_cmd(["fs", "read"])
.args(["--absolute", temp.to_str().unwrap()])
.assert()
.success()
.stdout(expected)
@ -139,7 +136,7 @@ fn should_use_absolute_paths_if_specified(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
#[cfg_attr(windows, ignore)]
fn should_print_all_files_and_directories_if_depth_is_0(mut action_cmd: CtxCommand<Command>) {
fn should_print_all_files_and_directories_if_depth_is_0(ctx: DistantManagerCtx) {
let temp = make_directory();
let expected = regex_pred(&regex_stdout(vec![
@ -173,9 +170,9 @@ fn should_print_all_files_and_directories_if_depth_is_0(mut action_cmd: CtxComma
("", Path::new("file2").to_str().unwrap()),
]));
// distant action dir-read --depth 0 {path}
action_cmd
.args(["dir-read", "--depth", "0", temp.to_str().unwrap()])
// distant fs read --depth 0 {path}
ctx.new_assert_cmd(["fs", "read"])
.args(["--depth", "0", temp.to_str().unwrap()])
.assert()
.success()
.stdout(expected)
@ -186,7 +183,7 @@ fn should_print_all_files_and_directories_if_depth_is_0(mut action_cmd: CtxComma
#[rstest]
#[test_log::test]
#[cfg_attr(windows, ignore)]
fn should_include_root_directory_if_specified(mut action_cmd: CtxCommand<Command>) {
fn should_include_root_directory_if_specified(ctx: DistantManagerCtx) {
let temp = make_directory();
// NOTE: Our root path is always canonicalized, so yielded entry
@ -201,9 +198,9 @@ fn should_include_root_directory_if_specified(mut action_cmd: CtxCommand<Command
("", "file2"),
]));
// distant action dir-read --include-root {path}
action_cmd
.args(["dir-read", "--include-root", temp.to_str().unwrap()])
// distant fs read --include-root {path}
ctx.new_assert_cmd(["fs", "read"])
.args(["--include-root", temp.to_str().unwrap()])
.assert()
.success()
.stdout(expected)
@ -212,15 +209,15 @@ fn should_include_root_directory_if_specified(mut action_cmd: CtxCommand<Command
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = make_directory();
let dir = temp.child("missing-dir");
// distant action dir-read {path}
action_cmd
.args(["dir-read", dir.to_str().unwrap()])
// distant fs read {path}
ctx.new_assert_cmd(["fs", "read"])
.args([dir.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
.stderr(predicates::str::is_empty().not());
}

@ -0,0 +1,42 @@
use crate::cli::fixtures::*;
use assert_fs::prelude::*;
use indoc::indoc;
use predicates::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = indoc! {r#"
some text
on multiple lines
that is a file's contents
"#};
#[rstest]
#[test_log::test]
fn should_print_out_file_contents(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant fs read {path}
ctx.new_assert_cmd(["fs", "read"])
.args([file.to_str().unwrap()])
.assert()
.success()
.stdout(FILE_CONTENTS)
.stderr("");
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-file");
// distant fs read {path}
ctx.new_assert_cmd(["fs", "read"])
.args([file.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(predicates::str::is_empty().not());
}

@ -1,19 +1,18 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use crate::cli::fixtures::*;
use assert_fs::prelude::*;
use predicates::prelude::*;
use rstest::*;
#[rstest]
#[test_log::test]
fn should_support_removing_file(mut action_cmd: CtxCommand<Command>) {
fn should_support_removing_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("file");
file.touch().unwrap();
// distant action remove {path}
action_cmd
.args(["remove", file.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "remove"])
.args([file.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -24,7 +23,7 @@ fn should_support_removing_file(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
fn should_support_removing_empty_directory(mut action_cmd: CtxCommand<Command>) {
fn should_support_removing_empty_directory(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Make an empty directory
@ -32,8 +31,8 @@ fn should_support_removing_empty_directory(mut action_cmd: CtxCommand<Command>)
dir.create_dir_all().unwrap();
// distant action remove {path}
action_cmd
.args(["remove", dir.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "remove"])
.args([dir.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -44,9 +43,7 @@ fn should_support_removing_empty_directory(mut action_cmd: CtxCommand<Command>)
#[rstest]
#[test_log::test]
fn should_support_removing_nonempty_directory_if_force_specified(
mut action_cmd: CtxCommand<Command>,
) {
fn should_support_removing_nonempty_directory_if_force_specified(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Make a non-empty directory
@ -55,8 +52,8 @@ fn should_support_removing_nonempty_directory_if_force_specified(
dir.child("file").touch().unwrap();
// distant action remove --force {path}
action_cmd
.args(["remove", "--force", dir.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "remove"])
.args(["--force", dir.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -67,7 +64,7 @@ fn should_support_removing_nonempty_directory_if_force_specified(
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Make a non-empty directory
@ -76,12 +73,12 @@ fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
dir.child("file").touch().unwrap();
// distant action remove {path}
action_cmd
.args(["remove", dir.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "remove"])
.args([dir.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
.stderr(predicates::str::is_empty().not());
dir.assert(predicate::path::exists());
dir.assert(predicate::path::is_dir());

@ -1,5 +1,4 @@
use crate::cli::{fixtures::*, utils::FAILURE_LINE};
use assert_cmd::Command;
use crate::cli::fixtures::*;
use assert_fs::prelude::*;
use predicates::prelude::*;
use rstest::*;
@ -12,7 +11,7 @@ that is a file's contents
#[rstest]
#[test_log::test]
fn should_support_renaming_file(mut action_cmd: CtxCommand<Command>) {
fn should_support_renaming_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let src = temp.child("file");
@ -21,8 +20,8 @@ fn should_support_renaming_file(mut action_cmd: CtxCommand<Command>) {
let dst = temp.child("file2");
// distant action rename {src} {dst}
action_cmd
.args(["rename", src.to_str().unwrap(), dst.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "rename"])
.args([src.to_str().unwrap(), dst.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -34,7 +33,7 @@ fn should_support_renaming_file(mut action_cmd: CtxCommand<Command>) {
#[rstest]
#[test_log::test]
fn should_support_renaming_nonempty_directory(mut action_cmd: CtxCommand<Command>) {
fn should_support_renaming_nonempty_directory(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
// Make a non-empty directory
@ -47,8 +46,8 @@ fn should_support_renaming_nonempty_directory(mut action_cmd: CtxCommand<Command
let dst_file = dst.child("file");
// distant action rename {src} {dst}
action_cmd
.args(["rename", src.to_str().unwrap(), dst.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "rename"])
.args([src.to_str().unwrap(), dst.to_str().unwrap()])
.assert()
.success()
.stdout("")
@ -63,19 +62,19 @@ fn should_support_renaming_nonempty_directory(mut action_cmd: CtxCommand<Command
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let src = temp.child("dir");
let dst = temp.child("dir2");
// distant action rename {src} {dst}
action_cmd
.args(["rename", src.to_str().unwrap(), dst.to_str().unwrap()])
ctx.new_assert_cmd(["fs", "rename"])
.args([src.to_str().unwrap(), dst.to_str().unwrap()])
.assert()
.code(1)
.stdout("")
.stderr(FAILURE_LINE.clone());
.stderr(predicates::str::is_empty().not());
src.assert(predicate::path::missing());
dst.assert(predicate::path::missing());

@ -1,5 +1,4 @@
use crate::cli::fixtures::*;
use assert_cmd::Command;
use assert_fs::prelude::*;
use indoc::indoc;
use predicates::Predicate;
@ -18,7 +17,7 @@ const SEARCH_RESULTS_REGEX: &str = indoc! {r"
#[rstest]
#[test_log::test]
fn should_search_filesystem_using_query(mut action_cmd: CtxCommand<Command>) {
fn should_search_filesystem_using_query(ctx: DistantManagerCtx) {
let root = assert_fs::TempDir::new().unwrap();
root.child("file1.txt").write_str("some file text").unwrap();
root.child("file2.txt")
@ -46,8 +45,7 @@ fn should_search_filesystem_using_query(mut action_cmd: CtxCommand<Command>) {
});
// distant action search
action_cmd
.arg("search")
ctx.new_assert_cmd(["fs", "search"])
.arg("te[a-z]*\\b")
.arg(root.path())
.assert()

@ -1,7 +1,7 @@
use crate::cli::{fixtures::*, utils::ThreadedReader};
use assert_fs::prelude::*;
use rstest::*;
use std::{process::Command, thread, time::Duration};
use std::{thread, time::Duration};
fn wait_a_bit() {
wait_millis(250);
@ -17,14 +17,15 @@ fn wait_millis(millis: u64) {
#[rstest]
#[test_log::test]
fn should_support_watching_a_single_file(mut action_std_cmd: CtxCommand<Command>) {
fn should_support_watching_a_single_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("file");
file.touch().unwrap();
// distant action watch {path}
let mut child = action_std_cmd
.args(["watch", file.to_str().unwrap()])
// distant fs watch {path}
let mut child = ctx
.new_std_cmd(["fs", "watch"])
.arg(file.to_str().unwrap())
.spawn()
.expect("Failed to execute");
@ -68,7 +69,7 @@ fn should_support_watching_a_single_file(mut action_std_cmd: CtxCommand<Command>
#[rstest]
#[test_log::test]
fn should_support_watching_a_directory_recursively(mut action_std_cmd: CtxCommand<Command>) {
fn should_support_watching_a_directory_recursively(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let dir = temp.child("dir");
@ -77,9 +78,10 @@ fn should_support_watching_a_directory_recursively(mut action_std_cmd: CtxComman
let file = dir.child("file");
file.touch().unwrap();
// distant action watch {path}
let mut child = action_std_cmd
.args(["watch", "--recursive", temp.to_str().unwrap()])
// distant fs watch --recursive {path}
let mut child = ctx
.new_std_cmd(["fs", "watch"])
.args(["--recursive", temp.to_str().unwrap()])
.spawn()
.expect("Failed to execute");
@ -123,13 +125,14 @@ fn should_support_watching_a_directory_recursively(mut action_std_cmd: CtxComman
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(mut action_std_cmd: CtxCommand<Command>) {
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let invalid_path = temp.to_path_buf().join("missing");
// distant action watch {path}
let child = action_std_cmd
.args(["watch", invalid_path.to_str().unwrap()])
// distant fs watch {path}
let child = ctx
.new_std_cmd(["fs", "watch"])
.arg(invalid_path.to_str().unwrap())
.spawn()
.expect("Failed to execute");

@ -0,0 +1,125 @@
use crate::cli::fixtures::*;
use assert_fs::prelude::*;
use indoc::indoc;
use predicates::prelude::*;
use rstest::*;
const FILE_CONTENTS: &str = indoc! {r#"
some text
on multiple lines
that is a file's contents
"#};
const APPENDED_FILE_CONTENTS: &str = indoc! {r#"
even more
file contents
"#};
#[rstest]
#[test_log::test]
fn should_support_writing_stdin_to_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
// distant fs write {path}  (contents provided via stdin)
ctx.new_assert_cmd(["fs", "write"])
.args([file.to_str().unwrap()])
.write_stdin(FILE_CONTENTS)
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(FILE_CONTENTS);
}
#[rstest]
#[test_log::test]
fn should_support_appending_stdin_to_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant fs write --append {path}  (contents provided via stdin)
ctx.new_assert_cmd(["fs", "write"])
.args(["--append", file.to_str().unwrap()])
.write_stdin(APPENDED_FILE_CONTENTS)
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(format!("{}{}", FILE_CONTENTS, APPENDED_FILE_CONTENTS));
}
#[rstest]
#[test_log::test]
fn should_support_writing_argument_to_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
// distant fs write {path} -- {contents}
ctx.new_assert_cmd(["fs", "write"])
.args([file.to_str().unwrap(), "--"])
.arg(FILE_CONTENTS)
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(FILE_CONTENTS);
}
#[rstest]
#[test_log::test]
fn should_support_appending_argument_to_file(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("test-file");
file.write_str(FILE_CONTENTS).unwrap();
// distant fs write --append {path} -- {contents}
ctx.new_assert_cmd(["fs", "write"])
.args(["--append", file.to_str().unwrap(), "--"])
.arg(APPENDED_FILE_CONTENTS)
.assert()
.success()
.stdout("")
.stderr("");
// NOTE: We wait a little bit to give the OS time to fully write to file
std::thread::sleep(std::time::Duration::from_millis(100));
// Because we're talking to a local server, we can verify locally
file.assert(format!("{}{}", FILE_CONTENTS, APPENDED_FILE_CONTENTS));
}
#[rstest]
#[test_log::test]
fn yield_an_error_when_fails(ctx: DistantManagerCtx) {
let temp = assert_fs::TempDir::new().unwrap();
let file = temp.child("missing-dir").child("missing-file");
// distant fs write {path} -- {contents}
ctx.new_assert_cmd(["fs", "write"])
.args([file.to_str().unwrap(), "--"])
.arg(FILE_CONTENTS)
.assert()
.code(1)
.stdout("")
.stderr(predicates::str::is_empty().not());
// Because we're talking to a local server, we can verify locally
file.assert(predicates::path::missing());
}

@ -0,0 +1,14 @@
mod capabilities;
mod fs_copy;
mod fs_exists;
mod fs_make_dir;
mod fs_metadata;
mod fs_read_directory;
mod fs_read_file;
mod fs_remove;
mod fs_rename;
mod fs_search;
mod fs_watch;
mod fs_write;
mod spawn;
mod system_info;
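
All of the migrated tests above lean on a `DistantManagerCtx` fixture with `cmd`, `new_assert_cmd`, and `new_std_cmd` helpers that are not part of this excerpt. A hypothetical sketch of that surface, purely to illustrate how the calls in these diffs fit together; the real fixture lives in `crate::cli::fixtures` and also handles launching and connecting to a test manager, which is elided here:

```rust
// Hypothetical sketch only; not the actual fixture from crate::cli::fixtures.
use std::ffi::OsStr;
use std::process::{Command as StdCommand, Stdio};

use assert_cmd::Command as AssertCommand;

pub struct DistantManagerCtx;

impl DistantManagerCtx {
    /// `assert_cmd`-based command for `distant <subcommands...>`, used by
    /// tests that assert on exit code, stdout, and stderr.
    pub fn new_assert_cmd<I, S>(&self, subcommands: I) -> AssertCommand
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        let mut cmd = AssertCommand::cargo_bin("distant").expect("distant binary should build");
        for sub in subcommands {
            cmd.arg(sub);
        }
        cmd
    }

    /// Convenience for a single subcommand, e.g. `ctx.cmd("capabilities")`.
    pub fn cmd(&self, subcommand: &str) -> AssertCommand {
        self.new_assert_cmd([subcommand])
    }

    /// `std::process::Command` variant for tests that spawn the process and
    /// stream its output themselves, e.g. `fs watch`.
    pub fn new_std_cmd<I, S>(&self, subcommands: I) -> StdCommand
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        let mut cmd = StdCommand::new(assert_cmd::cargo::cargo_bin("distant"));
        cmd.stdin(Stdio::piped()).stdout(Stdio::piped()).stderr(Stdio::piped());
        for sub in subcommands {
            cmd.arg(sub);
        }
        cmd
    }
}
```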
