Compare commits

...

126 Commits

Author SHA1 Message Date
Chip Senkbeil 3fe1fba339
Correct wget usage for installation 10 months ago
Chip Senkbeil 48f7eb74ec
Update readme example to use --daemon instead of & for background manager 11 months ago
Chip Senkbeil 96abcefdc5
Add extra debug logging when starting a manager 11 months ago
Chip Senkbeil 22f3c2dd76
Fix bugs in set permissions for CLI and distant-local 12 months ago
Chip Senkbeil 0320e7fe24
Bump to v0.20.0 12 months ago
Chip Senkbeil 9e48300e83
Fix zombies being leftover from distant launch manager://localhost when servers self-terminate 12 months ago
Chip Senkbeil e304e6a689
Fix shutting down killed connections from a manager 12 months ago
Chip Senkbeil 8972013716
Refactor capabilities to version for manager, integrate version checking for client/server/manager, and define protocol version (#219) 12 months ago
Chip Senkbeil 0efb5aee4c
Add --shell support to CLI (#218) 12 months ago
Chip Senkbeil 56b3b8f4f1
Fix CLI commands with --format json not outputting errors in JSON 12 months ago
Chip Senkbeil eb23b4e1ad
Fix win service 12 months ago
Chip Senkbeil dc7e9b5309
Bump to alpha.12 12 months ago
Chip Senkbeil e0b8769087
Fix return code of --help and --version on cli 12 months ago
Chip Senkbeil 9bc50886bb
Update latest tagging with custom code that uses a personal access token to trigger workflows 12 months ago
Chip Senkbeil bd3b068651
Add workflow to tag latest 12 months ago
Chip Senkbeil c61393750a
Bump minimum version of Rust to 1.70.0 12 months ago
Chip Senkbeil 2abaf0b814
Use sparse checkout during publish 12 months ago
Chip Senkbeil 0e03fc3011
Reintroduce checkout to publish step 12 months ago
Chip Senkbeil cb8ea0507f
Bump to 0.20.0-alpha.11 and restore ci tests 12 months ago
Chip Senkbeil 8a34fec1f7
Update README 12 months ago
Chip Senkbeil 6feeb2d012
Tweaking release config until it works 12 months ago
Chip Senkbeil fefbe19a3c
Switch to stripping using cargo and supporting a latest release tag 12 months ago
Chip Senkbeil be7a15caa0
Refactor generation commands to use --output for files and printing to stdout by default 12 months ago
Chip Senkbeil 84ea28402d
Add support for distant spawn -c 'cmd str' 1 year ago
Chip Senkbeil b74cba28df
Bump to v0.20.0-alpha.10 1 year ago
Chip Senkbeil f4180f6245
Change search default to not use standard filters, and provide options to set filters manually 1 year ago
Chip Senkbeil c250acdfb4
Fix search task exiting on failing to start a search with distant-local 1 year ago
Chip Senkbeil 1836f20a2a
Bump to 0.20.0-alpha.9 1 year ago
Chip Senkbeil 9096a7d81b
Fix destination username & password parsing to accept full character set 1 year ago
Chip Senkbeil 7c08495904
Switch to unbounded channels for `Reply` (#207) 1 year ago
Chip Senkbeil da75801639
Fix server hangup (#206) 1 year ago
Nagy Botond 8009cc9361
fix(parser): allow `-` (hyphen) to appear in usernames (#203) 1 year ago
Chip Senkbeil 4fb9045152
Support sequential batch processing (#201) 1 year ago
Chip Senkbeil efad345a0d
Add header support to request & response (#200) 1 year ago
Chip Senkbeil 6ba3ded188
Fix not serializing when only renamed set, reset field name to timestamp from ts 1 year ago
Chip Senkbeil c4c46f80a9
Remove Formatter code by inlining logic for search and watch 1 year ago
Chip Senkbeil 791a41c29e
Refactor Change to use single path & support renamed detail field (#196) 1 year ago
Chip Senkbeil a36263e7e1
Fix makefile 1 year ago
Chip Senkbeil 6f98e44723
Bump to alpha.8 1 year ago
Chip Senkbeil 72cc998595
Update change to include timestamp and details fields 1 year ago
Chip Senkbeil 4eaae55d53
Refactor to use debouncer for file watching and support configuration (#195) 1 year ago
Chip Senkbeil 9da7679081
Support alternative file watching implementation for MacOS 1 year ago
Chip Senkbeil 009996b554
Remove crossbeam-channel feature from notify dependency (https://github.com/notify-rs/notify/issues/380) 1 year ago
Chip Senkbeil b163094d49
Update to test READMEs 1 year ago
Chip Senkbeil 3225471e28
Add some basic readmes 1 year ago
Chip Senkbeil 9f345eb31b
Update changelog for v0.20.0-alpha.7 release 1 year ago
Chip Senkbeil e99329d9a9
Refactor local crate & update/clean dependencies (#191) 1 year ago
Chip Senkbeil 40c265e35b
Update changelog to reflect new manager service install feature 1 year ago
Chip Senkbeil af903013f6
Support installing manager service with custom arguments 1 year ago
Chip Senkbeil 76dc7cf1fa
Refactor into protocol crate & change capabilities -> version (#189) 1 year ago
Chip Senkbeil 95c0d0c0d1
Fix bad test reference 1 year ago
Chip Senkbeil 528dea0917
Fix windows old auth reference 1 year ago
Chip Senkbeil 8cf7f11269
Refactor authentication into distant-auth 1 year ago
Chip Senkbeil 2042684c97
Update changelog with --lsp change 1 year ago
Chip Senkbeil 31aff1e282
Refactor --lsp [SCHEME] to just take the scheme and not the :// 1 year ago
Chip Senkbeil ea0424e2f4
Feat: set permissions support (#184) 1 year ago
Chip Senkbeil 137b4dc289
Bump to 0.20.0-alpha.7 as next version 1 year ago
Chip Senkbeil 3208fdcaa2
Update release.yml to update for a couple of installs 1 year ago
Chip Senkbeil 8768106c67
Bump changelog again 1 year ago
Chip Senkbeil b3e0f651d5
Bump changelog 1 year ago
Chip Senkbeil f2bd2f15f5
Add support for --lsp [scheme] 1 year ago
Chip Senkbeil 398aff2f12
Rename distant-core::data to distant-core::protocol and remove distant-core::data::{Msg, Request, Response} from being re-exported at crate root 1 year ago
Chip Senkbeil 7fceb63aa3
Update Cargo.lock dependencies 1 year ago
Chip Senkbeil 5740c2cc4d
Bump to 0.20.0-alpha.6 1 year ago
Chip Senkbeil b8fecaacc0
Apply nightly formatting to reorder imports and use module-level import granularity 1 year ago
Chip Senkbeil 5b19870b98
Add upward searching as an option 1 year ago
Chip Senkbeil bbf74f1e71
Fix launch manager://localhost not parsing distant.args properly 1 year ago
Chip Senkbeil c989a851ce
Add fix to changelog 1 year ago
Chip Senkbeil 09e8442892
Fix launch of manager://localhost 1 year ago
Chip Senkbeil 4b983b0229
Refactor into unified settings (#172) 1 year ago
Chip Senkbeil 093b4d2ec4
Bump referenced minimum Rust version in READMEs 1 year ago
Chip Senkbeil cfee78c2da
Bump more dependencies and fix tests for capabilities table and search results that use tabled 1 year ago
Chip Senkbeil d44df53e83
Update changelog 1 year ago
Chip Senkbeil 90305607e9
Bump dependency versions and update to v0.20.0-alpha.4 1 year ago
Chip Senkbeil 2b6bf3c0a8
Fix formatting 1 year ago
Chip Senkbeil 656a8007d6
Merge branch 'master' of github.com:chipsenkbeil/distant 1 year ago
Chip Senkbeil 8853d1072a
Bump versions 1 year ago
Chip Senkbeil 2ab41c4976
Fix timeout default 1 year ago
Chip Senkbeil 5940b21339
Fix formatting issues 1 year ago
Chip Senkbeil 78b0ee628e
Update changelog 1 year ago
Chip Senkbeil 40bd20e4ac
Add --current-dir as option for distant shell and lsp commands 1 year ago
Chip Senkbeil 55036478a0
Add default configuration and ability to generate default configuration 1 year ago
Chip Senkbeil 27dc5775f9
Update ssh launch to use pty (#157) 2 years ago
Chip Senkbeil 9b2f0de0c5
Reenable cli tests on windows (#156)
Also fixes a failing Windows test that has different behavior than Linux/Unix
2 years ago
Chip Senkbeil a023b8f22d
Add reconnect feature to table list 2 years ago
Chip Senkbeil ee50eaf9b3
Add heartbeat support (#153)
* Update to support zero-size frame items

* Add heartbeat functionality with client reconnecting logic

* Fix connection reauthentication failures preventing future reauthentication

* More logging

* Remove persist

* Update connection logic to have server take on client id rather than having client take on server id during reconnect

* Bump minimum rust version to 1.64.0

* Bump to v0.20.0-alpha.3 and fix clippy warnings

* Update cargo.lock
2 years ago
Chip Senkbeil ee595551ae
Release v0.20.0-alpha.2 2 years ago
Chip Senkbeil a544587bab
Add ConnectionState & ConnectionWatcher; update server to drop connection on read error 2 years ago
Chip Senkbeil 1c393ef723
Bump dependency versions and update our version to 0.20.0-alpha.2 2 years ago
Chip Senkbeil a41ef5996e
Bump version to 0.20.0-alpha.1 and add more targets for release 2 years ago
Chip Senkbeil 10141f2090
Another attempt at fixing release build for new linux 2 years ago
Chip Senkbeil e13ec37603
Fix broken release yml 2 years ago
Chip Senkbeil 8f3b204474
Update to attempt more linux release builds 2 years ago
Chip Senkbeil 3a4b98cdde
Ignore markdown files in ci 2 years ago
Chip Senkbeil bc3f6eef04
Update changelog for new release 2 years ago
Chip Senkbeil 65fdbe8650
Reset release.yml trigger 2 years ago
Chip Senkbeil 9ef32fe811
Update release to only apply on primary branch 2 years ago
Chip Senkbeil 4798b67dfe
Rewrite to support custom authentication, handshakes for encryption/compression, and reconnecting (#146) 2 years ago
Chip Senkbeil 7d1b3ba6f0
Fix stack overflow parsing BindAddress and improve resolution of BindAddress 2 years ago
Chip Senkbeil 4cf869ecb7
Add untyped request response (#137)
Add untyped request/response types to distant-net and trace for sending server message
2 years ago
Chip Senkbeil a8107aed3a
Update SearchQueryCondition to support logical or and contains types; also update non-regex types to escape regex 2 years ago
Chip Senkbeil 193bb6d237
Integrate ignore crate to provide parallel search, binary detection, and support ignore files (#136) 2 years ago
Chip Senkbeil dac318eb1e
Release v0.19.0 2 years ago
Chip Senkbeil cae6c5e244
Refactor SearchQuery to support multiple paths at once 2 years ago
Chip Senkbeil 01610a3ac7
Add search support (#131) 2 years ago
Chip Senkbeil 5130ee3b5f
Add missing capabilities() method to DistantChannelExt 2 years ago
Chip Senkbeil 53fd8d0c4f
Add capabilities support to server and manager 2 years ago
Chip Senkbeil c19df9f538
Fix ssh system info (#127) 2 years ago
Chip Senkbeil 1fa3a8acea
Refactor ssh options into a generic options map and rename extra/Extra into options/Map 2 years ago
Chip Senkbeil b9c00153a0
Refactor distant-ssh2 is_windows with fix for exec command 2 years ago
Chip Senkbeil 22b2a351de
Ignore family of system-info tests via ssh when on CI 2 years ago
Chip Senkbeil 591cd6ff41
Fix ssh launch login shell using sh on windows; update shell cli to use default shell from system info before defaulting 2 years ago
Chip Senkbeil 6d0bbd56fc
Add back missing newlines for system info printing 2 years ago
Chip Senkbeil 56a030e6dd
Update SystemInfo to support username and shell 2 years ago
Chip Senkbeil 486e5399ff
Leverage typed-path to determine if path is windows 2 years ago
Chip Senkbeil 4011671a77
Add SshFamily::as_static_str for logging purposes 2 years ago
Chip Senkbeil 04b20d1348
Fix distant client shell choosing appropriate default; update CHANGELOG 2 years ago
Chip Senkbeil 6c4318baa0
Update PtyProcess to only clone weak references to master pty for resizing; remove export of MasterPtyProcess 2 years ago
Chip Senkbeil ec95f573b9
Add logging for api local simple & pty processes 2 years ago
Chip Senkbeil 30548cdbfb
Update referenced versions in READMEs and add note about doing so to PUBLISH.md 2 years ago
Chip Senkbeil fd325e4523
Release v0.18.0 2 years ago
Chip Senkbeil 2cdfb89751
Add test to verify larger files can be written, read, and appended 2 years ago
Chip Senkbeil 74a37209eb
Refactor --shutdown-after into --shutdown (#49) 2 years ago
Chip Senkbeil 8e8eb8c574
Release v0.17.6 2 years ago
Chip Senkbeil 1ff3ef2db1
Fix shutdown-after such that it now functions 2 years ago
Chip Senkbeil a0c7c492bd
Add publish guide so I won't forget 2 years ago

@@ -0,0 +1,5 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"

@@ -1,4 +1,6 @@
[profile.ci]
fail-fast = false
retries = 2
retries = 4
slow-timeout = { period = "60s", terminate-after = 3 }
status-level = "fail"
final-status-level = "fail"

@@ -2,9 +2,15 @@ name: CI
on:
push:
paths-ignore:
- '**.md'
- 'Makefile.toml'
branches:
- master
pull_request:
paths-ignore:
- '**.md'
- 'Makefile.toml'
branches:
- master
@@ -26,22 +32,20 @@ jobs:
git config --system core.autocrlf false
git config --system core.eol lf
if: matrix.os == 'windows-latest'
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust (clippy)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
components: clippy
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2
with:
key: "ci-clippy-${{ matrix.os }}"
- name: Check Cargo availability
run: cargo --version
- name: distant-core (all features)
run: cargo clippy -p distant-core --all-targets --verbose --all-features
- name: distant-ssh2 (all features)
run: cargo clippy -p distant-ssh2 --all-targets --verbose --all-features
- name: distant (all features)
run: cargo clippy --all-targets --verbose --all-features
- name: Run clippy (all features)
run: cargo clippy --workspace --all-targets --verbose --all-features
rustfmt:
name: "Verify code formatting (${{ matrix.os }})"
runs-on: ${{ matrix.os }}
@@ -57,20 +61,25 @@ jobs:
git config --system core.autocrlf false
git config --system core.eol lf
if: matrix.os == 'windows-latest'
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust (rustfmt)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
components: rustfmt
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@v2
with:
key: "ci-rustfmt-${{ matrix.os }}"
- name: Check Cargo availability
run: cargo --version
- run: cargo fmt --all -- --check
tests:
name: "Test Rust ${{ matrix.rust }} on ${{ matrix.os }}"
runs-on: ${{ matrix.os }}
env:
RUSTFLAGS: --cfg ci
RUST_LOG: trace
strategy:
fail-fast: false
matrix:
@@ -78,9 +87,9 @@ jobs:
- { rust: stable, os: windows-latest, target: x86_64-pc-windows-msvc }
- { rust: stable, os: macos-latest }
- { rust: stable, os: ubuntu-latest }
- { rust: 1.61.0, os: ubuntu-latest }
- { rust: 1.70.0, os: ubuntu-latest }
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
@@ -89,103 +98,72 @@ jobs:
target: ${{ matrix.target }}
- uses: taiki-e/install-action@v1
with:
tool: cargo-nextest
- uses: Swatinem/rust-cache@v1
tool: cargo-nextest@0.9.45
- uses: Swatinem/rust-cache@v2
with:
key: "ci-tests-${{ matrix.os }}-${{ matrix.rust }}-${{ matrix.target }}"
- name: Check Cargo availability
run: cargo --version
- name: Install OpenSSH on Windows
run: |
# From https://gist.github.com/inevity/a0d7b9f1c5ba5a813917b92736122797
Add-Type -AssemblyName System.IO.Compression.FileSystem
function Unzip
{
param([string]$zipfile, [string]$outpath)
- uses: nick-fields/retry@v2
name: Install OpenSSH on Windows
if: matrix.os == 'windows-latest'
with:
timeout_minutes: 10
max_attempts: 3
shell: pwsh
command: |
# From https://gist.github.com/inevity/a0d7b9f1c5ba5a813917b92736122797
Add-Type -AssemblyName System.IO.Compression.FileSystem
function Unzip
{
param([string]$zipfile, [string]$outpath)
[System.IO.Compression.ZipFile]::ExtractToDirectory($zipfile, $outpath)
}
[System.IO.Compression.ZipFile]::ExtractToDirectory($zipfile, $outpath)
}
$url = 'https://github.com/PowerShell/Win32-OpenSSH/releases/latest/'
$request = [System.Net.WebRequest]::Create($url)
$request.AllowAutoRedirect=$false
$response=$request.GetResponse()
$file = $([String]$response.GetResponseHeader("Location")).Replace('tag','download') + '/OpenSSH-Win64.zip'
$url = 'https://github.com/PowerShell/Win32-OpenSSH/releases/latest/'
$request = [System.Net.WebRequest]::Create($url)
$request.AllowAutoRedirect=$false
$response=$request.GetResponse()
$file = $([String]$response.GetResponseHeader("Location")).Replace('tag','download') + '/OpenSSH-Win64.zip'
$client = new-object system.Net.Webclient;
$client.DownloadFile($file ,"c:\\OpenSSH-Win64.zip")
$client = new-object system.Net.Webclient;
$client.DownloadFile($file ,"c:\\OpenSSH-Win64.zip")
Unzip "c:\\OpenSSH-Win64.zip" "C:\Program Files\"
mv "c:\\Program Files\OpenSSH-Win64" "C:\Program Files\OpenSSH\"
Unzip "c:\\OpenSSH-Win64.zip" "C:\Program Files\"
mv "c:\\Program Files\OpenSSH-Win64" "C:\Program Files\OpenSSH\"
powershell.exe -ExecutionPolicy Bypass -File "C:\Program Files\OpenSSH\install-sshd.ps1"
powershell.exe -ExecutionPolicy Bypass -File "C:\Program Files\OpenSSH\install-sshd.ps1"
New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22,49152-65535
New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22,49152-65535
net start sshd
net start sshd
Set-Service sshd -StartupType Automatic
Set-Service ssh-agent -StartupType Automatic
Set-Service sshd -StartupType Automatic
Set-Service ssh-agent -StartupType Automatic
cd "C:\Program Files\OpenSSH\"
Powershell.exe -ExecutionPolicy Bypass -Command '. .\FixHostFilePermissions.ps1 -Confirm:$false'
cd "C:\Program Files\OpenSSH\"
Powershell.exe -ExecutionPolicy Bypass -Command '. .\FixHostFilePermissions.ps1 -Confirm:$false'
$registryPath = "HKLM:\SOFTWARE\OpenSSH\"
$Name = "DefaultShell"
$value = "C:\windows\System32\WindowsPowerShell\v1.0\powershell.exe"
$registryPath = "HKLM:\SOFTWARE\OpenSSH\"
$Name = "DefaultShell"
$value = "C:\windows\System32\WindowsPowerShell\v1.0\powershell.exe"
IF(!(Test-Path $registryPath))
{
New-Item -Path $registryPath -Force
New-ItemProperty -Path $registryPath -Name $name -Value $value -PropertyType String -Force
} ELSE {
New-ItemProperty -Path $registryPath -Name $name -Value $value -PropertyType String -Force
}
shell: pwsh
IF(!(Test-Path $registryPath))
{
New-Item -Path $registryPath -Force
New-ItemProperty -Path $registryPath -Name $name -Value $value -PropertyType String -Force
} ELSE {
New-ItemProperty -Path $registryPath -Name $name -Value $value -PropertyType String -Force
}
- name: Extend Windows retry count to be more resilient
run: echo "NEXTEST_RETRIES=9" >> $GITHUB_ENV
shell: bash
if: matrix.os == 'windows-latest'
- name: Run net tests (default features)
run: cargo nextest run --profile ci --release --verbose -p distant-net
- name: Run core tests (default features)
run: cargo nextest run --profile ci --release --verbose -p distant-core
- name: Run core tests (all features)
run: cargo nextest run --profile ci --release --verbose --all-features -p distant-core
- name: Ensure /run/sshd exists on Unix
run: mkdir -p /run/sshd
if: matrix.os == 'ubuntu-latest'
- name: Run ssh2 client tests (default features)
run: cargo nextest run --profile ci --release --verbose -p distant-ssh2 ssh2::client
- name: Run ssh2 client tests (all features)
run: cargo nextest run --profile ci --release --verbose --all-features -p distant-ssh2 ssh2::client
- name: Run CLI tests
run: cargo nextest run --profile ci --release --verbose
- name: Run CLI tests (no default features)
run: cargo nextest run --profile ci --release --verbose --no-default-features
ssh-launch-tests:
name: "Test ssh launch using Rust ${{ matrix.rust }} on ${{ matrix.os }}"
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
include:
- { rust: stable, os: macos-latest }
- { rust: stable, os: ubuntu-latest }
- { rust: 1.61.0, os: ubuntu-latest }
steps:
- uses: actions/checkout@v2
- name: Install Rust ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.rust }}
- uses: taiki-e/install-action@v1
with:
tool: cargo-nextest
- uses: Swatinem/rust-cache@v1
- name: Check Cargo availability
run: cargo --version
- name: Install distant cli for use in launch tests
run: |
cargo install --path .
echo "DISTANT_PATH=$HOME/.cargo/bin/distant" >> $GITHUB_ENV
- name: Run ssh2 launch tests (default features)
run: cargo nextest run --profile ci --release --verbose -p distant-ssh2 ssh2::launched
- name: Run ssh2 launch tests (all features)
run: cargo nextest run --profile ci --release --verbose --all-features -p distant-ssh2 ssh2::launched
- name: Run all workspace tests (all features)
run: cargo nextest run --profile ci --release --all-features --workspace
- name: Run all doc tests (all features)
run: cargo test --release --all-features --workspace --doc

@@ -0,0 +1,24 @@
name: 'Tag latest'
on:
push:
branches:
- master
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Tag latest and push
env:
GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
run: |
git config user.name "${GITHUB_ACTOR}"
git config user.email "${GITHUB_ACTOR}@users.noreply.github.com"
origin_url="$(git config --get remote.origin.url)"
origin_url="${origin_url/#https:\/\//https:\/\/$GITHUB_TOKEN@}" # add token to URL
git tag latest --force
git push "$origin_url" --tags --force

@@ -0,0 +1,28 @@
name: 'Lock Threads'
on:
schedule:
- cron: '0 3 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
concurrency:
group: lock
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v4
with:
issue-inactive-days: '30'
issue-comment: >
I'm going to lock this issue because it has been closed for _30 days_ ⏳.
This helps our maintainers find and focus on the active issues.
If you have found a problem that seems similar to this, please open a new
issue and complete the issue template so we can capture all the details
necessary to investigate further.
process-only: 'issues'

@@ -5,248 +5,312 @@ on:
tags:
- v[0-9]+.[0-9]+.[0-9]+
- v[0-9]+.[0-9]+.[0-9]+-**
- latest
# Status of Targets:
#
# ✅ x86_64-apple-darwin
# ✅ aarch64-apple-darwin
#
# ✅ x86_64-pc-windows-msvc
# ✅ aarch64-pc-windows-msvc
#
# ✅ x86_64-unknown-linux-gnu
# ✅ aarch64-unknown-linux-gnu
# ❌ aarch64-linux-android (fails due to termios)
# ✅ armv7-unknown-linux-gnueabihf
#
# ✅ x86_64-unknown-linux-musl
# ✅ aarch64-unknown-linux-musl
#
# ✅ x86_64-unknown-freebsd
# ❓ aarch64-unknown-freebsd (works manually, but cannot cross-compile via CI)
#
# ❌ x86_64-unknown-netbsd (fails due to termios)
# ❌ aarch64-unknown-netbsd (???)
#
# ❌ x86_64-unknown-openbsd (fails due to rustc internal error at end)
# ❌ aarch64-unknown-openbsd (fails due to openssl-src)
#
jobs:
macos:
name: "Build release on MacOS"
name: "Build release on MacOS (${{ matrix.target }})"
runs-on: macos-11.0
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: macos
X86_ARCH: x86_64-apple-darwin
ARM_ARCH: aarch64-apple-darwin
X86_DIR: target/x86_64-apple-darwin/release
ARM_DIR: target/aarch64-apple-darwin/release
BUILD_BIN: distant
UNIVERSAL_REL_BIN: distant-macos
strategy:
matrix:
target:
- x86_64-apple-darwin
- aarch64-apple-darwin
steps:
- uses: actions/checkout@v2
- name: Install Rust (x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- name: Install Rust (ARM)
- uses: actions/checkout@v3
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARM_ARCH }}
- uses: Swatinem/rust-cache@v1
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
- name: Build binary (aarch64)
target: ${{ matrix.target }}
override: true
- uses: Swatinem/rust-cache@v2
- name: Build binary (${{ matrix.target }})
run: |
cargo build --release --all-features --target ${{ env.ARM_ARCH }}
ls -l ./${{ env.ARM_DIR }}
strip ./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
macos_unify:
name: "Build universal binary on MacOS"
needs: [macos]
runs-on: macos-11.0
steps:
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- name: Unify binaries
run: |
lipo -create -output ${{ env.UNIVERSAL_REL_BIN }} \
./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} \
./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
chmod +x ./${{ env.UNIVERSAL_REL_BIN }}
lipo -create -output distant-universal-apple-darwin \
./x86_64-apple-darwin/distant-x86_64-apple-darwin \
./aarch64-apple-darwin/distant-aarch64-apple-darwin
chmod +x ./distant-universal-apple-darwin
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.UNIVERSAL_REL_BIN }}
name: universal-apple-darwin
path: ./distant-universal-apple-darwin
if-no-files-found: error
retention-days: 5
windows:
name: "Build release on Windows"
name: "Build release on Windows (${{ matrix.target }})"
runs-on: windows-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: win64
X86_ARCH: x86_64-pc-windows-msvc
X86_DIR: target/x86_64-pc-windows-msvc/release
BUILD_BIN: distant.exe
X86_REL_BIN: distant-win64.exe
strategy:
matrix:
target:
- x86_64-pc-windows-msvc
- aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v2
- name: Install Rust (MSVC)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- uses: Swatinem/rust-cache@v1
- name: Build binary (x86_64)
target: ${{ matrix.target }}
override: true
- uses: Swatinem/rust-cache@v2
- name: Build binary (${{ matrix.target }})
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_REL_BIN }}
chmod +x ./${{ env.X86_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant.exe ./distant-${{ matrix.target }}.exe
chmod +x ./distant-${{ matrix.target }}.exe
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}.exe
if-no-files-found: error
retention-days: 5
linux_gnu:
name: "Build release on Linux (gnu)"
linux:
name: "Build release on Linux (${{ matrix.target }})"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu
X86_GNU_ARCH: x86_64-unknown-linux-gnu
X86_GNU_DIR: target/x86_64-unknown-linux-gnu/release
BUILD_BIN: distant
X86_GNU_REL_BIN: distant-linux64-gnu
strategy:
matrix:
include:
- target: x86_64-unknown-linux-gnu
build: --all-features
cargo: cargo
- target: aarch64-unknown-linux-gnu
build: --all-features
deps: gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cargo
- target: armv7-unknown-linux-gnueabihf
build: --all-features
deps: gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
cargo: cargo
- target: x86_64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools
cargo: cargo
- target: aarch64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
- target: x86_64-unknown-freebsd
build: --all-features
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_GNU_ARCH }}
- uses: Swatinem/rust-cache@v1
- name: Build binary (GNU x86_64)
target: ${{ matrix.target }}
override: true
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
if: ${{ matrix.deps }}
run: |
cargo build --release --all-features --target ${{ env.X86_GNU_ARCH }}
ls -l ./${{ env.X86_GNU_DIR }}
strip ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_GNU_REL_BIN }}
chmod +x ./${{ env.X86_GNU_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_GNU_REL_BIN }}
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl:
name: "Build release on Linux (musl)"
runs-on: ubuntu-latest
container:
image: alpine:3.14
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl
X86_MUSL_ARCH: x86_64-unknown-linux-musl
X86_MUSL_DIR: target/x86_64-unknown-linux-musl/release
BUILD_BIN: distant
X86_MUSL_REL_BIN: distant-linux64-musl
steps:
- uses: actions/checkout@v2
- name: Install base dependencies
run: |
apk add --update --no-cache build-base libc6-compat curl perl
- name: Install Rust (MUSL)
run: |
curl https://sh.rustup.rs -sSf | sh -s -- -y --profile minimal
- uses: Swatinem/rust-cache@v1
- name: Build binary (MUSL x86_64)
sudo apt update
sudo apt install -y ${{ matrix.deps }}
- name: Preparing system
if: ${{ matrix.prepare }}
run: ${{ matrix.prepare }}
- name: Build binary (${{ matrix.target }})
run: |
source $HOME/.cargo/env
cargo build --release --no-default-features --features ssh2 --target ${{ env.X86_MUSL_ARCH }}
ls -l ./${{ env.X86_MUSL_DIR }}
strip ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_MUSL_REL_BIN }}
chmod +x ./${{ env.X86_MUSL_REL_BIN }}
${{ matrix.cargo }} build --release ${{ matrix.build }} --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
# bsd:
# name: "Build release on ${{ matrix.os.name }} (${{ matrix.os.target }})"
# runs-on: ${{ matrix.os.host }}
# strategy:
# matrix:
# os:
# - name: freebsd
# architecture: x86-64
# version: '13.2'
# host: macos-12
# target: x86_64-unknown-freebsd
# build: --all-features
# prepare: sudo pkg install -y openssl gmake lang/rust devel/llvm-devel
# - name: netbsd
# architecture: x86-64
# version: '9.3'
# host: macos-12
# target: x86_64-unknown-netbsd
# build: --all-features
# prepare: |
# PATH="/usr/pkg/sbin:/usr/pkg/bin:$PATH"
# PKG_PATH="https://cdn.NetBSD.org/pub/pkgsrc/packages"
# PKG_PATH="$PKG_PATH/NetBSD/x86_64/9.3/All/"
# export PATH PKG_PATH
# sudo -E pkg_add -I gmake rust
# cargo update --dry-run
# - name: openbsd
# architecture: x86-64
# version: '7.3'
# host: macos-12
# target: x86_64-unknown-openbsd
# build: --all-features
# prepare: |
# sudo pkg_add -I gmake rust llvm
# sed -i 's/lto = true/lto = false/' Cargo.toml
# steps:
# - uses: actions/checkout@v3
# - uses: Swatinem/rust-cache@v2
# - name: Build in VM
# uses: cross-platform-actions/action@v0.15.0
# env:
# CARGO_INCREMENTAL: 0
# with:
# environment_variables: CARGO_INCREMENTAL
# operating_system: ${{ matrix.os.name }}
# architecture: ${{ matrix.os.architecture }}
# version: ${{ matrix.os.version }}
# shell: bash
# run: |
# ${{ matrix.os.prepare }}
# cargo build --release ${{ matrix.os.build }} --target ${{ matrix.os.target }}
# mv ./target/${{ matrix.os.target }}/release/distant ./distant-${{ matrix.os.target }}
# chmod +x ./distant-${{ matrix.os.target }}
# - name: Upload
# uses: actions/upload-artifact@v2
# with:
# name: ${{ matrix.os.target }}
# path: ./distant-${{ matrix.os.target }}
# if-no-files-found: error
# retention-days: 5
publish:
needs: [macos, windows, linux_gnu, linux_musl]
needs: [macos, macos_unify, windows, linux]
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
permissions:
contents: write
env:
MACOS: macos
MACOS_UNIVERSAL_BIN: distant-macos
WIN64: win64
WIN64_BIN: distant-win64.exe
LINUX64_GNU: linux64-gnu
LINUX64_GNU_BIN: distant-linux64-gnu
LINUX64_MUSL: linux64-musl
LINUX64_MUSL_BIN: distant-linux64-musl
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
sparse-checkout: |
CHANGELOG.md
sparse-checkout-cone-mode: false
- uses: actions/download-artifact@v2
- name: Generate MacOS SHA256 checksums
run: |
cd ${{ env.MACOS }}
sha256sum ${{ env.MACOS_UNIVERSAL_BIN }} > ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum
echo "SHA_MACOS_BIN=$(cat ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Win64 SHA256 checksums
- name: Generate SHA256 checksums
run: |
cd ${{ env.WIN64 }}
sha256sum ${{ env.WIN64_BIN }} > ${{ env.WIN64_BIN }}.sha256sum
echo "SHA_WIN64_BIN=$(cat ${{ env.WIN64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU }}
sha256sum ${{ env.LINUX64_GNU_BIN }} > ${{ env.LINUX64_GNU_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_BIN=$(cat ${{ env.LINUX64_GNU_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL }}
sha256sum ${{ env.LINUX64_MUSL_BIN }} > ${{ env.LINUX64_MUSL_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_BIN=$(cat ${{ env.LINUX64_MUSL_BIN }}.sha256sum)" >> $GITHUB_ENV
for i in $(find . -name "distant-*" -type f); do
echo "Generating checksum for ${i}"
sha256sum "${i}" > "${i}.sha256sum"
done
- name: Determine git tag
if: github.event_name == 'push'
run: |
TAG_NAME=${{ github.ref }}
echo "TAG_NAME=${TAG_NAME#refs/tags/}" >> $GITHUB_ENV
echo "TAG_VERSION=${TAG_NAME#refs/tags/v}" >> $GITHUB_ENV
- name: Check git tag for pre-release
- name: Check git tag for pre-release or latest
id: check-tag
run: |
if [[ ${{ github.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-.*$ ]]; then
echo ::set-output name=match::true
echo "is_prerelease=true" >> $GITHUB_OUTPUT
elif [[ ${{ github.ref }} =~ ^refs/tags/latest$ ]]; then
echo "is_latest=true" >> $GITHUB_OUTPUT
fi
- name: Print pre-release status
run: |
echo "Is ${{ github.ref }} a pre-release: ${{ steps.check-tag.outputs.match }}"
echo "Is ${{ github.ref }} pre-release: ${{ steps.check-tag.outputs.is_prerelease }}"
echo "Is ${{ github.ref }} latest: ${{ steps.check-tag.outputs.is_latest }}"
- name: Get Changelog Entry
id: changelog
uses: mindsers/changelog-reader-action@v2
with:
version: ${{ env.TAG_VERSION }}
path: "./CHANGELOG.md"
- name: Publish
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
- name: Publish (latest)
if: ${{ steps.check-tag.outputs.is_latest == 'true' }}
uses: softprops/action-gh-release@v1
with:
name: Latest Build
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: true
files: |
**/distant-*
body: |
This is the latest commit (${{ github.sha }}) built for testing.
This is not guaranteed to pass all tests or even function properly.
- name: Publish (release)
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
uses: softprops/action-gh-release@v1
with:
name: distant ${{ env.TAG_NAME }}
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: ${{ steps.check-tag.outputs.match == 'true' }}
prerelease: ${{ steps.check-tag.outputs.is_prerelease == 'true' }}
files: |
${{ env.MACOS }}/${{ env.MACOS_UNIVERSAL_BIN }}
${{ env.WIN64 }}/${{ env.WIN64_BIN }}
${{ env.LINUX64_GNU }}/${{ env.LINUX64_GNU_BIN }}
${{ env.LINUX64_MUSL }}/${{ env.LINUX64_MUSL_BIN }}
**/*.sha256sum
**/distant-*
body: |
## Release Notes
${{ steps.changelog.outputs.changes }}
## Binaries
Standalone binaries are built out for Windows (x86_64), MacOS (Intel & ARM), and Linux (x86_64).
- **linux64-gnu** is the x86-64 release on Linux using libc
- **linux64-musl** is the x86-64 release on Linux using musl (static binary, no libc dependency)
- **macos** is a universal binary for Mac OS that supports x86-64 and aarch64 (arm) platforms
- **win64** is the x86-64 release on Windows using MSVC
## SHA256 Checksums
```
${{ env.SHA_MACOS_BIN }}
${{ env.SHA_WIN64_BIN }}
${{ env.SHA_LINUX64_GNU_BIN }}
${{ env.SHA_LINUX64_MUSL_BIN }}
```

1
.gitignore vendored

@ -2,3 +2,4 @@
**/.DS_Store
/distant-core/Cargo.lock
/distant-ssh2/Cargo.lock
**/*.un~

@ -6,6 +6,402 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Fixed
- Bug in `distant fs set-permissions` where partial permissions such as `go-w`
would result in clearing all permissions
- Bug in `distant-local` implementation of `SetPermissions` where read-only
status was being set/cleared prior to Unix permissions being applied,
resulting in applying an invalid change to the permissions
## [0.20.0]
All changes described in these alpha releases:
- [Alpha 13](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.13)
- [Alpha 12](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.12)
- [Alpha 11](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.11)
- [Alpha 10](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.10)
- [Alpha 9](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.9)
- [Alpha 8](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.8)
- [Alpha 7](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.7)
- [Alpha 6](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.6)
- [Alpha 5](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.5)
- [Alpha 4](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.4)
- [Alpha 3](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.3)
- [Alpha 2](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.2)
- [Alpha 1](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.1)
### Fixed
- When terminating a connection using `distant manager kill`, the connection is
  now properly dropped, so servers waiting to terminate due to
  `--shutdown lonely=N` will now shut down accordingly
- Zombies from spawned servers via `distant launch manager://localhost` are now
properly terminated by checking the exit status of processes
## [0.20.0-alpha.13]
### Added
- Support for `--shell` with optional path to an explicit shell as an option
when executing `distant spawn` in order to run the command within a shell
rather than directly
- `semver` crate to be used for version information in protocol and manager
- `is_compatible_with` function to root of `distant-protocol` crate that checks
if a provided version is compatible with the protocol
### Changed
- `distant_protocol::PROTOCOL_VERSION` now uses the crate's major, minor, and
patch version at compile-time (parsed via `const-str` crate) to streamline
version handling between crate and protocol
- Protocol and manager now supply a version request instead of capabilities and
the capabilities of protocol are now a `Vec<String>` to contain a set of more
broad capabilities instead of every possible request type
### Fixed
- CLI commands like `distant manager select` will now output errors in a JSON
format when configured to communicate using JSON
- `distant-ssh2` no longer caches the remote family globally, but instead
caches it per `Ssh` instance
### Removed
- `Cmd::program` and `Cmd::arguments` functions as they were misleading (didn't
do what `distant-local` or `distant-ssh2` do)
- Removed `Capability` and `Capabilities` from protocol and manager
## [0.20.0-alpha.12]
### Changed
- Minimum Rust version is now `1.70.0` due to bump in `grep-cli` minimum
requirement. This technically applied to v0.20.0-alpha.11, but wasn't caught
until the dependency updated
### Fixed
- `distant --help` will now return exit code of 0
- `distant --version` will now return exit code of 0
## [0.20.0-alpha.11]
### Added
- CLI now supports `-c <STR>` and `--cmd <STR>` to use a given string as the
command as an alternative to `-- <CMD> <ARG> <ARG>`
- Add build for FreeBSD
### Changed
- Cli no longer uses `-c` as shorthand for specifying a config file
- `--file` option for generating completion has been renamed to `--output`
- CLI command to generate config files now defaults to printing to stdout with
`--output` providing the option to write to a file
- Artifacts built now use format of `distant-<TRIPLE>`
## [0.20.0-alpha.10]
### Added
- `use_hidden`, `use_ignore_files`, `use_parent_ignore_files`,
`use_git_ignore`, `use_global_git_ignore`, and `use_git_exclude` as new
options for searching
### Changed
- Searching now disables all standard filters by default, while re-introducing
  the ability to set each filter via individual options
### Fixed
- Failing to start a search will no longer cause the search task to exit when
using the local server, which would result in no more searches being able to
be executed
## [0.20.0-alpha.9]
### Added
- `Request` and `Response` types from `distant-net` now support an optional
`Header` to send miscellaneous information
### Changed
- `Change` structure now provides a single `path` instead of `paths` with the
`distant-local` implementation sending a separate `Changed` event per path
- `ChangeDetails` now includes a `renamed` field to capture the new path name
when known
- `DistantApi` now handles batch requests in parallel, returning the results in
order. To achieve the previous sequential processing of batch requests, the
header value `sequence` needs to be set to true
- Rename `GenericServerRef` to `ServerRef` and remove `ServerRef` trait,
refactoring `TcpServerRef`, `UnixSocketServerRef`, and `WindowsPipeServerRef`
to use the struct instead of `Box<dyn ServerRef>`
- Update `Reply` trait and associated implementations to be non-blocking &
synchronous as opposed to asynchronous to avoid deadlocks and also be more
performant
### Fixed
- Username and password now support full character sets outside of `@` for
passwords and `:` and `@` for usernames
## [0.20.0-alpha.8]
### Added
- `distant-local` now has two features: `macos-fsevent` and `macos-kqueue`.
These are used to indicate what kind of file watching to support (for MacOS).
The default is `macos-fsevent`.
- `[server.watch]` configuration is now available with the following
settings:
- `native = <bool>` to specify whether to use native watching or polling
(default true)
- `poll_interval = <secs>` to specify seconds to wait between polling
attempts (only for polling watcher)
- `compare_contents = <bool>` to specify how polling watcher will evaluate a
file change (default false)
- `debounce_timeout = <secs>` to specify how long to wait before sending a
change notification (will aggregate and merge changes)
- `debounce_tick_rate = <secs>` to specify how long to wait between event
aggregation loops
- `distant-protocol` response for a change now supports these additional
fields:
- `timestamp` (serialized as `ts`) to communicate the seconds since unix
epoch when the event was received
- `details` containing `attributes` (clarify changes on attribute kind) and
`extra` (to convey arbitrary platform-specific extra information)
### Changed
- Bump minimum Rust version to 1.68.0
### Removed
- `crossbeam-channel` dependency removed from notify by disabling its feature
in order to avoid a `tokio::spawn` issue (https://github.com/notify-rs/notify/issues/380)
### Fixed
- Usernames with `-` (hyphen) were rejected as invalid
## [0.20.0-alpha.7]
### Added
- New `SetPermissions` enum variant on protocol request
- New `set_permissions` method available `DistantApi` and implemented by local
server (ssh unavailable due to https://github.com/wez/wezterm/issues/3784)
- Implementation of `DistantChannelExt::set_permissions`
- `distant version` to display information about connected server
- `distant manager service install` now accepts additional arguments to provide
the manager on startup
### Changed
- CLI `--lsp [<SCHEME>]` scheme now expects just the scheme and not `://`
- Moved `distant_net::common::authentication` to separate crate `distant-auth`
- Moved `distant_net::common::authentication::Keychain` to
`distant_net::common::Keychain`
- Moved `distant_net::common::transport::framed::codec::encryption::SecretKey`
and similar to `distant_net::common::SecretKey`
- Search matches reported with `match` key are now inlined as either a byte
array or a string and no longer an object with a `type` and `value` field
- Unset options and values are now omitted from `JSON` serialization rather
  than being returned as explicit `null` values
- `Capabilities` message type has been changed to `Version` with new struct to
report the version information that includes a server version string,
protocol version tuple, and capabilities
- `distant_core::api::local` moved to `distant_local`
### Removed
- `distant capabilities` has been removed in favor of `distant version`
## [0.20.0-alpha.6]
### Changed
- Renamed `distant_core::data` to `distant_core::protocol`
- CLI `--lsp` now accepts an optional `scheme` to be used instead of
`distant://`, which is the default
- `RemoteLspProcess` now takes a second argument, `scheme`, which dictates
whether to translate `distant://` or something else
## [0.20.0-alpha.5]
### Added
- CLI now offers the following new subcommands
- `distant fs copy` is a refactoring of `distant client action copy`
- `distant fs exists` is a refactoring of `distant client action exists`
- `distant fs read` is a refactoring of `distant client action file-read`,
`distant client action file-read-text`, and `distant client action dir-read`
- `distant fs rename` is a refactoring of `distant client action rename`
- `distant fs write` is a refactoring of `distant client action file-write`,
`distant client action file-write-text`, `distant client action file-append`,
- `distant fs make-dir` is a refactoring of `distant client action dir-create`
- `distant fs metadata` is a refactoring of `distant client action metadata`
- `distant fs remove` is a refactoring of `distant client action remove`
- `distant fs search` is a refactoring of `distant client action search`
- `distant fs watch` is a refactoring of `distant client action watch`
- `distant spawn` is a refactoring of `distant client action proc-spawn`
with `distant client lsp` merged in using the `--lsp` flag
- `distant system-info` is a refactoring of `distant client action system-info`
- Search now supports `upward` as a directional setting to traverse upward
looking for results rather than recursing downward
### Changed
- CLI subcommands refactored
- `distant client select` moved to `distant manager select`
- `distant client action` moved to `distant action`
- `distant client launch` moved to `distant launch`
- `distant client connect` moved to `distant connect`
- `distant client lsp` moved to `distant lsp`
- `distant client repl` moved to `distant api`
- `distant client shell` moved to `distant shell`
### Removed
- `distant-core` crate no longer offers the `clap` feature
### Fixed
- `distant launch manager://localhost` now rejects a bind address of `ssh`
as the `SSH_CONNECTION` environment variable isn't available in most cases
## [0.20.0-alpha.4] - 2023-03-31
### Added
- Default configuration for `config.toml`
- Ability to generate default configuration using
`distant generate config /path/to/config.toml`
- `--current-dir` option for `distant client shell` and `distant client lsp`
### Changed
- Updated a variety of dependencies to latest versions
## [0.20.0-alpha.3] - 2022-11-27
### Added
- `Frame::empty` method as convenience for `Frame::new(&[])`
- `ClientConfig` to support `ReconnectStrategy` and a duration serving as the
maximum time to wait between server activity before attempting to reconnect
from the client
- Server sends empty frames periodically to act as heartbeats to let the client
know if the connection is still established
- Client now tracks length of time since last server activity and will attempt
a reconnect if no activity beyond that point
### Changed
- `Frame` methods `read` and `write` no longer return an `io::Result<...>`
and instead return `Option<Frame<...>>` and nothing respectively
- `Frame::read` method now supports zero-size items
- `Client::inmemory_spawn` and `UntypedClient::inmemory_spawn` now take a
`ClientConfig` as the second argument instead of `ReconnectStrategy`
- Persist option now removed from `ProcSpawn` message and CLI
- Bump minimum Rust version to 1.64.0
### Removed
- `--no-shell` option is removed as we automatically detect and use the PTY of
the remote system using a default shell
## [0.20.0-alpha.2] - 2022-11-20
### Added
- New `ConnectionState` and `ConnectionWatcher` to support watching changes to
the client connection, supporting `clone_connection_watcher` and
`on_connection_change` methods for the client
### Changed
- Server will now drop the connection if it receives an error (other than
WouldBlock) while trying to read from the transport, rather than just logging
the error, regardless of whether the error is resumable
## [0.20.0-alpha.1] - 2022-11-19
**NOTE: This is incomplete as v0.20.0 is a near-complete rewrite internally.**
### Added
- New `contains` and `or` types for `SearchQueryCondition`
### Changed
- `SearchQueryCondition` now escapes regex for all types except `regex`
- Removed `min_depth` option from search
- Updated search to properly use binary detection, filter out common ignore
file patterns, and execute in parallel via the `ignore` crate and `num_cpus`
crate to calculate thread count
### Fixed
- Resolution of `BindAddress` now properly handles hostnames ranging from
`localhost` to `example.com`
- Parsing of `BindAddress` no longer causes a stack overflow
## [0.19.0] - 2022-08-30
### Added
- `SystemInfo` via ssh backend now detects and reports username and shell
- `SystemInfo` via ssh backend now reports os when windows detected
- `Capabilities` request/response for server and manager that report back the
capabilities (and descriptions) supported by the server or manager
- `Search` and `CancelSearch` request/response for server that performs a
search using `grep` crate against paths or file contents, returning results
back as a stream
- New `Searcher` available as part of distant client interface to support
performing a search and getting back results
- Updated `DistantChannelExt` to support creating a `Searcher` and canceling
an ongoing search query
- `distant client action search` now supported, waiting for results and
printing them out
### Changed
- `SystemInfo` data type now includes two additional fields: `username` and
`shell`. The `username` field represents the name of the user running the
server process. The `shell` field points to the default shell associated with
the user running the server process
### Fixed
- `distant client shell` will now use the default shell from system info, or
choose between `/bin/sh` and `cmd.exe` as the default shell based on the
family returned by a system info request
- `distant client shell` properly terminates master pty when the shell exits,
resolving the hanging that occurred for Windows `cmd.exe` and
`powershell.exe` upon exit
- ssh launch with login shell now only uses `sh` when remote family is `unix`
- ssh backend implementation of copy now works more widely across windows
systems by switching to `powershell.exe` to perform copy
## [0.18.0] - 2022-08-18
### Changed
- `shutdown-after` replaced with `shutdown` that supports three options:
1. `never` - server will never shutdown automatically
2. `after=N` - server will shutdown after N seconds
3. `lonely=N` - server will shutdown N seconds after no connections
## [0.17.6] - 2022-08-18
### Fixed
- `shutdown-after` cli parameter and config option now properly shuts down
server after N seconds with no connections
## [0.17.5] - 2022-08-18
### Fixed
@ -247,7 +643,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
pending upon full channel and no longer locks up
- stdout, stderr, and stdin of `RemoteProcess` no longer cause deadlock
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.17.4...HEAD
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.13...HEAD
[0.20.0-alpha.13]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.12...v0.20.0-alpha.13
[0.20.0-alpha.12]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.11...v0.20.0-alpha.12
[0.20.0-alpha.11]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.10...v0.20.0-alpha.11
[0.20.0-alpha.10]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.9...v0.20.0-alpha.10
[0.20.0-alpha.9]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.8...v0.20.0-alpha.9
[0.20.0-alpha.8]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.7...v0.20.0-alpha.8
[0.20.0-alpha.7]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.6...v0.20.0-alpha.7
[0.20.0-alpha.6]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.5...v0.20.0-alpha.6
[0.20.0-alpha.5]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.4...v0.20.0-alpha.5
[0.20.0-alpha.4]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.3...v0.20.0-alpha.4
[0.20.0-alpha.3]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.2...v0.20.0-alpha.3
[0.20.0-alpha.2]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.1...v0.20.0-alpha.2
[0.20.0-alpha.1]: https://github.com/chipsenkbeil/distant/compare/v0.19.0...v0.20.0-alpha.1
[0.19.0]: https://github.com/chipsenkbeil/distant/compare/v0.18.0...v0.19.0
[0.19.0]: https://github.com/chipsenkbeil/distant/compare/v0.18.0...v0.19.0
[0.18.0]: https://github.com/chipsenkbeil/distant/compare/v0.17.6...v0.18.0
[0.17.6]: https://github.com/chipsenkbeil/distant/compare/v0.17.5...v0.17.6
[0.17.5]: https://github.com/chipsenkbeil/distant/compare/v0.17.4...v0.17.5
[0.17.4]: https://github.com/chipsenkbeil/distant/compare/v0.17.3...v0.17.4
[0.17.3]: https://github.com/chipsenkbeil/distant/compare/v0.17.2...v0.17.3
[0.17.2]: https://github.com/chipsenkbeil/distant/compare/v0.17.1...v0.17.2

2160
Cargo.lock generated

File diff suppressed because it is too large Load Diff

@ -3,7 +3,7 @@ name = "distant"
description = "Operate on a remote computer through file and process manipulation"
categories = ["command-line-utilities"]
keywords = ["cli"]
version = "0.17.5"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -12,12 +12,20 @@ readme = "README.md"
license = "MIT OR Apache-2.0"
[workspace]
members = ["distant-core", "distant-net", "distant-ssh2"]
members = [
"distant-auth",
"distant-core",
"distant-local",
"distant-net",
"distant-protocol",
"distant-ssh2",
]
[profile.release]
opt-level = 'z'
lto = true
codegen-units = 1
strip = true
[features]
default = ["libssh", "ssh2"]
@ -25,47 +33,52 @@ libssh = ["distant-ssh2/libssh"]
ssh2 = ["distant-ssh2/ssh2"]
[dependencies]
anyhow = "1.0.60"
async-trait = "0.1.57"
clap = { version = "3.2.16", features = ["derive"] }
clap_complete = "3.2.3"
config = { version = "0.13.2", default-features = false, features = ["toml"] }
anyhow = "1.0.71"
async-trait = "0.1.68"
clap = { version = "4.3.0", features = ["derive"] }
clap_complete = "4.3.0"
config = { version = "0.13.3", default-features = false, features = ["toml"] }
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] }
dialoguer = { version = "0.10.2", default-features = false }
distant-core = { version = "=0.17.5", path = "distant-core", features = ["clap", "schemars"] }
directories = "4.0.1"
flexi_logger = "0.23.0"
indoc = "1.0.7"
log = "0.4.17"
once_cell = "1.13.0"
dialoguer = { version = "0.10.4", default-features = false }
distant-core = { version = "=0.20.0", path = "distant-core" }
distant-local = { version = "=0.20.0", path = "distant-local" }
directories = "5.0.1"
file-mode = "0.1.2"
flexi_logger = "0.25.5"
indoc = "2.0.1"
log = "0.4.18"
once_cell = "1.17.2"
rand = { version = "0.8.5", features = ["getrandom"] }
rpassword = "7.0.0"
serde = { version = "1.0.142", features = ["derive"] }
serde_json = "1.0.83"
rpassword = "7.2.0"
serde = { version = "1.0.163", features = ["derive"] }
serde_json = "1.0.96"
shell-words = "1.1.0"
service-manager = { version = "0.1.3", features = ["clap", "serde"] }
tabled = "0.8.0"
tokio = { version = "1.20.1", features = ["full"] }
toml_edit = { version = "0.14.4", features = ["serde"] }
terminal_size = "0.2.1"
termwiz = "0.17.1"
which = "4.2.5"
service-manager = { version = "0.2.0", features = ["clap", "serde"] }
tabled = "0.12.0"
tokio = { version = "1.28.2", features = ["full"] }
toml_edit = { version = "0.19.10", features = ["serde"] }
terminal_size = "0.2.6"
termwiz = "0.20.0"
typed-path = "0.3.2"
which = "4.4.0"
winsplit = "0.1.0"
whoami = "1.2.1"
whoami = "1.4.0"
# Optional native SSH functionality
distant-ssh2 = { version = "=0.17.5", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
distant-ssh2 = { version = "=0.20.0", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
[target.'cfg(unix)'.dependencies]
fork = "0.1.19"
fork = "0.1.21"
[target.'cfg(windows)'.dependencies]
sysinfo = "0.25.1"
windows-service = "0.5.0"
sysinfo = "0.29.0"
windows-service = "0.6.0"
[dev-dependencies]
assert_cmd = "2.0.4"
assert_fs = "1.0.7"
indoc = "1.0.7"
predicates = "2.1.1"
rstest = "0.15.0"
assert_cmd = "2.0.11"
assert_fs = "1.0.13"
env_logger = "0.10.0"
indoc = "2.0.1"
predicates = "3.0.3"
rstest = "0.17.0"
test-log = "0.2.11"

@ -0,0 +1,44 @@
[tasks.format]
clear = true
install_crate = "rustfmt-nightly"
command = "cargo"
args = ["+nightly", "fmt", "--all"]
[tasks.test]
clear = true
command = "cargo"
args = ["test", "--release", "--all-features", "--workspace"]
[tasks.ci-test]
clear = true
command = "cargo"
args = ["nextest", "run", "--profile", "ci", "--release", "--all-features", "--workspace"]
[tasks.post-ci-test]
clear = true
command = "cargo"
args = ["test", "--release", "--all-features", "--workspace", "--doc"]
[tasks.publish]
clear = true
script = '''
cargo publish --all-features -p distant-auth
cargo publish --all-features -p distant-protocol
cargo publish --all-features -p distant-net
cargo publish --all-features -p distant-core
cargo publish --all-features -p distant-local
cargo publish --all-features -p distant-ssh2
cargo publish --all-features
'''
[tasks.dry-run-publish]
clear = true
script = '''
cargo publish --all-features --dry-run -p distant-auth
cargo publish --all-features --dry-run -p distant-protocol
cargo publish --all-features --dry-run -p distant-net
cargo publish --all-features --dry-run -p distant-core
cargo publish --all-features --dry-run -p distant-local
cargo publish --all-features --dry-run -p distant-ssh2
cargo publish --all-features --dry-run
'''

@ -0,0 +1,58 @@
# Publish
Guide to publishing the binary and associated crates.
## 1. Update Changelog
Ensure that the changelog is updated for a new release. The CI build requires
that the release version is specified in the format: `[VERSION] - DATE`.
1. Update the changelog by changing `[Unreleased]` to the latest version and
date.
2. Re-add a new `[Unreleased]` header at the top.
3. At the bottom, add a new link for the current version.
4. Update the `[Unreleased]` link with the latest tag.
## 2. Update READMEs
Each crate README has a reference to installing a specific version and needs to
be updated.
e.g. Open `distant-core/README.md` and replace `0.17` with `0.18` if applicable
## 3. Update Crate Versions
Run a command to update the crate versions. An easy way is to use `sed`.
On Mac, this would be `sed -i '' "s~0.17.4~0.17.5~g" **/*.toml` where the old
and new versions would be specified.
*Make sure to review the changed files! Sometimes a version overlaps with
another crate and then we've bumped something wrong!*
## 4. Build to get Cargo.lock update
Run `cargo build` to get a new `Cargo.lock` refresh and commit it.
## 5. Tag Commit
Tag the release commit with the form `vMAJOR.MINOR.PATCH` by using
`git tag vMAJOR.MINOR.PATCH` and publish the tag via `git push --tags`.
Once the tag is pushed, a new job will start to build and publish the artifacts
on Github.
## 6. Publish Crates
Now, `cd` into each sub-crate and publish. Sometimes, it takes a little while
for a crate to be indexed after getting published. This can lead to the publish
of a downstream crate to fail. If so, try again in a couple of seconds.
1. **distant-net:** `(cd distant-net && cargo publish)`
2. **distant-core:** `(cd distant-core && cargo publish)`
3. **distant-ssh2:** `(cd distant-ssh2 && cargo publish)`
4. **distant:** `cargo publish`
## 7. Celebrate
Another release done!

@ -1,6 +1,11 @@
# distant - remotely edit files and run programs
<h1 align="center">
<img src="https://distant.dev/assets/images/distant-with-logo-300x87.png" alt="Distant">
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.61+][distant_rustc_img]][distant_rustc_lnk]
<a href="https://distant.dev/">Documentation</a> |
<a href="https://github.com/chipsenkbeil/distant/discussions">Discussion</a>
</h1>
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.70+][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant.svg
[distant_crates_lnk]: https://crates.io/crates/distant
@ -8,125 +13,52 @@
[distant_doc_lnk]: https://docs.rs/distant
[distant_ci_img]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml/badge.svg
[distant_ci_lnk]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.61+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2022/05/19/Rust-1.61.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
🚧 **(Alpha stage software) This program is in rapid development and may break or change frequently!** 🚧
## Details
The `distant` binary supplies both a server and client component as well as
a command to start a server and configure the local client to be able to
talk to the server.
- Asynchronous in nature, powered by [`tokio`](https://tokio.rs/)
- Data is serialized to send across the wire via [`msgpack`](https://msgpack.org/)
- Encryption & authentication are handled via
[XChaCha20Poly1305](https://tools.ietf.org/html/rfc8439) for an authenticated
encryption scheme via
[RustCrypto/ChaCha20Poly1305](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305)
Additionally, the core of the distant client and server codebase can be pulled
in to be used with your own Rust crates via the `distant-core` crate.
## Installation
### Prebuilt Binaries
If you would like a pre-built binary, check out the
[releases section](https://github.com/chipsenkbeil/distant/releases).
### Unix
### Building from Source
```sh
# Need to include -L to follow redirects as this returns 301
curl -L https://sh.distant.dev | sh
If you have [`cargo`](https://github.com/rust-lang/cargo) installed, you can
directly download and build the source via:
```bash
cargo install distant
# Can also use wget to the same result
wget -q -O- https://sh.distant.dev | sh
```
Alternatively, you can clone this repository and build from source following
the [build guide](./BUILDING.md).
## Example
### Starting the manager
In order to facilitate communication between a client and server, you first
need to start the manager. This can be done in one of two ways:
1. Leverage the `service` functionality to spawn the manager using one of the
following supported service management platforms:
- [`sc.exe`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/cc754599(v=ws.11)) for use with [Windows Service](https://en.wikipedia.org/wiki/Windows_service) (Windows)
- [Launchd](https://en.wikipedia.org/wiki/Launchd) (MacOS)
- [systemd](https://en.wikipedia.org/wiki/Systemd) (Linux)
- [OpenRC](https://en.wikipedia.org/wiki/OpenRC) (Linux)
- [rc.d](https://en.wikipedia.org/wiki/Init#Research_Unix-style/BSD-style) (FreeBSD)
2. Run the manager manually by using the `listen` subcommand
See https://distant.dev/getting-started/installation/unix/ for more details.
#### Service management
### Windows
```bash
# If you want to install the manager as a service, you can use the service
# interface available directly from the CLI
#
# By default, this will install a system-level service, which means that you
# will need elevated permissions to both install AND communicate with the
# manager
distant manager service install
# If you want to maintain a user-level manager service, you can include the
# --user flag. Note that this is only supported on MacOS (via launchd) and
# Linux (via systemd)
distant manager service install --user
# ........
# Once you have installed the service, you will normally need to start it
# manually or restart your machine to trigger startup on boot
distant manager service start # --user if you are working with user-level
```powershell
Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time
irm sh.distant.dev | iex
```
#### Manual start
See https://distant.dev/getting-started/installation/windows/ for more details.
```bash
# If you choose to run the manager without a service management platform, you
# can either run the manager in the foreground or provide --daemon to spawn and
# detach the manager
## Usage
# Run in the foreground
distant manager listen
# Detach the manager where it will not terminate even if the parent exits
```sh
# Start a manager in the background
distant manager listen --daemon
```
### Interacting with a remote machine
# SSH into a server, start distant, and connect to the distant server
distant launch ssh://example.com
Once you have a manager listening for client requests, you can begin
interacting with the manager, spawn and/or connect to servers, and interact
with remote machines.
# Read the current working directory
distant fs read .
```bash
# Connect to my.example.com on port 22 via SSH and start a distant server
distant client launch ssh://my.example.com
# After the connection is established, you can perform different operations
# on the remote machine via `distant client action {command} [args]`
distant client action copy path/to/file new/path/to/file
distant client action spawn -- echo 'Hello, this is from the other side'
# Opening a shell to the remote machine is trivial
distant client shell
# If you have more than one connection open, you can switch between active
# connections by using the `select` subcommand
distant client select '<ID>'
# For programmatic use, a REPL following the JSON API is available
distant client repl --format json
# Start a shell on the remote machine
distant shell
```
See https://distant.dev/getting-started/usage/ for more details.
## License
This project is licensed under either of

@ -0,0 +1,27 @@
[package]
name = "distant-auth"
description = "Authentication library for distant, providing various implementations"
categories = ["authentication"]
keywords = ["auth", "authentication", "async"]
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
default = []
tests = []
[dependencies]
async-trait = "0.1.68"
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error"] }
log = "0.4.18"
serde = { version = "1.0.163", features = ["derive"] }
[dev-dependencies]
env_logger = "0.10.0"
test-log = "0.2.11"
tokio = { version = "1.28.2", features = ["full"] }

@ -0,0 +1,35 @@
# distant auth
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-auth.svg
[distant_crates_lnk]: https://crates.io/crates/distant-auth
[distant_doc_img]: https://docs.rs/distant-auth/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-auth
[distant_rustc_img]: https://img.shields.io/badge/distant_auth-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
The `distant-auth` library supplies the authentication functionality for the
distant interfaces and distant cli.
## Installation
You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-auth = "0.20"
```
## License
This project is licensed under either of
Apache License, Version 2.0, (LICENSE-APACHE or
[apache-license][apache-license]) or MIT license (LICENSE-MIT or
[mit-license][mit-license]) at your option.
[apache-license]: http://www.apache.org/licenses/LICENSE-2.0
[mit-license]: http://opensource.org/licenses/MIT

@ -0,0 +1,110 @@
use std::io;
use async_trait::async_trait;
use crate::handler::AuthHandler;
use crate::msg::*;
/// Represents an interface for authenticating with a server.
#[async_trait]
pub trait Authenticate {
/// Performs authentication by leveraging the `handler` for any received challenge.
async fn authenticate(&mut self, mut handler: impl AuthHandler + Send) -> io::Result<()>;
}
/// Represents an interface for submitting challenges for authentication.
#[async_trait]
pub trait Authenticator: Send {
/// Issues an initialization notice and returns the response indicating which authentication
/// methods to pursue
async fn initialize(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse>;
/// Issues a challenge and returns the answers to the `questions` asked.
async fn challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse>;
/// Requests verification of some `kind` and `text`, returning true if passed verification.
async fn verify(&mut self, verification: Verification) -> io::Result<VerificationResponse>;
/// Reports information with no response expected.
async fn info(&mut self, info: Info) -> io::Result<()>;
/// Reports an error occurred during authentication, consuming the authenticator since no more
/// challenges should be issued.
async fn error(&mut self, error: Error) -> io::Result<()>;
/// Reports that the authentication has started for a specific method.
async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()>;
/// Reports that the authentication has finished successfully, consuming the authenticator
/// since no more challenges should be issued.
async fn finished(&mut self) -> io::Result<()>;
}
/// Represents an implementator of [`Authenticator`] used purely for testing purposes.
#[cfg(any(test, feature = "tests"))]
pub struct TestAuthenticator {
pub initialize: Box<dyn FnMut(Initialization) -> io::Result<InitializationResponse> + Send>,
pub challenge: Box<dyn FnMut(Challenge) -> io::Result<ChallengeResponse> + Send>,
pub verify: Box<dyn FnMut(Verification) -> io::Result<VerificationResponse> + Send>,
pub info: Box<dyn FnMut(Info) -> io::Result<()> + Send>,
pub error: Box<dyn FnMut(Error) -> io::Result<()> + Send>,
pub start_method: Box<dyn FnMut(StartMethod) -> io::Result<()> + Send>,
pub finished: Box<dyn FnMut() -> io::Result<()> + Send>,
}
#[cfg(any(test, feature = "tests"))]
impl Default for TestAuthenticator {
fn default() -> Self {
Self {
initialize: Box::new(|x| Ok(InitializationResponse { methods: x.methods })),
challenge: Box::new(|x| {
Ok(ChallengeResponse {
answers: x.questions.into_iter().map(|x| x.text).collect(),
})
}),
verify: Box::new(|_| Ok(VerificationResponse { valid: true })),
info: Box::new(|_| Ok(())),
error: Box::new(|_| Ok(())),
start_method: Box::new(|_| Ok(())),
finished: Box::new(|| Ok(())),
}
}
}
#[cfg(any(test, feature = "tests"))]
#[async_trait]
impl Authenticator for TestAuthenticator {
async fn initialize(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
(self.initialize)(initialization)
}
async fn challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
(self.challenge)(challenge)
}
async fn verify(&mut self, verification: Verification) -> io::Result<VerificationResponse> {
(self.verify)(verification)
}
async fn info(&mut self, info: Info) -> io::Result<()> {
(self.info)(info)
}
async fn error(&mut self, error: Error) -> io::Result<()> {
(self.error)(error)
}
async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
(self.start_method)(start_method)
}
async fn finished(&mut self) -> io::Result<()> {
(self.finished)()
}
}

@ -0,0 +1,422 @@
use std::collections::HashMap;
use std::fmt::Display;
use std::io;
use async_trait::async_trait;
use crate::authenticator::Authenticator;
use crate::msg::*;
mod methods;
pub use methods::*;
/// Interface for a handler of authentication requests for all methods.
#[async_trait]
pub trait AuthHandler: AuthMethodHandler + Send {
/// Callback when authentication is beginning, providing available authentication methods and
/// returning selected authentication methods to pursue.
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
Ok(InitializationResponse {
methods: initialization.methods,
})
}
/// Callback when authentication starts for a specific method.
#[allow(unused_variables)]
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
Ok(())
}
/// Callback when authentication is finished and no more requests will be received.
async fn on_finished(&mut self) -> io::Result<()> {
Ok(())
}
}
/// Dummy implementation of [`AuthHandler`] where any challenge or verification request will
/// instantly fail.
pub struct DummyAuthHandler;
#[async_trait]
impl AuthHandler for DummyAuthHandler {}
#[async_trait]
impl AuthMethodHandler for DummyAuthHandler {
async fn on_challenge(&mut self, _: Challenge) -> io::Result<ChallengeResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_verification(&mut self, _: Verification) -> io::Result<VerificationResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_info(&mut self, _: Info) -> io::Result<()> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_error(&mut self, _: Error) -> io::Result<()> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
}
/// Implementation of [`AuthHandler`] that uses the same [`AuthMethodHandler`] for all methods.
pub struct SingleAuthHandler(Box<dyn AuthMethodHandler>);
impl SingleAuthHandler {
pub fn new<T: AuthMethodHandler + 'static>(method_handler: T) -> Self {
Self(Box::new(method_handler))
}
}
#[async_trait]
impl AuthHandler for SingleAuthHandler {}
#[async_trait]
impl AuthMethodHandler for SingleAuthHandler {
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
self.0.on_challenge(challenge).await
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
self.0.on_verification(verification).await
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
self.0.on_info(info).await
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
self.0.on_error(error).await
}
}
/// Implementation of [`AuthHandler`] that maintains a map of [`AuthMethodHandler`] implementations
/// for specific methods, invoking [`on_challenge`], [`on_verification`], [`on_info`], and
/// [`on_error`] for a specific handler based on an associated id.
///
/// [`on_challenge`]: AuthMethodHandler::on_challenge
/// [`on_verification`]: AuthMethodHandler::on_verification
/// [`on_info`]: AuthMethodHandler::on_info
/// [`on_error`]: AuthMethodHandler::on_error
pub struct AuthHandlerMap {
active: String,
map: HashMap<&'static str, Box<dyn AuthMethodHandler>>,
}
impl AuthHandlerMap {
/// Creates a new, empty map of auth method handlers.
pub fn new() -> Self {
Self {
active: String::new(),
map: HashMap::new(),
}
}
/// Returns the `id` of the active [`AuthMethodHandler`].
pub fn active_id(&self) -> &str {
&self.active
}
/// Sets the active [`AuthMethodHandler`] by its `id`.
pub fn set_active_id(&mut self, id: impl Into<String>) {
self.active = id.into();
}
/// Inserts the specified `handler` into the map, associating it with `id` for determining the
/// method that would trigger this handler.
pub fn insert_method_handler<T: AuthMethodHandler + 'static>(
&mut self,
id: &'static str,
handler: T,
) -> Option<Box<dyn AuthMethodHandler>> {
self.map.insert(id, Box::new(handler))
}
/// Removes a handler with the associated `id`.
pub fn remove_method_handler(
&mut self,
id: &'static str,
) -> Option<Box<dyn AuthMethodHandler>> {
self.map.remove(id)
}
/// Retrieves a mutable reference to the active [`AuthMethodHandler`] with the specified `id`,
/// returning an error if no handler for the active id is found.
pub fn get_mut_active_method_handler_or_error(
&mut self,
) -> io::Result<&mut (dyn AuthMethodHandler + 'static)> {
let id = self.active.clone();
self.get_mut_active_method_handler().ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, format!("No active handler for {id}"))
})
}
/// Retrieves a mutable reference to the active [`AuthMethodHandler`] with the specified `id`.
pub fn get_mut_active_method_handler(
&mut self,
) -> Option<&mut (dyn AuthMethodHandler + 'static)> {
// TODO: Optimize this
self.get_mut_method_handler(&self.active.clone())
}
/// Retrieves a mutable reference to the [`AuthMethodHandler`] with the specified `id`.
pub fn get_mut_method_handler(
&mut self,
id: &str,
) -> Option<&mut (dyn AuthMethodHandler + 'static)> {
self.map.get_mut(id).map(|h| h.as_mut())
}
}
impl AuthHandlerMap {
/// Consumes the map, returning a new map that supports the `static_key` method.
pub fn with_static_key<K>(mut self, key: K) -> Self
where
K: Display + Send + 'static,
{
self.insert_method_handler("static_key", StaticKeyAuthMethodHandler::simple(key));
self
}
}
impl Default for AuthHandlerMap {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl AuthHandler for AuthHandlerMap {
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
let methods = initialization
.methods
.into_iter()
.filter(|method| self.map.contains_key(method.as_str()))
.collect();
Ok(InitializationResponse { methods })
}
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
self.set_active_id(start_method.method);
Ok(())
}
async fn on_finished(&mut self) -> io::Result<()> {
Ok(())
}
}
#[async_trait]
impl AuthMethodHandler for AuthHandlerMap {
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
let handler = self.get_mut_active_method_handler_or_error()?;
handler.on_challenge(challenge).await
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
let handler = self.get_mut_active_method_handler_or_error()?;
handler.on_verification(verification).await
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
let handler = self.get_mut_active_method_handler_or_error()?;
handler.on_info(info).await
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
let handler = self.get_mut_active_method_handler_or_error()?;
handler.on_error(error).await
}
}
/// Implementation of [`AuthHandler`] that redirects all requests to an [`Authenticator`].
pub struct ProxyAuthHandler<'a>(&'a mut dyn Authenticator);
impl<'a> ProxyAuthHandler<'a> {
pub fn new(authenticator: &'a mut dyn Authenticator) -> Self {
Self(authenticator)
}
}
#[async_trait]
impl<'a> AuthHandler for ProxyAuthHandler<'a> {
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
Authenticator::initialize(self.0, initialization).await
}
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
Authenticator::start_method(self.0, start_method).await
}
async fn on_finished(&mut self) -> io::Result<()> {
Authenticator::finished(self.0).await
}
}
#[async_trait]
impl<'a> AuthMethodHandler for ProxyAuthHandler<'a> {
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
Authenticator::challenge(self.0, challenge).await
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
Authenticator::verify(self.0, verification).await
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
Authenticator::info(self.0, info).await
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
Authenticator::error(self.0, error).await
}
}
/// Implementation of [`AuthHandler`] that holds a mutable reference to another [`AuthHandler`]
/// trait object to use underneath.
pub struct DynAuthHandler<'a>(&'a mut dyn AuthHandler);
impl<'a> DynAuthHandler<'a> {
pub fn new(handler: &'a mut dyn AuthHandler) -> Self {
Self(handler)
}
}
impl<'a, T: AuthHandler> From<&'a mut T> for DynAuthHandler<'a> {
fn from(handler: &'a mut T) -> Self {
Self::new(handler as &mut dyn AuthHandler)
}
}
#[async_trait]
impl<'a> AuthHandler for DynAuthHandler<'a> {
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
self.0.on_initialization(initialization).await
}
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
self.0.on_start_method(start_method).await
}
async fn on_finished(&mut self) -> io::Result<()> {
self.0.on_finished().await
}
}
#[async_trait]
impl<'a> AuthMethodHandler for DynAuthHandler<'a> {
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
self.0.on_challenge(challenge).await
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
self.0.on_verification(verification).await
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
self.0.on_info(info).await
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
self.0.on_error(error).await
}
}
/// Represents an implementator of [`AuthHandler`] used purely for testing purposes.
#[cfg(any(test, feature = "tests"))]
pub struct TestAuthHandler {
pub on_initialization:
Box<dyn FnMut(Initialization) -> io::Result<InitializationResponse> + Send>,
pub on_challenge: Box<dyn FnMut(Challenge) -> io::Result<ChallengeResponse> + Send>,
pub on_verification: Box<dyn FnMut(Verification) -> io::Result<VerificationResponse> + Send>,
pub on_info: Box<dyn FnMut(Info) -> io::Result<()> + Send>,
pub on_error: Box<dyn FnMut(Error) -> io::Result<()> + Send>,
pub on_start_method: Box<dyn FnMut(StartMethod) -> io::Result<()> + Send>,
pub on_finished: Box<dyn FnMut() -> io::Result<()> + Send>,
}
#[cfg(any(test, feature = "tests"))]
impl Default for TestAuthHandler {
fn default() -> Self {
Self {
on_initialization: Box::new(|x| Ok(InitializationResponse { methods: x.methods })),
on_challenge: Box::new(|x| {
Ok(ChallengeResponse {
answers: x.questions.into_iter().map(|x| x.text).collect(),
})
}),
on_verification: Box::new(|_| Ok(VerificationResponse { valid: true })),
on_info: Box::new(|_| Ok(())),
on_error: Box::new(|_| Ok(())),
on_start_method: Box::new(|_| Ok(())),
on_finished: Box::new(|| Ok(())),
}
}
}
#[cfg(any(test, feature = "tests"))]
#[async_trait]
impl AuthHandler for TestAuthHandler {
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
(self.on_initialization)(initialization)
}
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
(self.on_start_method)(start_method)
}
async fn on_finished(&mut self) -> io::Result<()> {
(self.on_finished)()
}
}
#[cfg(any(test, feature = "tests"))]
#[async_trait]
impl AuthMethodHandler for TestAuthHandler {
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
(self.on_challenge)(challenge)
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
(self.on_verification)(verification)
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
(self.on_info)(info)
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
(self.on_error)(error)
}
}

@ -0,0 +1,33 @@
use std::io;
use async_trait::async_trait;
use crate::msg::{Challenge, ChallengeResponse, Error, Info, Verification, VerificationResponse};
/// Interface for a handler of authentication requests for a specific authentication method.
#[async_trait]
pub trait AuthMethodHandler: Send {
/// Callback when a challenge is received, returning answers to the given questions.
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse>;
/// Callback when a verification request is received, returning true if approvided or false if
/// unapproved.
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse>;
/// Callback when information is received. To fail, return an error from this function.
async fn on_info(&mut self, info: Info) -> io::Result<()>;
/// Callback when an error is received. Regardless of the result returned, this will terminate
/// the authenticator. In the situation where a custom error would be preferred, have this
/// callback return an error.
async fn on_error(&mut self, error: Error) -> io::Result<()>;
}
mod prompt;
pub use prompt::*;
mod static_key;
pub use static_key::*;

@ -0,0 +1,90 @@
use std::io;
use async_trait::async_trait;
use log::*;
use crate::handler::AuthMethodHandler;
use crate::msg::{
Challenge, ChallengeResponse, Error, Info, Verification, VerificationKind, VerificationResponse,
};
/// Blocking implementation of [`AuthMethodHandler`] that uses prompts to communicate challenge &
/// verification requests, receiving responses to relay back.
pub struct PromptAuthMethodHandler<T, U> {
text_prompt: T,
password_prompt: U,
}
impl<T, U> PromptAuthMethodHandler<T, U> {
pub fn new(text_prompt: T, password_prompt: U) -> Self {
Self {
text_prompt,
password_prompt,
}
}
}
#[async_trait]
impl<T, U> AuthMethodHandler for PromptAuthMethodHandler<T, U>
where
T: Fn(&str) -> io::Result<String> + Send + Sync + 'static,
U: Fn(&str) -> io::Result<String> + Send + Sync + 'static,
{
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
trace!("on_challenge({challenge:?})");
let mut answers = Vec::new();
for question in challenge.questions.iter() {
// Contains all prompt lines including same line
let mut lines = question.text.split('\n').collect::<Vec<_>>();
// Line that is prompt on same line as answer
let line = lines.pop().unwrap();
// Go ahead and display all other lines
for line in lines.into_iter() {
eprintln!("{line}");
}
// Get an answer from user input, or use a blank string as an answer
// if we fail to get input from the user
let answer = (self.password_prompt)(line).unwrap_or_default();
answers.push(answer);
}
Ok(ChallengeResponse { answers })
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
trace!("on_verify({verification:?})");
match verification.kind {
VerificationKind::Host => {
eprintln!("{}", verification.text);
let answer = (self.text_prompt)("Enter [y/N]> ")?;
trace!("Verify? Answer = '{answer}'");
Ok(VerificationResponse {
valid: matches!(answer.trim(), "y" | "Y" | "yes" | "YES"),
})
}
x => {
error!("Unsupported verify kind: {x}");
Ok(VerificationResponse { valid: false })
}
}
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
trace!("on_info({info:?})");
println!("{}", info.text);
Ok(())
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
trace!("on_error({error:?})");
eprintln!("{}: {}", error.kind, error.text);
Ok(())
}
}

@ -0,0 +1,175 @@
use std::fmt::Display;
use std::io;
use async_trait::async_trait;
use log::*;
use crate::handler::AuthMethodHandler;
use crate::msg::{Challenge, ChallengeResponse, Error, Info, Verification, VerificationResponse};
/// Implementation of [`AuthMethodHandler`] that answers challenge requests using a static
/// [`HeapSecretKey`]. All other portions of method authentication are handled by another
/// [`AuthMethodHandler`].
pub struct StaticKeyAuthMethodHandler<K> {
key: K,
handler: Box<dyn AuthMethodHandler>,
}
impl<K> StaticKeyAuthMethodHandler<K> {
/// Creates a new [`StaticKeyAuthMethodHandler`] that responds to challenges using a static
/// `key`. All other requests are passed to the `handler`.
pub fn new<T: AuthMethodHandler + 'static>(key: K, handler: T) -> Self {
Self {
key,
handler: Box::new(handler),
}
}
/// Creates a new [`StaticKeyAuthMethodHandler`] that responds to challenges using a static
/// `key`. All other requests are passed automatically, meaning that verification is always
/// approvide and info/errors are ignored.
pub fn simple(key: K) -> Self {
Self::new(key, {
struct __AuthMethodHandler;
#[async_trait]
impl AuthMethodHandler for __AuthMethodHandler {
async fn on_challenge(&mut self, _: Challenge) -> io::Result<ChallengeResponse> {
unreachable!("on_challenge should be handled by StaticKeyAuthMethodHandler");
}
async fn on_verification(
&mut self,
_: Verification,
) -> io::Result<VerificationResponse> {
Ok(VerificationResponse { valid: true })
}
async fn on_info(&mut self, _: Info) -> io::Result<()> {
Ok(())
}
async fn on_error(&mut self, _: Error) -> io::Result<()> {
Ok(())
}
}
__AuthMethodHandler
})
}
}
#[async_trait]
impl<K> AuthMethodHandler for StaticKeyAuthMethodHandler<K>
where
K: Display + Send,
{
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
trace!("on_challenge({challenge:?})");
let mut answers = Vec::new();
for question in challenge.questions.iter() {
// Only challenges with a "key" label are allowed, all else will fail
if question.label != "key" {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Only 'key' challenges are supported",
));
}
answers.push(self.key.to_string());
}
Ok(ChallengeResponse { answers })
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
trace!("on_verify({verification:?})");
self.handler.on_verification(verification).await
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
trace!("on_info({info:?})");
self.handler.on_info(info).await
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
trace!("on_error({error:?})");
self.handler.on_error(error).await
}
}
#[cfg(test)]
mod tests {
use test_log::test;
use super::*;
use crate::msg::{ErrorKind, Question, VerificationKind};
#[test(tokio::test)]
async fn on_challenge_should_fail_if_non_key_question_received() {
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
handler
.on_challenge(Challenge {
questions: vec![Question::new("test")],
options: Default::default(),
})
.await
.unwrap_err();
}
#[test(tokio::test)]
async fn on_challenge_should_answer_with_stringified_key_for_key_questions() {
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
let response = handler
.on_challenge(Challenge {
questions: vec![Question::new("key")],
options: Default::default(),
})
.await
.unwrap();
assert_eq!(response.answers.len(), 1, "Wrong answer set received");
assert!(!response.answers[0].is_empty(), "Empty answer being sent");
}
#[test(tokio::test)]
async fn on_verification_should_leverage_fallback_handler() {
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
let response = handler
.on_verification(Verification {
kind: VerificationKind::Host,
text: "host".to_string(),
})
.await
.unwrap();
assert!(response.valid, "Unexpected result from fallback handler");
}
#[test(tokio::test)]
async fn on_info_should_leverage_fallback_handler() {
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
handler
.on_info(Info {
text: "info".to_string(),
})
.await
.unwrap();
}
#[test(tokio::test)]
async fn on_error_should_leverage_fallback_handler() {
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
handler
.on_error(Error {
kind: ErrorKind::Error,
text: "text".to_string(),
})
.await
.unwrap();
}
}

@ -0,0 +1,19 @@
#![doc = include_str!("../README.md")]
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
mod authenticator;
mod handler;
mod methods;
pub mod msg;
pub use authenticator::*;
pub use handler::*;
pub use methods::*;
#[cfg(any(test, feature = "tests"))]
pub mod tests {
pub use crate::{TestAuthHandler, TestAuthenticator};
}

@ -0,0 +1,365 @@
use std::collections::HashMap;
use std::io;
use std::str::FromStr;
use async_trait::async_trait;
use log::*;
use crate::authenticator::Authenticator;
use crate::msg::*;
mod none;
mod static_key;
pub use none::*;
pub use static_key::*;
/// Supports authenticating using a variety of methods
pub struct Verifier {
    // Registered methods keyed by their unique id
    methods: HashMap<&'static str, Box<dyn AuthenticationMethod>>,
}

impl Verifier {
    /// Creates a verifier from an iterator of authentication methods, keyed by
    /// each method's id. If two methods share an id, the later one wins.
    pub fn new<I>(methods: I) -> Self
    where
        I: IntoIterator<Item = Box<dyn AuthenticationMethod>>,
    {
        let methods = methods
            .into_iter()
            .map(|method| (method.id(), method))
            .collect();

        Self { methods }
    }

    /// Creates a verifier with no methods.
    pub fn empty() -> Self {
        Self {
            methods: HashMap::new(),
        }
    }

    /// Creates a verifier that uses the [`NoneAuthenticationMethod`] exclusively.
    pub fn none() -> Self {
        let method: Box<dyn AuthenticationMethod> = Box::new(NoneAuthenticationMethod::new());
        Self::new([method])
    }

    /// Creates a verifier that uses the [`StaticKeyAuthenticationMethod`] exclusively.
    pub fn static_key<K>(key: K) -> Self
    where
        K: FromStr + PartialEq + Send + Sync + 'static,
    {
        let method: Box<dyn AuthenticationMethod> =
            Box::new(StaticKeyAuthenticationMethod::new(key));
        Self::new([method])
    }

    /// Returns an iterator over the ids of the methods supported by the verifier
    pub fn methods(&self) -> impl Iterator<Item = &'static str> + '_ {
        self.methods.keys().copied()
    }

    /// Attempts to verify by submitting challenges using the `authenticator` provided. Returns the
    /// id of the authentication method that succeeded. Fails if no authentication method succeeds.
    pub async fn verify(&self, authenticator: &mut dyn Authenticator) -> io::Result<&'static str> {
        // Initiate the process to get methods to use: advertise everything we
        // support and let the other side pick the subset (and order) to attempt
        let response = authenticator
            .initialize(Initialization {
                methods: self.methods.keys().map(ToString::to_string).collect(),
            })
            .await?;

        for method in response.methods {
            let Some(m) = self.methods.get(method.as_str()) else {
                trace!("Skipping authentication {method} as it is not available or supported");
                continue;
            };

            // Report the authentication method
            authenticator
                .start_method(StartMethod {
                    method: m.id().to_string(),
                })
                .await?;

            // Perform the actual authentication; a method failure is non-fatal
            // and we simply move on to the next requested method
            if m.authenticate(authenticator).await.is_ok() {
                authenticator.finished().await?;
                return Ok(m.id());
            }
        }

        Err(io::Error::new(
            io::ErrorKind::PermissionDenied,
            "No authentication method succeeded",
        ))
    }
}
impl From<Vec<Box<dyn AuthenticationMethod>>> for Verifier {
fn from(methods: Vec<Box<dyn AuthenticationMethod>>) -> Self {
Self::new(methods)
}
}
/// Represents an interface to authenticate using some method
//
// NOTE: `Send + Sync` is required so methods can be stored as boxed trait objects
// inside [`Verifier`] and used across await points.
#[async_trait]
pub trait AuthenticationMethod: Send + Sync {
    /// Returns a unique id to distinguish the method from other methods
    fn id(&self) -> &'static str;

    /// Performs authentication using the `authenticator` to submit challenges and other
    /// information based on the authentication method
    async fn authenticate(&self, authenticator: &mut dyn Authenticator) -> io::Result<()>;
}
#[cfg(test)]
mod tests {
    use std::sync::mpsc;

    use test_log::test;

    use super::*;
    use crate::authenticator::TestAuthenticator;

    /// Stub method whose `authenticate` always succeeds without any exchange.
    struct SuccessAuthenticationMethod;

    #[async_trait]
    impl AuthenticationMethod for SuccessAuthenticationMethod {
        fn id(&self) -> &'static str {
            "success"
        }

        async fn authenticate(&self, _: &mut dyn Authenticator) -> io::Result<()> {
            Ok(())
        }
    }

    /// Stub method whose `authenticate` always fails with a generic I/O error.
    struct FailAuthenticationMethod;

    #[async_trait]
    impl AuthenticationMethod for FailAuthenticationMethod {
        fn id(&self) -> &'static str {
            "fail"
        }

        async fn authenticate(&self, _: &mut dyn Authenticator) -> io::Result<()> {
            Err(io::Error::from(io::ErrorKind::Other))
        }
    }

    // A failure in the initial `initialize` exchange aborts verification.
    #[test(tokio::test)]
    async fn verifier_should_fail_to_verify_if_initialization_fails() {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| Err(io::Error::from(io::ErrorKind::Other))),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> =
            vec![Box::new(SuccessAuthenticationMethod)];
        let verifier = Verifier::from(methods);
        verifier.verify(&mut authenticator).await.unwrap_err();
    }

    // Even after a method succeeds, a failure to deliver the `finished`
    // indicator must surface as a verification error.
    #[test(tokio::test)]
    async fn verifier_should_fail_to_verify_if_fails_to_send_finished_indicator_after_success() {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![SuccessAuthenticationMethod.id().to_string()]
                        .into_iter()
                        .collect(),
                })
            }),
            finished: Box::new(|| Err(io::Error::new(io::ErrorKind::Other, "test error"))),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> =
            vec![Box::new(SuccessAuthenticationMethod)];
        let verifier = Verifier::from(methods);
        let err = verifier.verify(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::Other);
        assert_eq!(err.to_string(), "test error");
    }

    // A verifier holding zero methods can never succeed, even if the other
    // side requests a known method name.
    #[test(tokio::test)]
    async fn verifier_should_fail_to_verify_if_has_no_authentication_methods() {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![SuccessAuthenticationMethod.id().to_string()]
                        .into_iter()
                        .collect(),
                })
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> = vec![];
        let verifier = Verifier::from(methods);
        verifier.verify(&mut authenticator).await.unwrap_err();
    }

    // Methods requested by the other side but unknown to the verifier are
    // skipped, leaving nothing to attempt.
    #[test(tokio::test)]
    async fn verifier_should_fail_to_verify_if_initialization_yields_no_valid_authentication_methods(
    ) {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec!["other".to_string()].into_iter().collect(),
                })
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> =
            vec![Box::new(SuccessAuthenticationMethod)];
        let verifier = Verifier::from(methods);
        verifier.verify(&mut authenticator).await.unwrap_err();
    }

    // Exhausting every requested method without a success is a failure.
    #[test(tokio::test)]
    async fn verifier_should_fail_to_verify_if_no_authentication_method_succeeds() {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![FailAuthenticationMethod.id().to_string()]
                        .into_iter()
                        .collect(),
                })
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> = vec![Box::new(FailAuthenticationMethod)];
        let verifier = Verifier::from(methods);
        verifier.verify(&mut authenticator).await.unwrap_err();
    }

    // On success, `verify` reports the id of the method that succeeded.
    #[test(tokio::test)]
    async fn verifier_should_return_id_of_authentication_method_upon_success() {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![SuccessAuthenticationMethod.id().to_string()]
                        .into_iter()
                        .collect(),
                })
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> =
            vec![Box::new(SuccessAuthenticationMethod)];
        let verifier = Verifier::from(methods);
        assert_eq!(
            verifier.verify(&mut authenticator).await.unwrap(),
            SuccessAuthenticationMethod.id()
        );
    }

    // Methods are attempted in the order the other side requested; the first
    // failure does not stop the later method from succeeding.
    #[test(tokio::test)]
    async fn verifier_should_try_authentication_methods_in_order_until_one_succeeds() {
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![
                        FailAuthenticationMethod.id().to_string(),
                        SuccessAuthenticationMethod.id().to_string(),
                    ]
                    .into_iter()
                    .collect(),
                })
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> = vec![
            Box::new(FailAuthenticationMethod),
            Box::new(SuccessAuthenticationMethod),
        ];
        let verifier = Verifier::from(methods);
        assert_eq!(
            verifier.verify(&mut authenticator).await.unwrap(),
            SuccessAuthenticationMethod.id()
        );
    }

    // Each attempted method is announced via `start_method` beforehand; the
    // channel captures the announcements in order.
    #[test(tokio::test)]
    async fn verifier_should_send_start_method_before_attempting_each_method() {
        let (tx, rx) = mpsc::channel();
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![
                        FailAuthenticationMethod.id().to_string(),
                        SuccessAuthenticationMethod.id().to_string(),
                    ]
                    .into_iter()
                    .collect(),
                })
            }),
            start_method: Box::new(move |method| {
                tx.send(method.method).unwrap();
                Ok(())
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> = vec![
            Box::new(FailAuthenticationMethod),
            Box::new(SuccessAuthenticationMethod),
        ];
        Verifier::from(methods)
            .verify(&mut authenticator)
            .await
            .unwrap();

        assert_eq!(rx.try_recv().unwrap(), FailAuthenticationMethod.id());
        assert_eq!(rx.try_recv().unwrap(), SuccessAuthenticationMethod.id());
        assert_eq!(rx.try_recv().unwrap_err(), mpsc::TryRecvError::Empty);
    }

    // Exactly one `finished` notification is sent once a method succeeds.
    #[test(tokio::test)]
    async fn verifier_should_send_finished_when_a_method_succeeds() {
        let (tx, rx) = mpsc::channel();
        let mut authenticator = TestAuthenticator {
            initialize: Box::new(|_| {
                Ok(InitializationResponse {
                    methods: vec![
                        FailAuthenticationMethod.id().to_string(),
                        SuccessAuthenticationMethod.id().to_string(),
                    ]
                    .into_iter()
                    .collect(),
                })
            }),
            finished: Box::new(move || {
                tx.send(()).unwrap();
                Ok(())
            }),
            ..Default::default()
        };

        let methods: Vec<Box<dyn AuthenticationMethod>> = vec![
            Box::new(FailAuthenticationMethod),
            Box::new(SuccessAuthenticationMethod),
        ];
        Verifier::from(methods)
            .verify(&mut authenticator)
            .await
            .unwrap();

        rx.try_recv().unwrap();
        assert_eq!(rx.try_recv().unwrap_err(), mpsc::TryRecvError::Empty);
    }
}

@ -0,0 +1,37 @@
use std::io;
use async_trait::async_trait;
use crate::authenticator::Authenticator;
use crate::methods::AuthenticationMethod;
/// Authentication method that skips authentication and approves anything.
#[derive(Clone, Debug)]
pub struct NoneAuthenticationMethod;

impl NoneAuthenticationMethod {
    /// Unique identifier reported by this method.
    //
    // NOTE: the lifetime must be written explicitly; eliding the lifetime of an
    // associated constant (`const ID: &str`) is deprecated and rejected by newer
    // editions of Rust.
    pub const ID: &'static str = "none";

    /// Creates a new instance of this authentication method.
    #[inline]
    pub fn new() -> Self {
        Self
    }
}

impl Default for NoneAuthenticationMethod {
    #[inline]
    fn default() -> Self {
        Self
    }
}
#[async_trait]
impl AuthenticationMethod for NoneAuthenticationMethod {
    /// Reports [`Self::ID`] as this method's identifier.
    fn id(&self) -> &'static str {
        Self::ID
    }

    /// Succeeds unconditionally without exchanging any messages with the authenticator.
    async fn authenticate(&self, _: &mut dyn Authenticator) -> io::Result<()> {
        Ok(())
    }
}

@ -0,0 +1,133 @@
use std::io;
use std::str::FromStr;
use async_trait::async_trait;
use crate::authenticator::Authenticator;
use crate::methods::AuthenticationMethod;
use crate::msg::{Challenge, Error, Question};
/// Authentication method for a static secret key
#[derive(Clone, Debug)]
pub struct StaticKeyAuthenticationMethod<T> {
    // The expected secret; submitted answers are parsed into `T` and compared to this
    key: T,
}

impl<T> StaticKeyAuthenticationMethod<T> {
    /// Unique identifier reported by this method.
    //
    // NOTE: the lifetime must be written explicitly; eliding the lifetime of an
    // associated constant (`const ID: &str`) is deprecated and rejected by newer
    // editions of Rust.
    pub const ID: &'static str = "static_key";

    /// Creates a new instance backed by the provided secret `key`.
    #[inline]
    pub fn new(key: T) -> Self {
        Self { key }
    }
}
#[async_trait]
impl<T> AuthenticationMethod for StaticKeyAuthenticationMethod<T>
where
T: FromStr + PartialEq + Send + Sync,
{
fn id(&self) -> &'static str {
Self::ID
}
async fn authenticate(&self, authenticator: &mut dyn Authenticator) -> io::Result<()> {
let response = authenticator
.challenge(Challenge {
questions: vec![Question {
label: "key".to_string(),
text: "Provide a key: ".to_string(),
options: Default::default(),
}],
options: Default::default(),
})
.await?;
if response.answers.is_empty() {
return Err(Error::non_fatal("missing answer").into_io_permission_denied());
}
match response.answers.into_iter().next().unwrap().parse::<T>() {
Ok(key) if key == self.key => Ok(()),
_ => Err(Error::non_fatal("answer does not match key").into_io_permission_denied()),
}
}
}
#[cfg(test)]
mod tests {
    use test_log::test;

    use super::*;
    use crate::authenticator::TestAuthenticator;
    use crate::msg::*;

    // An I/O failure while issuing the challenge propagates unchanged.
    #[test(tokio::test)]
    async fn authenticate_should_fail_if_key_challenge_fails() {
        let method = StaticKeyAuthenticationMethod::new(String::new());
        let mut authenticator = TestAuthenticator {
            challenge: Box::new(|_| Err(io::Error::new(io::ErrorKind::InvalidData, "test error"))),
            ..Default::default()
        };

        let err = method.authenticate(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
        assert_eq!(err.to_string(), "test error");
    }

    // An empty answer list is rejected with permission denied.
    #[test(tokio::test)]
    async fn authenticate_should_fail_if_no_answer_included_in_challenge_response() {
        let method = StaticKeyAuthenticationMethod::new(String::new());
        let mut authenticator = TestAuthenticator {
            challenge: Box::new(|_| {
                Ok(ChallengeResponse {
                    answers: Vec::new(),
                })
            }),
            ..Default::default()
        };

        let err = method.authenticate(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::PermissionDenied);
        assert_eq!(err.to_string(), "Error: missing answer");
    }

    // A non-matching answer is rejected with permission denied.
    #[test(tokio::test)]
    async fn authenticate_should_fail_if_answer_does_not_match_key() {
        let method = StaticKeyAuthenticationMethod::new(String::from("answer"));
        let mut authenticator = TestAuthenticator {
            challenge: Box::new(|_| {
                Ok(ChallengeResponse {
                    answers: vec![String::from("other")],
                })
            }),
            ..Default::default()
        };

        let err = method.authenticate(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::PermissionDenied);
        assert_eq!(err.to_string(), "Error: answer does not match key");
    }

    // A matching answer authenticates successfully.
    #[test(tokio::test)]
    async fn authenticate_should_succeed_if_answer_matches_key() {
        let method = StaticKeyAuthenticationMethod::new(String::from("answer"));
        let mut authenticator = TestAuthenticator {
            challenge: Box::new(|_| {
                Ok(ChallengeResponse {
                    answers: vec![String::from("answer")],
                })
            }),
            ..Default::default()
        };

        method.authenticate(&mut authenticator).await.unwrap();
    }
}

@ -0,0 +1,217 @@
use std::collections::HashMap;
use derive_more::{Display, Error, From};
use serde::{Deserialize, Serialize};
/// Represents messages from an authenticator that act as initiators such as providing
/// a challenge, verifying information, presenting information, or highlighting an error
//
// NOTE: `tag = "type"` serializes these as internally-tagged objects, e.g.
// `{"type": "auth_challenge", ...}`, using the per-variant renames below.
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type")]
pub enum Authentication {
    /// Indicates the beginning of authentication, providing available methods
    #[serde(rename = "auth_initialization")]
    Initialization(Initialization),

    /// Indicates that authentication is starting for the specific `method`
    #[serde(rename = "auth_start_method")]
    StartMethod(StartMethod),

    /// Issues a challenge to be answered
    #[serde(rename = "auth_challenge")]
    Challenge(Challenge),

    /// Requests verification of some text
    #[serde(rename = "auth_verification")]
    Verification(Verification),

    /// Reports some information associated with authentication
    #[serde(rename = "auth_info")]
    Info(Info),

    /// Reports an error occurrred during authentication
    #[serde(rename = "auth_error")]
    Error(Error),

    /// Indicates that the authentication of all methods is finished
    #[serde(rename = "auth_finished")]
    Finished,
}
/// Represents the beginning of the authentication procedure
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Initialization {
    /// Available methods to use for authentication
    pub methods: Vec<String>,
}

/// Represents the start of authentication for some method
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StartMethod {
    /// Id of the method being started (one of the ids advertised during initialization)
    pub method: String,
}

/// Represents a challenge comprising a series of questions to be presented
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Challenge {
    /// Questions to be answered (answers are expected in the same order)
    pub questions: Vec<Question>,

    /// Additional key-value options specific to a particular auth domain
    pub options: HashMap<String, String>,
}

/// Represents an ask to verify some information
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Verification {
    /// The type of verification being requested
    pub kind: VerificationKind,

    /// The text to be verified
    pub text: String,
}

/// Represents some information to be presented related to authentication
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Info {
    /// The text to present
    pub text: String,
}
/// Represents authentication messages that are responses to authenticator requests such
/// as answers to challenges or verifying information
//
// NOTE: like [`Authentication`], this is internally tagged via `tag = "type"`.
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type")]
pub enum AuthenticationResponse {
    /// Contains response to initialization, providing details about which methods to use
    #[serde(rename = "auth_initialization_response")]
    Initialization(InitializationResponse),

    /// Contains answers to challenge request
    #[serde(rename = "auth_challenge_response")]
    Challenge(ChallengeResponse),

    /// Contains response to a verification request
    #[serde(rename = "auth_verification_response")]
    Verification(VerificationResponse),
}
/// Represents a response to initialization to specify which authentication methods to pursue
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct InitializationResponse {
    /// Methods to use (in order as provided)
    pub methods: Vec<String>,
}

/// Represents the answers to a previously-asked challenge associated with authentication
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ChallengeResponse {
    /// Answers to challenge questions (in order relative to questions)
    pub answers: Vec<String>,
}

/// Represents the answer to a previously-asked verification associated with authentication
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct VerificationResponse {
    /// Whether or not the verification was deemed valid
    pub valid: bool,
}
/// Represents the type of verification being requested
#[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum VerificationKind {
    /// An ask to verify the host such as with SSH
    #[display(fmt = "host")]
    Host,

    /// When the verification is unknown (happens when other side is unaware of the kind)
    //
    // NOTE: `#[serde(other)]` makes this the deserialization catch-all, so kinds sent
    // by newer peers degrade to `Unknown` instead of failing to parse.
    #[display(fmt = "unknown")]
    #[serde(other)]
    Unknown,
}

impl VerificationKind {
    /// Returns all variants except "unknown"
    pub const fn known_variants() -> &'static [Self] {
        &[Self::Host]
    }
}
/// Represents a single question in a challenge associated with authentication
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Question {
    /// Label associated with the question for more programmatic usage
    pub label: String,

    /// The text of the question (used for display purposes)
    pub text: String,

    /// Any options information specific to a particular auth domain
    /// such as including a username and instructions for SSH authentication
    pub options: HashMap<String, String>,
}

impl Question {
    /// Creates a new question without any options data using `text` for both label and text
    pub fn new(text: impl Into<String>) -> Self {
        let text = text.into();

        Self {
            label: text.clone(),
            text,
            options: HashMap::new(),
        }
    }
}
/// Represents some error that occurred during authentication
//
// NOTE: `derive_more::Display` renders this as "<kind>: <text>",
// e.g. "Error: missing answer".
#[derive(Clone, Debug, Display, Error, PartialEq, Eq, Serialize, Deserialize)]
#[display(fmt = "{kind}: {text}")]
pub struct Error {
    /// Represents the kind of error
    pub kind: ErrorKind,

    /// Description of the error
    pub text: String,
}

impl Error {
    /// Creates a fatal error
    pub fn fatal(text: impl Into<String>) -> Self {
        Self {
            kind: ErrorKind::Fatal,
            text: text.into(),
        }
    }

    /// Creates a non-fatal error
    pub fn non_fatal(text: impl Into<String>) -> Self {
        Self {
            kind: ErrorKind::Error,
            text: text.into(),
        }
    }

    /// Returns true if error represents a fatal error, meaning that there is no recovery possible
    /// from this error
    pub fn is_fatal(&self) -> bool {
        self.kind.is_fatal()
    }

    /// Converts the error into a [`std::io::Error`] representing permission denied
    pub fn into_io_permission_denied(self) -> std::io::Error {
        std::io::Error::new(std::io::ErrorKind::PermissionDenied, self)
    }
}
/// Represents the type of error encountered during authentication
//
// NOTE: without explicit `#[display(...)]` attributes, variants display as their
// names ("Fatal"/"Error"), which feeds the "<kind>: <text>" format of [`Error`].
#[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ErrorKind {
    /// Error is unrecoverable
    Fatal,

    /// Error is recoverable
    Error,
}

impl ErrorKind {
    /// Returns true if error kind represents a fatal error, meaning that there is no recovery
    /// possible from this error
    pub fn is_fatal(self) -> bool {
        matches!(self, Self::Fatal)
    }
}

@ -3,7 +3,7 @@ name = "distant-core"
description = "Core library for distant, enabling operation on a remote computer through file and process manipulation"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.17.5"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -11,39 +11,26 @@ repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
schemars = ["dep:schemars", "distant-net/schemars"]
[dependencies]
async-trait = "0.1.57"
bitflags = "1.3.2"
bytes = "1.2.1"
async-trait = "0.1.68"
bitflags = "2.3.1"
bytes = "1.4.0"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-net = { version = "=0.17.5", path = "../distant-net" }
futures = "0.3.21"
distant-net = { version = "=0.20.0", path = "../distant-net" }
distant-protocol = { version = "=0.20.0", path = "../distant-protocol" }
futures = "0.3.28"
hex = "0.4.3"
log = "0.4.17"
notify = { version = "=5.0.0-pre.15", features = ["serde"] }
once_cell = "1.13.0"
portable-pty = "0.7.0"
log = "0.4.18"
num_cpus = "1.15.0"
once_cell = "1.17.2"
rand = { version = "0.8.5", features = ["getrandom"] }
serde = { version = "1.0.142", features = ["derive"] }
serde_bytes = "0.11.7"
serde_json = "1.0.83"
shell-words = "1.1.0"
regex = "1.8.3"
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
serde_json = "1.0.96"
strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.20.1", features = ["full"] }
tokio-util = { version = "0.7.3", features = ["codec"] }
walkdir = "2.3.2"
winsplit = "0.1.0"
# Optional dependencies based on features
clap = { version = "3.2.16", features = ["derive"], optional = true }
schemars = { version = "0.8.10", optional = true }
tokio = { version = "1.28.2", features = ["full"] }
[dev-dependencies]
assert_fs = "1.0.7"
flexi_logger = "0.23.0"
indoc = "1.0.7"
predicates = "2.1.1"
rstest = "0.15.0"
env_logger = "0.10.0"
test-log = "0.2.11"

@ -1,26 +1,20 @@
# distant core
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.61.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-core.svg
[distant_crates_lnk]: https://crates.io/crates/distant-core
[distant_doc_img]: https://docs.rs/distant-core/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-core
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.61+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2022/05/19/Rust-1.61.0.html
Library that powers the [`distant`](https://github.com/chipsenkbeil/distant)
binary.
🚧 **(Alpha stage software) This library is in rapid development and may break or change frequently!** 🚧
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
The `distant-core` library supplies the client, manager, and server
implementations for use with the distant API in order to communicate with
remote machines and perform actions. This library acts as the primary
implementation that powers the CLI, but is also available for other extensions
like `distant-ssh2`.
The `distant-core` library supplies the client and server interfaces along with
a client implementation for distant. The library exposes an API that downstream
libraries such as `distant-local` and `distant-ssh2` can implement to provide a
distant-compatible interface.
## Installation
@ -28,43 +22,7 @@ You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-core = "0.17"
```
## Features
Currently, the library supports the following features:
- `clap`: generates [`Clap`](https://github.com/clap-rs) bindings for
`DistantRequestData` (used by cli to expose request actions)
- `schemars`: derives the `schemars::JsonSchema` interface on
`DistantMsg`, `DistantRequestData`, and `DistantResponseData` data types
By default, no features are enabled on the library.
## Examples
Below is an example of connecting to a distant server over TCP without any
encryption or authentication:
```rust
use distant_core::{
DistantClient,
DistantChannelExt,
net::{PlainCodec, TcpClientExt},
};
use std::{net::SocketAddr, path::Path};
// Connect to a server located at example.com on port 8080 that is using
// no encryption or authentication (PlainCodec)
let addr: SocketAddr = "example.com:8080".parse().unwrap();
let mut client = DistantClient::connect(addr, PlainCodec).await
.expect("Failed to connect");
// Append text to a file
// NOTE: This method comes from DistantChannelExt
client.append_file_text(Path::new("path/to/file.txt"), "new contents").await
.expect("Failed to append to file");
distant-core = "0.20"
```
## License

@ -1,48 +1,40 @@
use crate::{
data::{ChangeKind, DirEntry, Environment, Error, Metadata, ProcessId, PtySize, SystemInfo},
ConnectionId, DistantMsg, DistantRequestData, DistantResponseData,
};
use std::io;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use distant_net::{Reply, Server, ServerCtx};
use distant_net::common::ConnectionId;
use distant_net::server::{Reply, RequestCtx, ServerHandler};
use log::*;
use std::{io, path::PathBuf, sync::Arc};
mod local;
pub use local::LocalDistantApi;
use crate::protocol::{
self, ChangeKind, DirEntry, Environment, Error, Metadata, Permissions, ProcessId, PtySize,
SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version,
};
mod reply;
use reply::DistantSingleReply;
/// Represents the context provided to the [`DistantApi`] for incoming requests
pub struct DistantCtx<T> {
pub struct DistantCtx {
pub connection_id: ConnectionId,
pub reply: Box<dyn Reply<Data = DistantResponseData>>,
pub local_data: Arc<T>,
pub reply: Box<dyn Reply<Data = protocol::Response>>,
}
/// Represents a server that leverages an API compliant with `distant`
pub struct DistantApiServer<T, D>
/// Represents a [`ServerHandler`] that leverages an API compliant with `distant`
pub struct DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D>,
T: DistantApi,
{
api: T,
api: Arc<T>,
}
impl<T, D> DistantApiServer<T, D>
impl<T> DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D>,
T: DistantApi,
{
pub fn new(api: T) -> Self {
Self { api }
}
}
impl DistantApiServer<LocalDistantApi, <LocalDistantApi as DistantApi>::LocalData> {
/// Creates a new server using the [`LocalDistantApi`] implementation
pub fn local() -> io::Result<Self> {
Ok(Self {
api: LocalDistantApi::initialize()?,
})
Self { api: Arc::new(api) }
}
}
@ -50,7 +42,7 @@ impl DistantApiServer<LocalDistantApi, <LocalDistantApi as DistantApi>::LocalDat
fn unsupported<T>(label: &str) -> io::Result<T> {
Err(io::Error::new(
io::ErrorKind::Unsupported,
format!("{} is unsupported", label),
format!("{label} is unsupported"),
))
}
@ -58,12 +50,25 @@ fn unsupported<T>(label: &str) -> io::Result<T> {
/// which can be used to build other servers that are compatible with distant
#[async_trait]
pub trait DistantApi {
type LocalData: Send + Sync;
/// Invoked whenever a new connection is established.
#[allow(unused_variables)]
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked whenever a new connection is established, providing a mutable reference to the
/// newly-created local data. This is a way to support modifying local data before it is used.
/// Invoked whenever an existing connection is dropped.
#[allow(unused_variables)]
async fn on_accept(&self, local_data: &mut Self::LocalData) {}
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Retrieves information about the server's capabilities.
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn version(&self, ctx: DistantCtx) -> io::Result<Version> {
unsupported("version")
}
/// Reads bytes from a file.
///
@ -71,11 +76,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn read_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<Vec<u8>> {
async fn read_file(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
unsupported("read_file")
}
@ -85,11 +86,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn read_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<String> {
async fn read_file_text(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<String> {
unsupported("read_file_text")
}
@ -100,12 +97,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn write_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn write_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
unsupported("write_file")
}
@ -118,7 +110,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn write_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -132,12 +124,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn append_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn append_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
unsupported("append_file")
}
@ -150,7 +137,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn append_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -169,7 +156,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn read_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
depth: usize,
absolute: bool,
@ -186,12 +173,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn create_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
all: bool,
) -> io::Result<()> {
async fn create_dir(&self, ctx: DistantCtx, path: PathBuf, all: bool) -> io::Result<()> {
unsupported("create_dir")
}
@ -202,12 +184,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn copy(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn copy(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
unsupported("copy")
}
@ -218,12 +195,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn remove(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
force: bool,
) -> io::Result<()> {
async fn remove(&self, ctx: DistantCtx, path: PathBuf, force: bool) -> io::Result<()> {
unsupported("remove")
}
@ -234,12 +206,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn rename(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn rename(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
unsupported("rename")
}
@ -254,7 +221,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn watch(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
recursive: bool,
only: Vec<ChangeKind>,
@ -269,7 +236,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn unwatch(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<()> {
async fn unwatch(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<()> {
unsupported("unwatch")
}
@ -279,7 +246,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn exists(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<bool> {
async fn exists(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<bool> {
unsupported("exists")
}
@ -293,7 +260,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn metadata(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
canonicalize: bool,
resolve_file_type: bool,
@ -301,24 +268,59 @@ pub trait DistantApi {
unsupported("metadata")
}
/// Sets permissions for a file, directory, or symlink.
///
/// * `path` - the path to the file, directory, or symlink
/// * `resolve_symlink` - if true, will resolve the path to the underlying file/directory
/// * `permissions` - the new permissions to apply
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn set_permissions(
&self,
ctx: DistantCtx,
path: PathBuf,
permissions: Permissions,
options: SetPermissionsOptions,
) -> io::Result<()> {
unsupported("set_permissions")
}
/// Searches files for matches based on a query.
///
/// * `query` - the specific query to perform
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn search(&self, ctx: DistantCtx, query: SearchQuery) -> io::Result<SearchId> {
unsupported("search")
}
/// Cancels an actively-ongoing search.
///
/// * `id` - the id of the search to cancel
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn cancel_search(&self, ctx: DistantCtx, id: SearchId) -> io::Result<()> {
unsupported("cancel_search")
}
/// Spawns a new process, returning its id.
///
/// * `cmd` - the full command to run as a new process (including arguments)
/// * `environment` - the environment variables to associate with the process
/// * `current_dir` - the alternative current directory to use with the process
/// * `persist` - if true, the process will continue running even after the connection that
/// spawned the process has terminated
/// * `pty` - if provided, will run the process within a PTY of the given size
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_spawn(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
) -> io::Result<ProcessId> {
unsupported("proc_spawn")
@ -330,7 +332,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_kill(&self, ctx: DistantCtx<Self::LocalData>, id: ProcessId) -> io::Result<()> {
async fn proc_kill(&self, ctx: DistantCtx, id: ProcessId) -> io::Result<()> {
unsupported("proc_kill")
}
@ -341,12 +343,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_stdin(
&self,
ctx: DistantCtx<Self::LocalData>,
id: ProcessId,
data: Vec<u8>,
) -> io::Result<()> {
async fn proc_stdin(&self, ctx: DistantCtx, id: ProcessId, data: Vec<u8>) -> io::Result<()> {
unsupported("proc_stdin")
}
@ -359,7 +356,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn proc_resize_pty(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
id: ProcessId,
size: PtySize,
) -> io::Result<()> {
@ -370,32 +367,34 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn system_info(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<SystemInfo> {
async fn system_info(&self, ctx: DistantCtx) -> io::Result<SystemInfo> {
unsupported("system_info")
}
}
#[async_trait]
impl<T, D> Server for DistantApiServer<T, D>
impl<T> ServerHandler for DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D> + Send + Sync,
D: Send + Sync,
T: DistantApi + Send + Sync + 'static,
{
type Request = DistantMsg<DistantRequestData>;
type Response = DistantMsg<DistantResponseData>;
type LocalData = D;
type Request = protocol::Msg<protocol::Request>;
type Response = protocol::Msg<protocol::Response>;
/// Overridden to leverage [`DistantApi`] implementation of `on_accept`
async fn on_accept(&self, local_data: &mut Self::LocalData) {
T::on_accept(&self.api, local_data).await
/// Overridden to leverage [`DistantApi`] implementation of `on_connect`.
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
T::on_connect(&self.api, id).await
}
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
let ServerCtx {
/// Overridden to leverage [`DistantApi`] implementation of `on_disconnect`.
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
T::on_disconnect(&self.api, id).await
}
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
let RequestCtx {
connection_id,
request,
reply,
local_data,
} = ctx;
// Convert our reply to a queued reply so we can ensure that the result
@ -404,61 +403,104 @@ where
// Process single vs batch requests
let response = match request.payload {
DistantMsg::Single(data) => {
protocol::Msg::Single(data) => {
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
local_data,
};
let data = handle_request(self, ctx, data).await;
let data = handle_request(Arc::clone(&self.api), ctx, data).await;
// Report outgoing errors in our debug logs
if let DistantResponseData::Error(x) = &data {
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
}
DistantMsg::Single(data)
protocol::Msg::Single(data)
}
DistantMsg::Batch(list) => {
protocol::Msg::Batch(list)
if matches!(request.header.get_as("sequence"), Some(Ok(true))) =>
{
let mut out = Vec::new();
let mut has_failed = false;
for data in list {
// Once we hit a failure, all remaining requests return interrupted
if has_failed {
out.push(protocol::Response::Error(protocol::Error {
kind: protocol::ErrorKind::Interrupted,
description: String::from("Canceled due to earlier error"),
}));
continue;
}
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
local_data: Arc::clone(&local_data),
};
// TODO: This does not run in parallel, meaning that the next item in the
// batch will not be queued until the previous item completes! This
// would be useful if we wanted to chain requests where the previous
// request feeds into the current request, but not if we just want
// to run everything together. So we should instead rewrite this
// to spawn a task per request and then await completion of all tasks
let data = handle_request(self, ctx, data).await;
let data = handle_request(Arc::clone(&self.api), ctx, data).await;
// Report outgoing errors in our debug logs
if let DistantResponseData::Error(x) = &data {
// Report outgoing errors in our debug logs and mark as failed
// to cancel any future tasks being run
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
has_failed = true;
}
out.push(data);
}
DistantMsg::Batch(out)
protocol::Msg::Batch(out)
}
protocol::Msg::Batch(list) => {
let mut tasks = Vec::new();
// If sequence specified as true, we want to process in order, otherwise we can
// process in any order
for data in list {
let api = Arc::clone(&self.api);
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
};
let task = tokio::spawn(async move {
let data = handle_request(api, ctx, data).await;
// Report outgoing errors in our debug logs
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
}
data
});
tasks.push(task);
}
let out = futures::future::join_all(tasks)
.await
.into_iter()
.map(|x| match x {
Ok(x) => x,
Err(x) => protocol::Response::Error(x.to_string().into()),
})
.collect();
protocol::Msg::Batch(out)
}
};
// Queue up our result to go before ANY of the other messages that might be sent.
// This is important to avoid situations such as when a process is started, but before
// the confirmation can be sent some stdout or stderr is captured and sent first.
if let Err(x) = reply.send_before(response).await {
if let Err(x) = reply.send_before(response) {
error!("[Conn {}] Failed to send response: {}", connection_id, x);
}
// Flush out all of our replies thus far and toggle to no longer hold submissions
if let Err(x) = reply.flush(false).await {
if let Err(x) = reply.flush(false) {
error!(
"[Conn {}] Failed to flush response queue: {}",
connection_id, x
@ -468,159 +510,161 @@ where
}
/// Processes an incoming request
async fn handle_request<T, D>(
server: &DistantApiServer<T, D>,
ctx: DistantCtx<D>,
request: DistantRequestData,
) -> DistantResponseData
async fn handle_request<T>(
api: Arc<T>,
ctx: DistantCtx,
request: protocol::Request,
) -> protocol::Response
where
T: DistantApi<LocalData = D> + Send + Sync,
D: Send + Sync,
T: DistantApi + Send + Sync,
{
match request {
DistantRequestData::FileRead { path } => server
.api
protocol::Request::Version {} => api
.version(ctx)
.await
.map(protocol::Response::Version)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileRead { path } => api
.read_file(ctx, path)
.await
.map(|data| DistantResponseData::Blob { data })
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::FileReadText { path } => server
.api
.map(|data| protocol::Response::Blob { data })
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileReadText { path } => api
.read_file_text(ctx, path)
.await
.map(|data| DistantResponseData::Text { data })
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::FileWrite { path, data } => server
.api
.map(|data| protocol::Response::Text { data })
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileWrite { path, data } => api
.write_file(ctx, path, data)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::FileWriteText { path, text } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileWriteText { path, text } => api
.write_file_text(ctx, path, text)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::FileAppend { path, data } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileAppend { path, data } => api
.append_file(ctx, path, data)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::FileAppendText { path, text } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileAppendText { path, text } => api
.append_file_text(ctx, path, text)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::DirRead {
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::DirRead {
path,
depth,
absolute,
canonicalize,
include_root,
} => server
.api
} => api
.read_dir(ctx, path, depth, absolute, canonicalize, include_root)
.await
.map(|(entries, errors)| DistantResponseData::DirEntries {
.map(|(entries, errors)| protocol::Response::DirEntries {
entries,
errors: errors.into_iter().map(Error::from).collect(),
})
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::DirCreate { path, all } => server
.api
.unwrap_or_else(protocol::Response::from),
protocol::Request::DirCreate { path, all } => api
.create_dir(ctx, path, all)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Remove { path, force } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Remove { path, force } => api
.remove(ctx, path, force)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Copy { src, dst } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Copy { src, dst } => api
.copy(ctx, src, dst)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Rename { src, dst } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Rename { src, dst } => api
.rename(ctx, src, dst)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Watch {
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Watch {
path,
recursive,
only,
except,
} => server
.api
} => api
.watch(ctx, path, recursive, only, except)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Unwatch { path } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Unwatch { path } => api
.unwatch(ctx, path)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Exists { path } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Exists { path } => api
.exists(ctx, path)
.await
.map(|value| DistantResponseData::Exists { value })
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::Metadata {
.map(|value| protocol::Response::Exists { value })
.unwrap_or_else(protocol::Response::from),
protocol::Request::Metadata {
path,
canonicalize,
resolve_file_type,
} => server
.api
} => api
.metadata(ctx, path, canonicalize, resolve_file_type)
.await
.map(DistantResponseData::Metadata)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::ProcSpawn {
.map(protocol::Response::Metadata)
.unwrap_or_else(protocol::Response::from),
protocol::Request::SetPermissions {
path,
permissions,
options,
} => api
.set_permissions(ctx, path, permissions, options)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Search { query } => api
.search(ctx, query)
.await
.map(|id| protocol::Response::SearchStarted { id })
.unwrap_or_else(protocol::Response::from),
protocol::Request::CancelSearch { id } => api
.cancel_search(ctx, id)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcSpawn {
cmd,
environment,
current_dir,
persist,
pty,
} => server
.api
.proc_spawn(ctx, cmd.into(), environment, current_dir, persist, pty)
} => api
.proc_spawn(ctx, cmd.into(), environment, current_dir, pty)
.await
.map(|id| DistantResponseData::ProcSpawned { id })
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::ProcKill { id } => server
.api
.map(|id| protocol::Response::ProcSpawned { id })
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcKill { id } => api
.proc_kill(ctx, id)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::ProcStdin { id, data } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcStdin { id, data } => api
.proc_stdin(ctx, id, data)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::ProcResizePty { id, size } => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcResizePty { id, size } => api
.proc_resize_pty(ctx, id, size)
.await
.map(|_| DistantResponseData::Ok)
.unwrap_or_else(DistantResponseData::from),
DistantRequestData::SystemInfo {} => server
.api
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::SystemInfo {} => api
.system_info(ctx)
.await
.map(DistantResponseData::SystemInfo)
.unwrap_or_else(DistantResponseData::from),
.map(protocol::Response::SystemInfo)
.unwrap_or_else(protocol::Response::from),
}
}

@ -1,68 +0,0 @@
use crate::{data::ProcessId, ConnectionId};
use std::{io, path::PathBuf};
mod process;
pub use process::*;
mod watcher;
pub use watcher::*;
/// Holds global state state managed by the server
pub struct GlobalState {
/// State that holds information about processes running on the server
pub process: ProcessState,
/// Watcher used for filesystem events
pub watcher: WatcherState,
}
impl GlobalState {
pub fn initialize() -> io::Result<Self> {
Ok(Self {
process: ProcessState::new(),
watcher: WatcherState::initialize()?,
})
}
}
/// Holds connection-specific state managed by the server
#[derive(Default)]
pub struct ConnectionState {
/// Unique id associated with connection
id: ConnectionId,
/// Channel connected to global process state
pub(crate) process_channel: ProcessChannel,
/// Channel connected to global watcher state
pub(crate) watcher_channel: WatcherChannel,
/// Contains ids of processes that will be terminated when the connection is closed
processes: Vec<ProcessId>,
/// Contains paths being watched that will be unwatched when the connection is closed
paths: Vec<PathBuf>,
}
impl Drop for ConnectionState {
fn drop(&mut self) {
let id = self.id;
let processes: Vec<ProcessId> = self.processes.drain(..).collect();
let paths: Vec<PathBuf> = self.paths.drain(..).collect();
let process_channel = self.process_channel.clone();
let watcher_channel = self.watcher_channel.clone();
// NOTE: We cannot (and should not) block during drop to perform cleanup,
// instead spawning a task that will do the cleanup async
tokio::spawn(async move {
for id in processes {
let _ = process_channel.kill(id).await;
}
for path in paths {
let _ = watcher_channel.unwatch(id, path).await;
}
});
}
}

@ -1,311 +0,0 @@
use crate::{constants::SERVER_WATCHER_CAPACITY, data::ChangeKind, ConnectionId};
use log::*;
use notify::{
Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind,
Event as WatcherEvent, PollWatcher, RecursiveMode, Watcher,
};
use std::{
collections::HashMap,
io,
ops::Deref,
path::{Path, PathBuf},
};
use tokio::{
sync::{
mpsc::{self, error::TrySendError},
oneshot,
},
task::JoinHandle,
};
mod path;
pub use path::*;
/// Holds information related to watched paths on the server
pub struct WatcherState {
channel: WatcherChannel,
task: JoinHandle<()>,
}
impl Drop for WatcherState {
/// Aborts the task that handles watcher path operations and management
fn drop(&mut self) {
self.abort();
}
}
impl WatcherState {
/// Will create a watcher and initialize watched paths to be empty
pub fn initialize() -> io::Result<Self> {
// NOTE: Cannot be something small like 1 as this seems to cause a deadlock sometimes
// with a large volume of watch requests
let (tx, rx) = mpsc::channel(SERVER_WATCHER_CAPACITY);
macro_rules! configure_and_spawn {
($watcher:ident) => {{
// Attempt to configure watcher, but don't fail if these configurations fail
match $watcher.configure(WatcherConfig::PreciseEvents(true)) {
Ok(true) => debug!("Watcher configured for precise events"),
Ok(false) => debug!("Watcher not configured for precise events",),
Err(x) => error!("Watcher configuration for precise events failed: {}", x),
}
// Attempt to configure watcher, but don't fail if these configurations fail
match $watcher.configure(WatcherConfig::NoticeEvents(true)) {
Ok(true) => debug!("Watcher configured for notice events"),
Ok(false) => debug!("Watcher not configured for notice events",),
Err(x) => error!("Watcher configuration for notice events failed: {}", x),
}
Ok(Self {
channel: WatcherChannel { tx },
task: tokio::spawn(watcher_task($watcher, rx)),
})
}};
}
macro_rules! event_handler {
($tx:ident) => {
move |res| match $tx.try_send(match res {
Ok(x) => InnerWatcherMsg::Event { ev: x },
Err(x) => InnerWatcherMsg::Error { err: x },
}) {
Ok(_) => (),
Err(TrySendError::Full(_)) => {
warn!(
"Reached watcher capacity of {}! Dropping watcher event!",
SERVER_WATCHER_CAPACITY,
);
}
Err(TrySendError::Closed(_)) => {
warn!("Skipping watch event because watcher channel closed");
}
}
};
}
let tx = tx.clone();
let result = {
let tx = tx.clone();
notify::recommended_watcher(event_handler!(tx))
};
match result {
Ok(mut watcher) => configure_and_spawn!(watcher),
Err(x) => match x.kind {
// notify-rs has a bug on Mac M1 with Docker and Linux, so we detect that error
// and fall back to the poll watcher if this occurs
//
// https://github.com/notify-rs/notify/issues/423
WatcherErrorKind::Io(x) if x.raw_os_error() == Some(38) => {
warn!("Recommended watcher is unsupported! Falling back to polling watcher!");
let mut watcher = PollWatcher::new(event_handler!(tx))
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
configure_and_spawn!(watcher)
}
_ => Err(io::Error::new(io::ErrorKind::Other, x)),
},
}
}
pub fn clone_channel(&self) -> WatcherChannel {
self.channel.clone()
}
/// Aborts the watcher task
pub fn abort(&self) {
self.task.abort();
}
}
impl Deref for WatcherState {
type Target = WatcherChannel;
fn deref(&self) -> &Self::Target {
&self.channel
}
}
#[derive(Clone)]
pub struct WatcherChannel {
tx: mpsc::Sender<InnerWatcherMsg>,
}
impl Default for WatcherChannel {
/// Creates a new channel that is closed by default
fn default() -> Self {
let (tx, _) = mpsc::channel(1);
Self { tx }
}
}
impl WatcherChannel {
/// Watch a path for a specific connection denoted by the id within the registered path
pub async fn watch(&self, registered_path: RegisteredPath) -> io::Result<()> {
let (cb, rx) = oneshot::channel();
self.tx
.send(InnerWatcherMsg::Watch {
registered_path,
cb,
})
.await
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Internal watcher task closed"))?;
rx.await
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Response to watch dropped"))?
}
/// Unwatch a path for a specific connection denoted by the id
pub async fn unwatch(&self, id: ConnectionId, path: impl AsRef<Path>) -> io::Result<()> {
let (cb, rx) = oneshot::channel();
let path = tokio::fs::canonicalize(path.as_ref())
.await
.unwrap_or_else(|_| path.as_ref().to_path_buf());
self.tx
.send(InnerWatcherMsg::Unwatch { id, path, cb })
.await
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Internal watcher task closed"))?;
rx.await
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Response to unwatch dropped"))?
}
}
/// Internal message to pass to our task below to perform some action
enum InnerWatcherMsg {
Watch {
registered_path: RegisteredPath,
cb: oneshot::Sender<io::Result<()>>,
},
Unwatch {
id: ConnectionId,
path: PathBuf,
cb: oneshot::Sender<io::Result<()>>,
},
Event {
ev: WatcherEvent,
},
Error {
err: WatcherError,
},
}
async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWatcherMsg>) {
// TODO: Optimize this in some way to be more performant than
// checking every path whenever an event comes in
let mut registered_paths: Vec<RegisteredPath> = Vec::new();
let mut path_cnt: HashMap<PathBuf, usize> = HashMap::new();
while let Some(msg) = rx.recv().await {
match msg {
InnerWatcherMsg::Watch {
registered_path,
cb,
} => {
// Check if we are tracking the path across any connection
if let Some(cnt) = path_cnt.get_mut(registered_path.path()) {
// Increment the count of times we are watching that path
*cnt += 1;
// Store the registered path in our collection without worry
// since we are already watching a path that impacts this one
registered_paths.push(registered_path);
// Send an okay because we always succeed in this case
let _ = cb.send(Ok(()));
} else {
let res = watcher
.watch(
registered_path.path(),
if registered_path.is_recursive() {
RecursiveMode::Recursive
} else {
RecursiveMode::NonRecursive
},
)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x));
// If we succeeded, store our registered path and set the tracking cnt to 1
if res.is_ok() {
path_cnt.insert(registered_path.path().to_path_buf(), 1);
registered_paths.push(registered_path);
}
// Send the result of the watch, but don't worry if the channel was closed
let _ = cb.send(res);
}
}
InnerWatcherMsg::Unwatch { id, path, cb } => {
// Check if we are tracking the path across any connection
if let Some(cnt) = path_cnt.get(path.as_path()) {
// Cycle through and remove all paths that match the given id and path,
// capturing how many paths we removed
let removed_cnt = {
let old_len = registered_paths.len();
registered_paths
.retain(|p| p.id() != id || (p.path() != path && p.raw_path() != path));
let new_len = registered_paths.len();
old_len - new_len
};
// 1. If we are now at zero cnt for our path, we want to actually unwatch the
// path with our watcher
// 2. If we removed nothing from our path list, we want to return an error
// 3. Otherwise, we return okay because we succeeded
if *cnt <= removed_cnt {
let _ = cb.send(
watcher
.unwatch(&path)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x)),
);
} else if removed_cnt == 0 {
// Send a failure as there was nothing to unwatch for this connection
let _ = cb.send(Err(io::Error::new(
io::ErrorKind::Other,
format!("{:?} is not being watched", path),
)));
} else {
// Send a success as we removed some paths
let _ = cb.send(Ok(()));
}
} else {
// Send a failure as there was nothing to unwatch
let _ = cb.send(Err(io::Error::new(
io::ErrorKind::Other,
format!("{:?} is not being watched", path),
)));
}
}
InnerWatcherMsg::Event { ev } => {
let kind = ChangeKind::from(ev.kind);
for registered_path in registered_paths.iter() {
match registered_path.filter_and_send(kind, &ev.paths).await {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
),
}
}
}
InnerWatcherMsg::Error { err } => {
let msg = err.to_string();
error!("Watcher encountered an error {} for {:?}", msg, err.paths);
for registered_path in registered_paths.iter() {
match registered_path
.filter_and_send_error(&msg, &err.paths, !err.paths.is_empty())
.await
{
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
),
}
}
}
}
}
}

@ -1,26 +1,24 @@
use crate::{api::DistantMsg, data::DistantResponseData};
use distant_net::Reply;
use std::{future::Future, io, pin::Pin};
use std::io;
use distant_net::server::Reply;
use crate::protocol;
/// Wrapper around a reply that can be batch or single, converting
/// a single data into the wrapped type
pub struct DistantSingleReply(Box<dyn Reply<Data = DistantMsg<DistantResponseData>>>);
pub struct DistantSingleReply(Box<dyn Reply<Data = protocol::Msg<protocol::Response>>>);
impl From<Box<dyn Reply<Data = DistantMsg<DistantResponseData>>>> for DistantSingleReply {
fn from(reply: Box<dyn Reply<Data = DistantMsg<DistantResponseData>>>) -> Self {
impl From<Box<dyn Reply<Data = protocol::Msg<protocol::Response>>>> for DistantSingleReply {
fn from(reply: Box<dyn Reply<Data = protocol::Msg<protocol::Response>>>) -> Self {
Self(reply)
}
}
impl Reply for DistantSingleReply {
type Data = DistantResponseData;
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>> {
self.0.send(DistantMsg::Single(data))
}
type Data = protocol::Response;
fn blocking_send(&self, data: Self::Data) -> io::Result<()> {
self.0.blocking_send(DistantMsg::Single(data))
fn send(&self, data: Self::Data) -> io::Result<()> {
self.0.send(protocol::Msg::Single(data))
}
fn clone_reply(&self) -> Box<dyn Reply<Data = Self::Data>> {

@ -1,18 +1,24 @@
use crate::{DistantMsg, DistantRequestData, DistantResponseData};
use distant_net::{Channel, Client};
use distant_net::client::Channel;
use distant_net::Client;
use crate::protocol;
mod ext;
mod lsp;
mod process;
mod searcher;
mod watcher;
/// Represents a [`Client`] that communicates using the distant protocol
pub type DistantClient = Client<DistantMsg<DistantRequestData>, DistantMsg<DistantResponseData>>;
pub type DistantClient =
Client<protocol::Msg<protocol::Request>, protocol::Msg<protocol::Response>>;
/// Represents a [`Channel`] that communicates using the distant protocol
pub type DistantChannel = Channel<DistantMsg<DistantRequestData>, DistantMsg<DistantResponseData>>;
pub type DistantChannel =
Channel<protocol::Msg<protocol::Request>, protocol::Msg<protocol::Response>>;
pub use ext::*;
pub use lsp::*;
pub use process::*;
pub use searcher::*;
pub use watcher::*;

@ -1,15 +1,19 @@
use crate::{
client::{
RemoteCommand, RemoteLspCommand, RemoteLspProcess, RemoteOutput, RemoteProcess, Watcher,
},
data::{
ChangeKindSet, DirEntry, DistantRequestData, DistantResponseData, Environment,
Error as Failure, Metadata, PtySize, SystemInfo,
},
DistantMsg,
use std::future::Future;
use std::io;
use std::path::PathBuf;
use std::pin::Pin;
use distant_net::client::Channel;
use distant_net::common::Request;
use crate::client::{
RemoteCommand, RemoteLspCommand, RemoteLspProcess, RemoteOutput, RemoteProcess, Searcher,
Watcher,
};
use crate::protocol::{
self, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, Permissions, PtySize,
SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version,
};
use distant_net::{Channel, Request};
use std::{future::Future, io, path::PathBuf, pin::Pin};
pub type AsyncReturn<'a, T, E = io::Error> =
Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>;
@ -18,7 +22,7 @@ fn mismatched_response() -> io::Error {
io::Error::new(io::ErrorKind::Other, "Mismatched response")
}
/// Provides convenience functions on top of a [`SessionChannel`]
/// Provides convenience functions on top of a [`Channel`]
pub trait DistantChannelExt {
/// Appends to a remote file using the data from a collection of bytes
fn append_file(
@ -40,8 +44,12 @@ pub trait DistantChannelExt {
/// Creates a remote directory, optionally creating all parent components if specified
fn create_dir(&mut self, path: impl Into<PathBuf>, all: bool) -> AsyncReturn<'_, ()>;
/// Checks whether the `path` exists on the remote machine
fn exists(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, bool>;
/// Checks whether this client is compatible with the remote server
fn is_compatible(&mut self) -> AsyncReturn<'_, bool>;
/// Retrieves metadata about a path on a remote machine
fn metadata(
&mut self,
@ -50,6 +58,20 @@ pub trait DistantChannelExt {
resolve_file_type: bool,
) -> AsyncReturn<'_, Metadata>;
/// Sets permissions for a path on a remote machine
fn set_permissions(
&mut self,
path: impl Into<PathBuf>,
permissions: Permissions,
options: SetPermissionsOptions,
) -> AsyncReturn<'_, ()>;
/// Perform a search
fn search(&mut self, query: impl Into<SearchQuery>) -> AsyncReturn<'_, Searcher>;
/// Cancel an active search query
fn cancel_search(&mut self, id: SearchId) -> AsyncReturn<'_, ()>;
/// Reads entries from a directory, returning a tuple of directory entries and failures
fn read_dir(
&mut self,
@ -91,7 +113,6 @@ pub trait DistantChannelExt {
cmd: impl Into<String>,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
) -> AsyncReturn<'_, RemoteProcess>;
@ -101,7 +122,6 @@ pub trait DistantChannelExt {
cmd: impl Into<String>,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
) -> AsyncReturn<'_, RemoteLspProcess>;
@ -117,6 +137,12 @@ pub trait DistantChannelExt {
/// Retrieves information about the remote system
fn system_info(&mut self) -> AsyncReturn<'_, SystemInfo>;
/// Retrieves server version information
fn version(&mut self) -> AsyncReturn<'_, Version>;
/// Returns version of protocol that the client uses
fn protocol_version(&self) -> protocol::semver::Version;
/// Writes a remote file with the data from a collection of bytes
fn write_file(
&mut self,
@ -136,21 +162,21 @@ macro_rules! make_body {
($self:expr, $data:expr, @ok) => {
make_body!($self, $data, |data| {
match data {
DistantResponseData::Ok => Ok(()),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
protocol::Response::Ok => Ok(()),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
})
};
($self:expr, $data:expr, $and_then:expr) => {{
let req = Request::new(DistantMsg::Single($data));
let req = Request::new(protocol::Msg::Single($data));
Box::pin(async move {
$self
.send(req)
.await
.and_then(|res| match res.payload {
DistantMsg::Single(x) => Ok(x),
protocol::Msg::Single(x) => Ok(x),
_ => Err(mismatched_response()),
})
.and_then($and_then)
@ -159,7 +185,7 @@ macro_rules! make_body {
}
impl DistantChannelExt
for Channel<DistantMsg<DistantRequestData>, DistantMsg<DistantResponseData>>
for Channel<protocol::Msg<protocol::Request>, protocol::Msg<protocol::Response>>
{
fn append_file(
&mut self,
@ -168,7 +194,7 @@ impl DistantChannelExt
) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::FileAppend { path: path.into(), data: data.into() },
protocol::Request::FileAppend { path: path.into(), data: data.into() },
@ok
)
}
@ -180,7 +206,7 @@ impl DistantChannelExt
) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::FileAppendText { path: path.into(), text: data.into() },
protocol::Request::FileAppendText { path: path.into(), text: data.into() },
@ok
)
}
@ -188,7 +214,7 @@ impl DistantChannelExt
fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::Copy { src: src.into(), dst: dst.into() },
protocol::Request::Copy { src: src.into(), dst: dst.into() },
@ok
)
}
@ -196,7 +222,7 @@ impl DistantChannelExt
fn create_dir(&mut self, path: impl Into<PathBuf>, all: bool) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::DirCreate { path: path.into(), all },
protocol::Request::DirCreate { path: path.into(), all },
@ok
)
}
@ -204,15 +230,24 @@ impl DistantChannelExt
fn exists(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, bool> {
make_body!(
self,
DistantRequestData::Exists { path: path.into() },
protocol::Request::Exists { path: path.into() },
|data| match data {
DistantResponseData::Exists { value } => Ok(value),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
protocol::Response::Exists { value } => Ok(value),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
}
fn is_compatible(&mut self) -> AsyncReturn<'_, bool> {
make_body!(self, protocol::Request::Version {}, |data| match data {
protocol::Response::Version(version) =>
Ok(protocol::is_compatible_with(&version.protocol_version)),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
})
}
fn metadata(
&mut self,
path: impl Into<PathBuf>,
@ -221,19 +256,49 @@ impl DistantChannelExt
) -> AsyncReturn<'_, Metadata> {
make_body!(
self,
DistantRequestData::Metadata {
protocol::Request::Metadata {
path: path.into(),
canonicalize,
resolve_file_type
},
|data| match data {
DistantResponseData::Metadata(x) => Ok(x),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
protocol::Response::Metadata(x) => Ok(x),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
}
fn set_permissions(
&mut self,
path: impl Into<PathBuf>,
permissions: Permissions,
options: SetPermissionsOptions,
) -> AsyncReturn<'_, ()> {
make_body!(
self,
protocol::Request::SetPermissions {
path: path.into(),
permissions,
options,
},
@ok
)
}
fn search(&mut self, query: impl Into<SearchQuery>) -> AsyncReturn<'_, Searcher> {
let query = query.into();
Box::pin(async move { Searcher::search(self.clone(), query).await })
}
fn cancel_search(&mut self, id: SearchId) -> AsyncReturn<'_, ()> {
make_body!(
self,
protocol::Request::CancelSearch { id },
@ok
)
}
fn read_dir(
&mut self,
path: impl Into<PathBuf>,
@ -244,7 +309,7 @@ impl DistantChannelExt
) -> AsyncReturn<'_, (Vec<DirEntry>, Vec<Failure>)> {
make_body!(
self,
DistantRequestData::DirRead {
protocol::Request::DirRead {
path: path.into(),
depth,
absolute,
@ -252,8 +317,8 @@ impl DistantChannelExt
include_root
},
|data| match data {
DistantResponseData::DirEntries { entries, errors } => Ok((entries, errors)),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
protocol::Response::DirEntries { entries, errors } => Ok((entries, errors)),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
@ -262,10 +327,10 @@ impl DistantChannelExt
fn read_file(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, Vec<u8>> {
make_body!(
self,
DistantRequestData::FileRead { path: path.into() },
protocol::Request::FileRead { path: path.into() },
|data| match data {
DistantResponseData::Blob { data } => Ok(data),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
protocol::Response::Blob { data } => Ok(data),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
@ -274,10 +339,10 @@ impl DistantChannelExt
fn read_file_text(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, String> {
make_body!(
self,
DistantRequestData::FileReadText { path: path.into() },
protocol::Request::FileReadText { path: path.into() },
|data| match data {
DistantResponseData::Text { data } => Ok(data),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
protocol::Response::Text { data } => Ok(data),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
@ -286,7 +351,7 @@ impl DistantChannelExt
fn remove(&mut self, path: impl Into<PathBuf>, force: bool) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::Remove { path: path.into(), force },
protocol::Request::Remove { path: path.into(), force },
@ok
)
}
@ -294,7 +359,7 @@ impl DistantChannelExt
fn rename(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::Rename { src: src.into(), dst: dst.into() },
protocol::Request::Rename { src: src.into(), dst: dst.into() },
@ok
)
}
@ -314,12 +379,15 @@ impl DistantChannelExt
fn unwatch(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, ()> {
fn inner_unwatch(
channel: &mut Channel<DistantMsg<DistantRequestData>, DistantMsg<DistantResponseData>>,
channel: &mut Channel<
protocol::Msg<protocol::Request>,
protocol::Msg<protocol::Response>,
>,
path: impl Into<PathBuf>,
) -> AsyncReturn<'_, ()> {
make_body!(
channel,
DistantRequestData::Unwatch { path: path.into() },
protocol::Request::Unwatch { path: path.into() },
@ok
)
}
@ -334,7 +402,6 @@ impl DistantChannelExt
cmd: impl Into<String>,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
) -> AsyncReturn<'_, RemoteProcess> {
let cmd = cmd.into();
@ -342,7 +409,6 @@ impl DistantChannelExt
RemoteCommand::new()
.environment(environment)
.current_dir(current_dir)
.persist(persist)
.pty(pty)
.spawn(self.clone(), cmd)
.await
@ -354,7 +420,6 @@ impl DistantChannelExt
cmd: impl Into<String>,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
) -> AsyncReturn<'_, RemoteLspProcess> {
let cmd = cmd.into();
@ -362,7 +427,6 @@ impl DistantChannelExt
RemoteLspCommand::new()
.environment(environment)
.current_dir(current_dir)
.persist(persist)
.pty(pty)
.spawn(self.clone(), cmd)
.await
@ -381,7 +445,6 @@ impl DistantChannelExt
RemoteCommand::new()
.environment(environment)
.current_dir(current_dir)
.persist(false)
.pty(pty)
.spawn(self.clone(), cmd)
.await?
@ -391,13 +454,25 @@ impl DistantChannelExt
}
fn system_info(&mut self) -> AsyncReturn<'_, SystemInfo> {
make_body!(self, DistantRequestData::SystemInfo {}, |data| match data {
DistantResponseData::SystemInfo(x) => Ok(x),
DistantResponseData::Error(x) => Err(io::Error::from(x)),
make_body!(self, protocol::Request::SystemInfo {}, |data| match data {
protocol::Response::SystemInfo(x) => Ok(x),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
})
}
fn version(&mut self) -> AsyncReturn<'_, Version> {
make_body!(self, protocol::Request::Version {}, |data| match data {
protocol::Response::Version(x) => Ok(x),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
})
}
fn protocol_version(&self) -> protocol::semver::Version {
protocol::PROTOCOL_VERSION
}
fn write_file(
&mut self,
path: impl Into<PathBuf>,
@ -405,7 +480,7 @@ impl DistantChannelExt
) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::FileWrite { path: path.into(), data: data.into() },
protocol::Request::FileWrite { path: path.into(), data: data.into() },
@ok
)
}
@ -417,7 +492,7 @@ impl DistantChannelExt
) -> AsyncReturn<'_, ()> {
make_body!(
self,
DistantRequestData::FileWriteText { path: path.into(), text: data.into() },
protocol::Request::FileWriteText { path: path.into(), text: data.into() },
@ok
)
}

@ -1,20 +1,17 @@
use crate::{
client::{
DistantChannel, RemoteCommand, RemoteProcess, RemoteStatus, RemoteStderr, RemoteStdin,
RemoteStdout,
},
data::{Environment, PtySize},
};
use std::io::{self, Cursor, Read};
use std::ops::{Deref, DerefMut};
use std::path::PathBuf;
use futures::stream::{Stream, StreamExt};
use std::{
io::{self, Cursor, Read},
ops::{Deref, DerefMut},
path::PathBuf,
};
use tokio::{
sync::mpsc::{self, error::TryRecvError},
task::JoinHandle,
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TryRecvError;
use tokio::task::JoinHandle;
use crate::client::{
DistantChannel, RemoteCommand, RemoteProcess, RemoteStatus, RemoteStderr, RemoteStdin,
RemoteStdout,
};
use crate::protocol::{Environment, PtySize};
mod msg;
pub use msg::*;
@ -22,10 +19,10 @@ pub use msg::*;
/// A [`RemoteLspProcess`] builder providing support to configure
/// before spawning the process on a remote machine
pub struct RemoteLspCommand {
persist: bool,
pty: Option<PtySize>,
environment: Environment,
current_dir: Option<PathBuf>,
scheme: Option<String>,
}
impl Default for RemoteLspCommand {
@ -38,21 +35,13 @@ impl RemoteLspCommand {
/// Creates a new set of options for a remote LSP process
pub fn new() -> Self {
Self {
persist: false,
pty: None,
environment: Environment::new(),
current_dir: None,
scheme: None,
}
}
/// Sets whether or not the process will be persistent,
/// meaning that it will not be terminated when the
/// connection to the remote machine is terminated
pub fn persist(&mut self, persist: bool) -> &mut Self {
self.persist = persist;
self
}
/// Configures the process to leverage a PTY with the specified size
pub fn pty(&mut self, pty: Option<PtySize>) -> &mut Self {
self.pty = pty;
@ -71,6 +60,12 @@ impl RemoteLspCommand {
self
}
/// Configures the process with a specific scheme to convert rather than `distant://`
pub fn scheme(&mut self, scheme: Option<String>) -> &mut Self {
self.scheme = scheme;
self
}
/// Spawns the specified process on the remote machine using the given session, treating
/// the process like an LSP server
pub async fn spawn(
@ -81,13 +76,21 @@ impl RemoteLspCommand {
let mut command = RemoteCommand::new();
command.environment(self.environment.clone());
command.current_dir(self.current_dir.clone());
command.persist(self.persist);
command.pty(self.pty);
let mut inner = command.spawn(channel, cmd).await?;
let stdin = inner.stdin.take().map(RemoteLspStdin::new);
let stdout = inner.stdout.take().map(RemoteLspStdout::new);
let stderr = inner.stderr.take().map(RemoteLspStderr::new);
let stdin = inner
.stdin
.take()
.map(|x| RemoteLspStdin::new(x, self.scheme.clone()));
let stdout = inner
.stdout
.take()
.map(|x| RemoteLspStdout::new(x, self.scheme.clone()));
let stderr = inner
.stderr
.take()
.map(|x| RemoteLspStderr::new(x, self.scheme.clone()));
Ok(RemoteLspProcess {
inner,
@ -133,11 +136,16 @@ impl DerefMut for RemoteLspProcess {
pub struct RemoteLspStdin {
inner: RemoteStdin,
buf: Option<Vec<u8>>,
scheme: Option<String>,
}
impl RemoteLspStdin {
pub fn new(inner: RemoteStdin) -> Self {
Self { inner, buf: None }
pub fn new(inner: RemoteStdin, scheme: impl Into<Option<String>>) -> Self {
Self {
inner,
buf: None,
scheme: scheme.into(),
}
}
/// Tries to write data to the stdin of a specific remote process
@ -147,7 +155,10 @@ impl RemoteLspStdin {
// Process and then send out each LSP message in our queue
for mut data in queue {
// Convert distant:// to file://
data.mut_content().convert_distant_scheme_to_local();
match self.scheme.as_mut() {
Some(scheme) => data.mut_content().convert_scheme_to_local(scheme),
None => data.mut_content().convert_distant_scheme_to_local(),
}
data.refresh_content_length();
self.inner.try_write_str(data.to_string())?;
}
@ -166,7 +177,10 @@ impl RemoteLspStdin {
// Process and then send out each LSP message in our queue
for mut data in queue {
// Convert distant:// to file://
data.mut_content().convert_distant_scheme_to_local();
match self.scheme.as_mut() {
Some(scheme) => data.mut_content().convert_scheme_to_local(scheme),
None => data.mut_content().convert_distant_scheme_to_local(),
}
data.refresh_content_length();
self.inner.write_str(data.to_string()).await?;
}
@ -211,16 +225,16 @@ pub struct RemoteLspStdout {
}
impl RemoteLspStdout {
pub fn new(inner: RemoteStdout) -> Self {
let (read_task, rx) = spawn_read_task(Box::pin(futures::stream::unfold(
inner,
|mut inner| async move {
pub fn new(inner: RemoteStdout, scheme: impl Into<Option<String>>) -> Self {
let (read_task, rx) = spawn_read_task(
Box::pin(futures::stream::unfold(inner, |mut inner| async move {
match inner.read().await {
Ok(res) => Some((res, inner)),
Err(_) => None,
}
},
)));
})),
scheme,
);
Self { read_task, rx }
}
@ -277,16 +291,16 @@ pub struct RemoteLspStderr {
}
impl RemoteLspStderr {
pub fn new(inner: RemoteStderr) -> Self {
let (read_task, rx) = spawn_read_task(Box::pin(futures::stream::unfold(
inner,
|mut inner| async move {
pub fn new(inner: RemoteStderr, scheme: impl Into<Option<String>>) -> Self {
let (read_task, rx) = spawn_read_task(
Box::pin(futures::stream::unfold(inner, |mut inner| async move {
match inner.read().await {
Ok(res) => Some((res, inner)),
Err(_) => None,
}
},
)));
})),
scheme,
);
Self { read_task, rx }
}
@ -335,10 +349,14 @@ impl Drop for RemoteLspStderr {
}
}
fn spawn_read_task<S>(mut stream: S) -> (JoinHandle<()>, mpsc::Receiver<io::Result<Vec<u8>>>)
fn spawn_read_task<S>(
mut stream: S,
scheme: impl Into<Option<String>>,
) -> (JoinHandle<()>, mpsc::Receiver<io::Result<Vec<u8>>>)
where
S: Stream<Item = Vec<u8>> + Send + Unpin + 'static,
{
let mut scheme = scheme.into();
let (tx, rx) = mpsc::channel::<io::Result<Vec<u8>>>(1);
let read_task = tokio::spawn(async move {
let mut task_buf: Option<Vec<u8>> = None;
@ -366,7 +384,10 @@ where
let mut out = Vec::new();
for mut data in queue {
// Convert file:// to distant://
data.mut_content().convert_local_scheme_to_distant();
match scheme.as_mut() {
Some(scheme) => data.mut_content().convert_local_scheme_to(scheme),
None => data.mut_content().convert_local_scheme_to_distant(),
}
data.refresh_content_length();
out.extend(data.to_bytes());
}
@ -408,38 +429,39 @@ fn read_lsp_messages(input: &[u8]) -> io::Result<(Option<Vec<u8>>, Vec<LspMsg>)>
#[cfg(test)]
mod tests {
use std::future::Future;
use std::time::Duration;
use distant_net::common::{FramedTransport, InmemoryTransport, Request, Response};
use distant_net::Client;
use test_log::test;
use super::*;
use crate::data::{DistantRequestData, DistantResponseData};
use distant_net::{
Client, FramedTransport, InmemoryTransport, IntoSplit, PlainCodec, Request, Response,
TypedAsyncRead, TypedAsyncWrite,
};
use std::{future::Future, time::Duration};
use crate::protocol;
/// Timeout used with timeout function
const TIMEOUT: Duration = Duration::from_millis(50);
// Configures an lsp process with a means to send & receive data from outside
async fn spawn_lsp_process() -> (
FramedTransport<InmemoryTransport, PlainCodec>,
RemoteLspProcess,
) {
async fn spawn_lsp_process() -> (FramedTransport<InmemoryTransport>, RemoteLspProcess) {
let (mut t1, t2) = FramedTransport::pair(100);
let (writer, reader) = t2.into_split();
let session = Client::new(writer, reader).unwrap();
let spawn_task = tokio::spawn(async move {
RemoteLspCommand::new()
.spawn(session.clone_channel(), String::from("cmd arg"))
.await
let client = Client::spawn_inmemory(t2, Default::default());
let spawn_task = tokio::spawn({
let channel = client.clone_channel();
async move {
RemoteLspCommand::new()
.spawn(channel, String::from("cmd arg"))
.await
}
});
// Wait until we get the request from the session
let req: Request<DistantRequestData> = t1.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = t1.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
t1.write(Response::new(
t1.write_frame_for(&Response::new(
req.id,
DistantResponseData::ProcSpawned { id: rand::random() },
protocol::Response::ProcSpawned { id: rand::random() },
))
.await
.unwrap();
@ -471,7 +493,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn stdin_write_should_only_send_out_complete_lsp_messages() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -486,9 +508,9 @@ mod tests {
.unwrap();
// Validate that the outgoing req is a complete LSP message
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantRequestData::ProcStdin { data, .. } => {
protocol::Request::ProcStdin { data, .. } => {
assert_eq!(
data,
make_lsp_msg(serde_json::json!({
@ -501,7 +523,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn stdin_write_should_support_buffering_output_until_a_complete_lsp_message_is_composed()
{
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -520,7 +542,7 @@ mod tests {
tokio::task::yield_now().await;
let result = timeout(
TIMEOUT,
TypedAsyncRead::<Request<DistantRequestData>>::read(&mut transport),
transport.read_frame_as::<Request<protocol::Request>>(),
)
.await;
assert!(result.is_err(), "Unexpectedly got data: {:?}", result);
@ -529,9 +551,9 @@ mod tests {
proc.stdin.as_mut().unwrap().write(msg_b).await.unwrap();
// Validate that the outgoing req is a complete LSP message
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantRequestData::ProcStdin { data, .. } => {
protocol::Request::ProcStdin { data, .. } => {
assert_eq!(
data,
make_lsp_msg(serde_json::json!({
@ -544,7 +566,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn stdin_write_should_only_consume_a_complete_lsp_message_even_if_more_is_written() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -564,9 +586,9 @@ mod tests {
.unwrap();
// Validate that the outgoing req is a complete LSP message
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantRequestData::ProcStdin { data, .. } => {
protocol::Request::ProcStdin { data, .. } => {
assert_eq!(
data,
make_lsp_msg(serde_json::json!({
@ -586,7 +608,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stdin_write_should_support_sending_out_multiple_lsp_messages_if_all_received_at_once()
{
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -613,9 +635,9 @@ mod tests {
.unwrap();
// Validate that the first outgoing req is a complete LSP message matching first
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantRequestData::ProcStdin { data, .. } => {
protocol::Request::ProcStdin { data, .. } => {
assert_eq!(
data,
make_lsp_msg(serde_json::json!({
@ -628,9 +650,9 @@ mod tests {
}
// Validate that the second outgoing req is a complete LSP message matching second
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantRequestData::ProcStdin { data, .. } => {
protocol::Request::ProcStdin { data, .. } => {
assert_eq!(
data,
make_lsp_msg(serde_json::json!({
@ -643,7 +665,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn stdin_write_should_convert_content_with_distant_scheme_to_file_scheme() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -658,9 +680,9 @@ mod tests {
.unwrap();
// Validate that the outgoing req is a complete LSP message
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantRequestData::ProcStdin { data, .. } => {
protocol::Request::ProcStdin { data, .. } => {
// Verify the contents AND headers are as expected; in this case,
// this will also ensure that the Content-Length is adjusted
// when the distant scheme was changed to file
@ -676,15 +698,15 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn stdout_read_should_yield_lsp_messages_as_strings() {
let (mut transport, mut proc) = spawn_lsp_process().await;
// Send complete LSP message as stdout to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStdout {
protocol::Response::ProcStdout {
id: proc.id(),
data: make_lsp_msg(serde_json::json!({
"field1": "a",
@ -706,7 +728,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stdout_read_should_only_yield_complete_lsp_messages() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -718,9 +740,9 @@ mod tests {
// Send half of LSP message over stdout
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStdout {
protocol::Response::ProcStdout {
id: proc.id(),
data: msg_a.to_vec(),
},
@ -736,9 +758,9 @@ mod tests {
// Send other half of LSP message over stdout
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStdout {
protocol::Response::ProcStdout {
id: proc.id(),
data: msg_b.to_vec(),
},
@ -757,7 +779,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stdout_read_should_only_consume_a_complete_lsp_message_even_if_more_output_is_available(
) {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -770,9 +792,9 @@ mod tests {
// Send complete LSP message as stdout to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStdout {
protocol::Response::ProcStdout {
id: proc.id(),
data: format!("{}{}", String::from_utf8(msg).unwrap(), extra).into_bytes(),
},
@ -798,7 +820,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stdout_read_should_support_yielding_multiple_lsp_messages_if_all_received_at_once() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -813,9 +835,9 @@ mod tests {
// Send complete LSP message as stdout to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStdout {
protocol::Response::ProcStdout {
id: proc.id(),
data: format!(
"{}{}",
@ -849,15 +871,15 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stdout_read_should_convert_content_with_file_scheme_to_distant_scheme() {
let (mut transport, mut proc) = spawn_lsp_process().await;
// Send complete LSP message as stdout to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStdout {
protocol::Response::ProcStdout {
id: proc.id(),
data: make_lsp_msg(serde_json::json!({
"field1": "distant://some/path",
@ -879,15 +901,15 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stderr_read_should_yield_lsp_messages_as_strings() {
let (mut transport, mut proc) = spawn_lsp_process().await;
// Send complete LSP message as stderr to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStderr {
protocol::Response::ProcStderr {
id: proc.id(),
data: make_lsp_msg(serde_json::json!({
"field1": "a",
@ -909,7 +931,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stderr_read_should_only_yield_complete_lsp_messages() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -921,9 +943,9 @@ mod tests {
// Send half of LSP message over stderr
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStderr {
protocol::Response::ProcStderr {
id: proc.id(),
data: msg_a.to_vec(),
},
@ -939,9 +961,9 @@ mod tests {
// Send other half of LSP message over stderr
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStderr {
protocol::Response::ProcStderr {
id: proc.id(),
data: msg_b.to_vec(),
},
@ -960,7 +982,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stderr_read_should_only_consume_a_complete_lsp_message_even_if_more_errput_is_available(
) {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -973,9 +995,9 @@ mod tests {
// Send complete LSP message as stderr to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStderr {
protocol::Response::ProcStderr {
id: proc.id(),
data: format!("{}{}", String::from_utf8(msg).unwrap(), extra).into_bytes(),
},
@ -1001,7 +1023,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stderr_read_should_support_yielding_multiple_lsp_messages_if_all_received_at_once() {
let (mut transport, mut proc) = spawn_lsp_process().await;
@ -1016,9 +1038,9 @@ mod tests {
// Send complete LSP message as stderr to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStderr {
protocol::Response::ProcStderr {
id: proc.id(),
data: format!(
"{}{}",
@ -1052,15 +1074,15 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stderr_read_should_convert_content_with_file_scheme_to_distant_scheme() {
let (mut transport, mut proc) = spawn_lsp_process().await;
// Send complete LSP message as stderr to process
transport
.write(Response::new(
.write_frame_for(&Response::new(
proc.origin_id().to_string(),
DistantResponseData::ProcStderr {
protocol::Response::ProcStderr {
id: proc.id(),
data: make_lsp_msg(serde_json::json!({
"field1": "distant://some/path",

@ -1,13 +1,12 @@
use std::fmt;
use std::io::{self, BufRead};
use std::ops::{Deref, DerefMut};
use std::str::FromStr;
use std::string::FromUtf8Error;
use derive_more::{Display, Error, From};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use std::{
fmt,
io::{self, BufRead},
ops::{Deref, DerefMut},
str::FromStr,
string::FromUtf8Error,
};
/// Represents some data being communicated to/from an LSP consisting of a header and content part
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
@ -61,6 +60,7 @@ impl LspMsg {
pub fn header(&self) -> &LspHeader {
&self.header
}
/// Returns a mutable reference to the header part
pub fn mut_header(&mut self) -> &mut LspHeader {
&mut self.header
@ -208,7 +208,7 @@ impl fmt::Display for LspHeader {
write!(f, "Content-Length: {}\r\n", self.content_length)?;
if let Some(ty) = self.content_type.as_ref() {
write!(f, "Content-Type: {}\r\n", ty)?;
write!(f, "Content-Type: {ty}\r\n")?;
}
write!(f, "\r\n")
@ -310,7 +310,7 @@ fn swap_prefix(obj: &mut Map<String, Value>, old: &str, new: &str) {
let check = |s: &String| s.starts_with(old);
let mut mutate = |s: &mut String| {
if let Some(pos) = s.find(old) {
s.replace_range(pos..old.len(), new);
s.replace_range(pos..pos + old.len(), new);
}
};
@ -333,14 +333,24 @@ fn swap_prefix(obj: &mut Map<String, Value>, old: &str, new: &str) {
}
impl LspContent {
/// Converts all URIs with `file://` as the scheme to `distant://` instead
/// Converts all URIs with `file` as the scheme to `distant` instead
pub fn convert_local_scheme_to_distant(&mut self) {
swap_prefix(&mut self.0, "file:", "distant:");
self.convert_local_scheme_to("distant")
}
/// Converts all URIs with `distant://` as the scheme to `file://` instead
/// Converts all URIs with `file` as the scheme to `scheme` instead
pub fn convert_local_scheme_to(&mut self, scheme: &str) {
swap_prefix(&mut self.0, "file:", &format!("{scheme}:"));
}
/// Converts all URIs with `distant` as the scheme to `file` instead
pub fn convert_distant_scheme_to_local(&mut self) {
swap_prefix(&mut self.0, "distant:", "file:");
self.convert_scheme_to_local("distant")
}
/// Converts all URIs with `scheme` as the scheme to `file` instead
pub fn convert_scheme_to_local(&mut self, scheme: &str) {
swap_prefix(&mut self.0, &format!("{scheme}:"), "file:");
}
}
@ -395,6 +405,8 @@ impl FromStr for LspContent {
#[cfg(test)]
mod tests {
use test_log::test;
use super::*;
macro_rules! make_obj {
@ -686,6 +698,51 @@ mod tests {
);
}
#[test]
fn content_convert_local_scheme_to_should_convert_keys_and_values() {
let mut content = LspContent(make_obj!({
"distant://key1": "file://value1",
"file://key2": "distant://value2",
"key3": ["file://value3", "distant://value4"],
"key4": {
"distant://key5": "file://value5",
"file://key6": "distant://value6",
"key7": [
{
"distant://key8": "file://value8",
"file://key9": "distant://value9",
}
]
},
"key10": null,
"key11": 123,
"key12": true,
}));
content.convert_local_scheme_to("custom");
assert_eq!(
content.0,
make_obj!({
"distant://key1": "custom://value1",
"custom://key2": "distant://value2",
"key3": ["custom://value3", "distant://value4"],
"key4": {
"distant://key5": "custom://value5",
"custom://key6": "distant://value6",
"key7": [
{
"distant://key8": "custom://value8",
"custom://key9": "distant://value9",
}
]
},
"key10": null,
"key11": 123,
"key12": true,
})
);
}
#[test]
fn content_convert_distant_scheme_to_local_should_convert_keys_and_values() {
let mut content = LspContent(make_obj!({
@ -730,4 +787,49 @@ mod tests {
})
);
}
#[test]
fn content_convert_scheme_to_local_should_convert_keys_and_values() {
let mut content = LspContent(make_obj!({
"custom://key1": "file://value1",
"file://key2": "custom://value2",
"key3": ["file://value3", "custom://value4"],
"key4": {
"custom://key5": "file://value5",
"file://key6": "custom://value6",
"key7": [
{
"custom://key8": "file://value8",
"file://key9": "custom://value9",
}
]
},
"key10": null,
"key11": 123,
"key12": true,
}));
content.convert_scheme_to_local("custom");
assert_eq!(
content.0,
make_obj!({
"file://key1": "file://value1",
"file://key2": "file://value2",
"key3": ["file://value3", "file://value4"],
"key4": {
"file://key5": "file://value5",
"file://key6": "file://value6",
"key7": [
{
"file://key8": "file://value8",
"file://key9": "file://value9",
}
]
},
"key10": null,
"key11": 123,
"key12": true,
})
);
}
}

@ -1,23 +1,17 @@
use crate::{
client::DistantChannel,
constants::CLIENT_PIPE_CAPACITY,
data::{Cmd, DistantRequestData, DistantResponseData, Environment, ProcessId, PtySize},
DistantMsg,
};
use distant_net::{Mailbox, Request, Response};
use std::path::PathBuf;
use std::sync::Arc;
use distant_net::client::Mailbox;
use distant_net::common::{Request, Response};
use log::*;
use std::{path::PathBuf, sync::Arc};
use tokio::{
io,
sync::{
mpsc::{
self,
error::{TryRecvError, TrySendError},
},
RwLock,
},
task::JoinHandle,
};
use tokio::io;
use tokio::sync::mpsc::error::{TryRecvError, TrySendError};
use tokio::sync::{mpsc, RwLock};
use tokio::task::JoinHandle;
use crate::client::DistantChannel;
use crate::constants::CLIENT_PIPE_CAPACITY;
use crate::protocol::{self, Cmd, Environment, ProcessId, PtySize};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RemoteOutput {
@ -44,7 +38,6 @@ type StatusResult = io::Result<RemoteStatus>;
/// A [`RemoteProcess`] builder providing support to configure
/// before spawning the process on a remote machine
pub struct RemoteCommand {
persist: bool,
pty: Option<PtySize>,
environment: Environment,
current_dir: Option<PathBuf>,
@ -60,21 +53,12 @@ impl RemoteCommand {
/// Creates a new set of options for a remote process
pub fn new() -> Self {
Self {
persist: false,
pty: None,
environment: Environment::new(),
current_dir: None,
}
}
/// Sets whether or not the process will be persistent,
/// meaning that it will not be terminated when the
/// connection to the remote machine is terminated
pub fn persist(&mut self, persist: bool) -> &mut Self {
self.persist = persist;
self
}
/// Configures the process to leverage a PTY with the specified size
pub fn pty(&mut self, pty: Option<PtySize>) -> &mut Self {
self.pty = pty;
@ -103,10 +87,9 @@ impl RemoteCommand {
// Submit our run request and get back a mailbox for responses
let mut mailbox = channel
.mail(Request::new(DistantMsg::Single(
DistantRequestData::ProcSpawn {
.mail(Request::new(protocol::Msg::Single(
protocol::Request::ProcSpawn {
cmd: Cmd::from(cmd),
persist: self.persist,
pty: self.pty,
environment: self.environment.clone(),
current_dir: self.current_dir.clone(),
@ -119,15 +102,17 @@ impl RemoteCommand {
Some(res) => {
let origin_id = res.origin_id;
match res.payload {
DistantMsg::Single(DistantResponseData::ProcSpawned { id }) => (id, origin_id),
DistantMsg::Single(DistantResponseData::Error(x)) => return Err(x.into()),
DistantMsg::Single(x) => {
protocol::Msg::Single(protocol::Response::ProcSpawned { id }) => {
(id, origin_id)
}
protocol::Msg::Single(protocol::Response::Error(x)) => return Err(x.into()),
protocol::Msg::Single(x) => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Got response type of {}", x.as_ref()),
))
}
DistantMsg::Batch(_) => {
protocol::Msg::Batch(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Got batch instead of single response",
@ -507,7 +492,7 @@ async fn process_outgoing_requests(
match data {
Some(data) => channel.fire(
Request::new(
DistantMsg::Single(DistantRequestData::ProcStdin { id, data })
protocol::Msg::Single(protocol::Request::ProcStdin { id, data })
)
).await?,
None => break Err(errors::dead_channel()),
@ -517,7 +502,7 @@ async fn process_outgoing_requests(
match size {
Some(size) => channel.fire(
Request::new(
DistantMsg::Single(DistantRequestData::ProcResizePty { id, size })
protocol::Msg::Single(protocol::Request::ProcResizePty { id, size })
)
).await?,
None => break Err(errors::dead_channel()),
@ -526,7 +511,7 @@ async fn process_outgoing_requests(
msg = kill_rx.recv() => {
if msg.is_some() {
channel.fire(Request::new(
DistantMsg::Single(DistantRequestData::ProcKill { id })
protocol::Msg::Single(protocol::Request::ProcKill { id })
)).await?;
break Ok(());
} else {
@ -543,7 +528,7 @@ async fn process_outgoing_requests(
/// Helper function that loops, processing incoming stdout & stderr requests from a remote process
async fn process_incoming_responses(
proc_id: ProcessId,
mut mailbox: Mailbox<Response<DistantMsg<DistantResponseData>>>,
mut mailbox: Mailbox<Response<protocol::Msg<protocol::Response>>>,
stdout_tx: mpsc::Sender<Vec<u8>>,
stderr_tx: mpsc::Sender<Vec<u8>>,
kill_tx: mpsc::Sender<()>,
@ -553,7 +538,7 @@ async fn process_incoming_responses(
// Check if any of the payload data is the termination
let exit_status = payload.iter().find_map(|data| match data {
DistantResponseData::ProcDone { id, success, code } if *id == proc_id => {
protocol::Response::ProcDone { id, success, code } if *id == proc_id => {
Some((*success, *code))
}
_ => None,
@ -563,10 +548,10 @@ async fn process_incoming_responses(
// TODO: What should we do about unexpected data? For now, just ignore
for data in payload {
match data {
DistantResponseData::ProcStdout { id, data } if id == proc_id => {
protocol::Response::ProcStdout { id, data } if id == proc_id => {
let _ = stdout_tx.send(data).await;
}
DistantResponseData::ProcStderr { id, data } if id == proc_id => {
protocol::Response::ProcStderr { id, data } if id == proc_id => {
let _ = stderr_tx.send(data).await;
}
_ => {}
@ -603,27 +588,22 @@ mod errors {
#[cfg(test)]
mod tests {
use super::*;
use crate::{
client::DistantClient,
data::{Error, ErrorKind},
};
use distant_net::{
Client, FramedTransport, InmemoryTransport, IntoSplit, PlainCodec, Response,
TypedAsyncRead, TypedAsyncWrite,
};
use std::time::Duration;
fn make_session() -> (
FramedTransport<InmemoryTransport, PlainCodec>,
DistantClient,
) {
use distant_net::common::{FramedTransport, InmemoryTransport, Response};
use distant_net::Client;
use test_log::test;
use super::*;
use crate::client::DistantClient;
use crate::protocol::{Error, ErrorKind};
fn make_session() -> (FramedTransport<InmemoryTransport>, DistantClient) {
let (t1, t2) = FramedTransport::pair(100);
let (writer, reader) = t2.into_split();
(t1, Client::new(writer, reader).unwrap())
(t1, Client::spawn_inmemory(t2, Default::default()))
}
#[tokio::test]
#[test(tokio::test)]
async fn spawn_should_return_invalid_data_if_received_batch_response() {
let (mut transport, session) = make_session();
@ -636,13 +616,14 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Batch(vec![DistantResponseData::ProcSpawned { id: 1 }]),
protocol::Msg::Batch(vec![protocol::Response::ProcSpawned { id: 1 }]),
))
.await
.unwrap();
@ -654,7 +635,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn spawn_should_return_invalid_data_if_did_not_get_a_indicator_that_process_started() {
let (mut transport, session) = make_session();
@ -667,13 +648,14 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::Error(Error {
protocol::Msg::Single(protocol::Response::Error(Error {
kind: ErrorKind::BrokenPipe,
description: String::from("some error"),
})),
@ -688,7 +670,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn kill_should_return_error_if_internal_tasks_already_completed() {
let (mut transport, session) = make_session();
@ -701,14 +683,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -726,7 +709,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn kill_should_send_proc_kill_request_and_then_cause_stdin_forwarding_to_close() {
let (mut transport, session) = make_session();
@ -739,14 +722,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -756,9 +740,10 @@ mod tests {
assert!(proc.kill().await.is_ok(), "Failed to send kill request");
// Verify the kill request was sent
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantMsg::Single(DistantRequestData::ProcKill { id: proc_id }) => {
protocol::Msg::Single(protocol::Request::ProcKill { id: proc_id }) => {
assert_eq!(proc_id, id)
}
x => panic!("Unexpected request: {:?}", x),
@ -777,7 +762,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn stdin_should_be_forwarded_from_receiver_field() {
let (mut transport, session) = make_session();
@ -790,14 +775,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -812,9 +798,10 @@ mod tests {
.unwrap();
// Verify that a request is made through the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
match req.payload {
DistantMsg::Single(DistantRequestData::ProcStdin { id, data }) => {
protocol::Msg::Single(protocol::Request::ProcStdin { id, data }) => {
assert_eq!(id, 12345);
assert_eq!(data, b"some input");
}
@ -822,7 +809,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn stdout_should_be_forwarded_to_receiver_field() {
let (mut transport, session) = make_session();
@ -835,14 +822,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -851,9 +839,9 @@ mod tests {
let mut proc = spawn_task.await.unwrap().unwrap();
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcStdout {
protocol::Msg::Single(protocol::Response::ProcStdout {
id,
data: b"some out".to_vec(),
}),
@ -865,7 +853,7 @@ mod tests {
assert_eq!(out, b"some out");
}
#[tokio::test]
#[test(tokio::test)]
async fn stderr_should_be_forwarded_to_receiver_field() {
let (mut transport, session) = make_session();
@ -878,14 +866,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -894,9 +883,9 @@ mod tests {
let mut proc = spawn_task.await.unwrap().unwrap();
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcStderr {
protocol::Msg::Single(protocol::Response::ProcStderr {
id,
data: b"some err".to_vec(),
}),
@ -908,7 +897,7 @@ mod tests {
assert_eq!(out, b"some err");
}
#[tokio::test]
#[test(tokio::test)]
async fn status_should_return_none_if_not_done() {
let (mut transport, session) = make_session();
@ -921,14 +910,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -940,7 +930,7 @@ mod tests {
assert_eq!(result, None, "Unexpectedly got proc status: {:?}", result);
}
#[tokio::test]
#[test(tokio::test)]
async fn status_should_return_false_for_success_if_internal_tasks_fail() {
let (mut transport, session) = make_session();
@ -953,14 +943,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -986,7 +977,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn status_should_return_process_status_when_done() {
let (mut transport, session) = make_session();
@ -999,14 +990,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -1016,9 +1008,9 @@ mod tests {
// Send a process completion response to pass along exit status and conclude wait
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcDone {
protocol::Msg::Single(protocol::Response::ProcDone {
id,
success: true,
code: Some(123),
@ -1040,7 +1032,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn wait_should_return_error_if_internal_tasks_fail() {
let (mut transport, session) = make_session();
@ -1053,14 +1045,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -1075,7 +1068,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn wait_should_return_error_if_connection_terminates_before_receiving_done_response() {
let (mut transport, session) = make_session();
@ -1088,14 +1081,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -1117,7 +1111,7 @@ mod tests {
}
}
#[tokio::test]
#[test(tokio::test)]
async fn receiving_done_response_should_result_in_wait_returning_exit_information() {
let (mut transport, session) = make_session();
@ -1130,14 +1124,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -1148,9 +1143,9 @@ mod tests {
// Send a process completion response to pass along exit status and conclude wait
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcDone {
protocol::Msg::Single(protocol::Response::ProcDone {
id,
success: false,
code: Some(123),
@ -1169,7 +1164,7 @@ mod tests {
);
}
#[tokio::test]
#[test(tokio::test)]
async fn receiving_done_response_should_result_in_output_returning_exit_information() {
let (mut transport, session) = make_session();
@ -1182,14 +1177,15 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantMsg<DistantRequestData>> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Msg<protocol::Request>> =
transport.read_frame_as().await.unwrap().unwrap();
// Send back a response through the session
let id = 12345;
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcSpawned { id }),
protocol::Msg::Single(protocol::Response::ProcSpawned { id }),
))
.await
.unwrap();
@ -1200,9 +1196,9 @@ mod tests {
// Send some stdout
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcStdout {
protocol::Msg::Single(protocol::Response::ProcStdout {
id,
data: b"some out".to_vec(),
}),
@ -1212,9 +1208,9 @@ mod tests {
// Send some stderr
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantMsg::Single(DistantResponseData::ProcStderr {
protocol::Msg::Single(protocol::Response::ProcStderr {
id,
data: b"some err".to_vec(),
}),
@ -1224,9 +1220,9 @@ mod tests {
// Send a process completion response to pass along exit status and conclude wait
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantMsg::Single(DistantResponseData::ProcDone {
protocol::Msg::Single(protocol::Response::ProcDone {
id,
success: false,
code: Some(123),

@ -0,0 +1,624 @@
use std::{fmt, io};
use distant_net::common::Request;
use log::*;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use crate::client::{DistantChannel, DistantChannelExt};
use crate::constants::CLIENT_SEARCHER_CAPACITY;
use crate::protocol::{self, SearchId, SearchQuery, SearchQueryMatch};
/// Represents a searcher for files, directories, and symlinks on the filesystem
pub struct Searcher {
    // Channel to the remote server, kept so the search can later be cancelled
    channel: DistantChannel,
    // Server-assigned id of this search query
    id: SearchId,
    // The query this searcher was started with (returned by `query()`)
    query: SearchQuery,
    // Background task that forwards incoming search results into `rx`
    task: JoinHandle<()>,
    // Receiver of matches produced by `task`; drained via `next()`
    rx: mpsc::Receiver<SearchQueryMatch>,
}
impl fmt::Debug for Searcher {
    /// Formats the searcher for debugging, exposing only its id and query
    /// (the channel, task handle, and receiver are intentionally omitted).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("Searcher");
        builder.field("id", &self.id);
        builder.field("query", &self.query);
        builder.finish()
    }
}
impl Searcher {
    /// Creates a searcher for some query.
    ///
    /// Sends a `Search` request over the channel, waits for a `SearchStarted`
    /// acknowledgement (buffering any results that arrive before it), and then
    /// spawns a background task that forwards subsequent results to the
    /// returned searcher until a `SearchDone` response arrives.
    ///
    /// Returns an error if the server reports an error, replies with an
    /// unexpected response type, or the mailbox closes before `SearchStarted`.
    pub async fn search(mut channel: DistantChannel, query: SearchQuery) -> io::Result<Self> {
        trace!("Searching using {query:?}",);

        // Submit our run request and get back a mailbox for responses
        let mut mailbox = channel
            .mail(Request::new(protocol::Msg::Single(
                protocol::Request::Search {
                    query: query.clone(),
                },
            )))
            .await?;

        let (tx, rx) = mpsc::channel(CLIENT_SEARCHER_CAPACITY);

        // Wait to get the confirmation of watch as either ok or error
        let mut queue: Vec<SearchQueryMatch> = Vec::new();
        let mut search_id = None;
        while let Some(res) = mailbox.next().await {
            for data in res.payload.into_vec() {
                match data {
                    // If we get results before the started indicator, queue them up
                    protocol::Response::SearchResults { matches, .. } => {
                        queue.extend(matches);
                    }

                    // Once we get the started indicator, mark as ready to go
                    protocol::Response::SearchStarted { id } => {
                        trace!("[Query {id}] Searcher has started");
                        search_id = Some(id);
                    }

                    // If we get an explicit error, convert and return it
                    protocol::Response::Error(x) => return Err(io::Error::from(x)),

                    // Otherwise, we got something unexpected, and report as such
                    x => {
                        return Err(io::Error::new(
                            io::ErrorKind::Other,
                            format!("Unexpected response: {x:?}"),
                        ))
                    }
                }
            }

            // Exit if we got the confirmation
            // NOTE: Doing this later because we want to make sure the entire payload is processed
            // first before exiting the loop
            if search_id.is_some() {
                break;
            }
        }

        let search_id = match search_id {
            // Send out any of our queued changes that we got prior to the acknowledgement
            Some(id) => {
                trace!("[Query {id}] Forwarding {} queued matches", queue.len());
                for r#match in queue.drain(..) {
                    if tx.send(r#match).await.is_err() {
                        // Receiver side was dropped before we could deliver a buffered match
                        return Err(io::Error::new(
                            io::ErrorKind::Other,
                            format!("[Query {id}] Queue search match dropped"),
                        ));
                    }
                }
                id
            }

            // If we never received an acknowledgement of search before the mailbox closed,
            // fail with a missing confirmation error
            None => {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Search query missing started confirmation",
                ))
            }
        };

        // Spawn a task that continues to look for search result events and the conclusion of the
        // search, discarding anything else that it gets
        let task = tokio::spawn({
            async move {
                while let Some(res) = mailbox.next().await {
                    let mut done = false;
                    for data in res.payload.into_vec() {
                        match data {
                            protocol::Response::SearchResults { matches, .. } => {
                                // If we can't queue up a match anymore, we've
                                // been closed and therefore want to quit
                                // NOTE(review): this `break` only exits the payload loop, so the
                                // task keeps draining the mailbox until SearchDone arrives or the
                                // mailbox closes — confirm this is intended.
                                if tx.is_closed() {
                                    break;
                                }

                                // Otherwise, send over the matches
                                for r#match in matches {
                                    if let Err(x) = tx.send(r#match).await {
                                        error!(
                                            "[Query {search_id}] Searcher failed to send match {:?}",
                                            x.0
                                        );
                                        break;
                                    }
                                }
                            }

                            // Received completion indicator, so close out
                            protocol::Response::SearchDone { .. } => {
                                trace!("[Query {search_id}] Searcher has finished");
                                done = true;
                                break;
                            }

                            _ => continue,
                        }
                    }
                    if done {
                        break;
                    }
                }
            }
        });

        Ok(Self {
            id: search_id,
            query,
            channel,
            task,
            rx,
        })
    }

    /// Returns a reference to the query this searcher is running
    pub fn query(&self) -> &SearchQuery {
        &self.query
    }

    /// Returns true if the searcher is still actively searching
    pub fn is_active(&self) -> bool {
        !self.task.is_finished()
    }

    /// Returns the next match detected by the searcher, or none if the searcher has concluded
    pub async fn next(&mut self) -> Option<SearchQueryMatch> {
        self.rx.recv().await
    }

    /// Cancels the search being performed by the watcher
    ///
    /// Sends a cancellation request to the server and aborts the local
    /// forwarding task; matches already buffered in `rx` remain readable.
    pub async fn cancel(&mut self) -> io::Result<()> {
        trace!("[Query {}] Cancelling search", self.id);
        self.channel.cancel_search(self.id).await?;

        // Kill our task that processes inbound matches if we have successfully stopped searching
        self.task.abort();

        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use std::path::PathBuf;
    use std::sync::Arc;

    use distant_net::common::{FramedTransport, InmemoryTransport, Response};
    use distant_net::Client;
    use test_log::test;
    use tokio::sync::Mutex;

    use super::*;
    use crate::protocol::{
        SearchQueryCondition, SearchQueryMatchData, SearchQueryOptions, SearchQueryPathMatch,
        SearchQuerySubmatch, SearchQueryTarget,
    };
    use crate::DistantClient;

    // Creates an in-memory client paired with the transport used to play the
    // role of the server side in each test
    fn make_session() -> (FramedTransport<InmemoryTransport>, DistantClient) {
        let (t1, t2) = FramedTransport::pair(100);
        (t1, Client::spawn_inmemory(t2, Default::default()))
    }

    #[test(tokio::test)]
    async fn searcher_should_have_query_reflect_ongoing_query() {
        let (mut transport, session) = make_session();
        let test_query = SearchQuery {
            paths: vec![PathBuf::from("/some/test/path")],
            target: SearchQueryTarget::Path,
            condition: SearchQueryCondition::Regex {
                value: String::from("."),
            },
            options: SearchQueryOptions::default(),
        };

        // Create a task for searcher as we need to handle the request and a response
        // in a separate async block
        let search_task = {
            let test_query = test_query.clone();
            tokio::spawn(async move { Searcher::search(session.clone_channel(), test_query).await })
        };

        // Wait until we get the request from the session
        let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();

        // Send back an acknowledgement that a search was started
        transport
            .write_frame_for(&Response::new(
                req.id,
                protocol::Response::SearchStarted { id: rand::random() },
            ))
            .await
            .unwrap();

        // Get the searcher and verify the query
        let searcher = search_task.await.unwrap().unwrap();
        assert_eq!(searcher.query(), &test_query);
    }

    #[test(tokio::test)]
    async fn searcher_should_support_getting_next_match() {
        let (mut transport, session) = make_session();
        let test_query = SearchQuery {
            paths: vec![PathBuf::from("/some/test/path")],
            target: SearchQueryTarget::Path,
            condition: SearchQueryCondition::Regex {
                value: String::from("."),
            },
            options: SearchQueryOptions::default(),
        };

        // Create a task for searcher as we need to handle the request and a response
        // in a separate async block
        let search_task =
            tokio::spawn(
                async move { Searcher::search(session.clone_channel(), test_query).await },
            );

        // Wait until we get the request from the session
        let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();

        // Send back an acknowledgement that a searcher was created
        let id = rand::random::<SearchId>();
        transport
            .write_frame_for(&Response::new(
                req.id.clone(),
                protocol::Response::SearchStarted { id },
            ))
            .await
            .unwrap();

        // Get the searcher
        let mut searcher = search_task.await.unwrap().unwrap();

        // Send some matches related to the file
        // (two SearchResults payloads batched in one frame)
        transport
            .write_frame_for(&Response::new(
                req.id,
                vec![
                    protocol::Response::SearchResults {
                        id,
                        matches: vec![
                            SearchQueryMatch::Path(SearchQueryPathMatch {
                                path: PathBuf::from("/some/path/1"),
                                submatches: vec![SearchQuerySubmatch {
                                    r#match: SearchQueryMatchData::Text("test match".to_string()),
                                    start: 3,
                                    end: 7,
                                }],
                            }),
                            SearchQueryMatch::Path(SearchQueryPathMatch {
                                path: PathBuf::from("/some/path/2"),
                                submatches: vec![SearchQuerySubmatch {
                                    r#match: SearchQueryMatchData::Text("test match 2".to_string()),
                                    start: 88,
                                    end: 99,
                                }],
                            }),
                        ],
                    },
                    protocol::Response::SearchResults {
                        id,
                        matches: vec![SearchQueryMatch::Path(SearchQueryPathMatch {
                            path: PathBuf::from("/some/path/3"),
                            submatches: vec![SearchQuerySubmatch {
                                r#match: SearchQueryMatchData::Text("test match 3".to_string()),
                                start: 5,
                                end: 9,
                            }],
                        })],
                    },
                ],
            ))
            .await
            .unwrap();

        // Verify that the searcher gets the matches, one at a time
        let m = searcher.next().await.expect("Searcher closed unexpectedly");
        assert_eq!(
            m,
            SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/1"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match".to_string()),
                    start: 3,
                    end: 7,
                }],
            })
        );

        let m = searcher.next().await.expect("Searcher closed unexpectedly");
        assert_eq!(
            m,
            SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/2"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match 2".to_string()),
                    start: 88,
                    end: 99,
                }],
            }),
        );

        let m = searcher.next().await.expect("Searcher closed unexpectedly");
        assert_eq!(
            m,
            SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/3"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match 3".to_string()),
                    start: 5,
                    end: 9,
                }],
            })
        );
    }

    #[test(tokio::test)]
    async fn searcher_should_distinguish_match_events_and_only_receive_matches_for_itself() {
        let (mut transport, session) = make_session();
        let test_query = SearchQuery {
            paths: vec![PathBuf::from("/some/test/path")],
            target: SearchQueryTarget::Path,
            condition: SearchQueryCondition::Regex {
                value: String::from("."),
            },
            options: SearchQueryOptions::default(),
        };

        // Create a task for searcher as we need to handle the request and a response
        // in a separate async block
        let search_task =
            tokio::spawn(
                async move { Searcher::search(session.clone_channel(), test_query).await },
            );

        // Wait until we get the request from the session
        let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();

        // Send back an acknowledgement that a searcher was created
        let id = rand::random();
        transport
            .write_frame_for(&Response::new(
                req.id.clone(),
                protocol::Response::SearchStarted { id },
            ))
            .await
            .unwrap();

        // Get the searcher
        let mut searcher = search_task.await.unwrap().unwrap();

        // Send a match from the appropriate origin
        transport
            .write_frame_for(&Response::new(
                req.id.clone(),
                protocol::Response::SearchResults {
                    id,
                    matches: vec![SearchQueryMatch::Path(SearchQueryPathMatch {
                        path: PathBuf::from("/some/path/1"),
                        submatches: vec![SearchQuerySubmatch {
                            r#match: SearchQueryMatchData::Text("test match".to_string()),
                            start: 3,
                            end: 7,
                        }],
                    })],
                },
            ))
            .await
            .unwrap();

        // Send a match from a different origin (altered origin id), which the
        // mailbox should not deliver to this searcher
        transport
            .write_frame_for(&Response::new(
                req.id.clone() + "1",
                protocol::Response::SearchResults {
                    id,
                    matches: vec![SearchQueryMatch::Path(SearchQueryPathMatch {
                        path: PathBuf::from("/some/path/2"),
                        submatches: vec![SearchQuerySubmatch {
                            r#match: SearchQueryMatchData::Text("test match 2".to_string()),
                            start: 88,
                            end: 99,
                        }],
                    })],
                },
            ))
            .await
            .unwrap();

        // Send a match from the appropriate origin
        transport
            .write_frame_for(&Response::new(
                req.id,
                protocol::Response::SearchResults {
                    id,
                    matches: vec![SearchQueryMatch::Path(SearchQueryPathMatch {
                        path: PathBuf::from("/some/path/3"),
                        submatches: vec![SearchQuerySubmatch {
                            r#match: SearchQueryMatchData::Text("test match 3".to_string()),
                            start: 5,
                            end: 9,
                        }],
                    })],
                },
            ))
            .await
            .unwrap();

        // Verify that the searcher gets the matches, one at a time
        // (the match from the foreign origin is skipped)
        let m = searcher.next().await.expect("Searcher closed unexpectedly");
        assert_eq!(
            m,
            SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/1"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match".to_string()),
                    start: 3,
                    end: 7,
                }],
            })
        );

        // NOTE(review): message says "Watcher" but this is the searcher —
        // runtime string left as-is
        let m = searcher.next().await.expect("Watcher closed unexpectedly");
        assert_eq!(
            m,
            SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/3"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match 3".to_string()),
                    start: 5,
                    end: 9,
                }],
            })
        );
    }

    #[test(tokio::test)]
    async fn searcher_should_stop_receiving_events_if_cancelled() {
        let (mut transport, session) = make_session();
        let test_query = SearchQuery {
            paths: vec![PathBuf::from("/some/test/path")],
            target: SearchQueryTarget::Path,
            condition: SearchQueryCondition::Regex {
                value: String::from("."),
            },
            options: SearchQueryOptions::default(),
        };

        // Create a task for searcher as we need to handle the request and a response
        // in a separate async block
        let search_task =
            tokio::spawn(
                async move { Searcher::search(session.clone_channel(), test_query).await },
            );

        // Wait until we get the request from the session
        let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();

        // Send back an acknowledgement that a searcher was created
        let id = rand::random::<SearchId>();
        transport
            .write_frame_for(&Response::new(
                req.id.clone(),
                protocol::Response::SearchStarted { id },
            ))
            .await
            .unwrap();

        // Send some matches from the appropriate origin
        transport
            .write_frame_for(&Response::new(
                req.id,
                protocol::Response::SearchResults {
                    id,
                    matches: vec![
                        SearchQueryMatch::Path(SearchQueryPathMatch {
                            path: PathBuf::from("/some/path/1"),
                            submatches: vec![SearchQuerySubmatch {
                                r#match: SearchQueryMatchData::Text("test match".to_string()),
                                start: 3,
                                end: 7,
                            }],
                        }),
                        SearchQueryMatch::Path(SearchQueryPathMatch {
                            path: PathBuf::from("/some/path/2"),
                            submatches: vec![SearchQuerySubmatch {
                                r#match: SearchQueryMatchData::Text("test match 2".to_string()),
                                start: 88,
                                end: 99,
                            }],
                        }),
                    ],
                },
            ))
            .await
            .unwrap();

        // Wait a little bit for all matches to be queued
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;

        // Create a task for for cancelling as we need to handle the request and a response
        // in a separate async block
        let searcher = Arc::new(Mutex::new(search_task.await.unwrap().unwrap()));

        // Verify that the searcher gets the first match
        let m = searcher
            .lock()
            .await
            .next()
            .await
            .expect("Searcher closed unexpectedly");
        assert_eq!(
            m,
            SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/1"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match".to_string()),
                    start: 3,
                    end: 7,
                }],
            }),
        );

        // Cancel the search, verify the request is sent out, and respond with ok
        let searcher_2 = Arc::clone(&searcher);
        let cancel_task = tokio::spawn(async move { searcher_2.lock().await.cancel().await });

        let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
        transport
            .write_frame_for(&Response::new(req.id.clone(), protocol::Response::Ok))
            .await
            .unwrap();

        // Wait for the cancel to complete
        cancel_task.await.unwrap().unwrap();

        // Send a match that will get ignored
        transport
            .write_frame_for(&Response::new(
                req.id,
                protocol::Response::SearchResults {
                    id,
                    matches: vec![SearchQueryMatch::Path(SearchQueryPathMatch {
                        path: PathBuf::from("/some/path/3"),
                        submatches: vec![SearchQuerySubmatch {
                            r#match: SearchQueryMatchData::Text("test match 3".to_string()),
                            start: 5,
                            end: 9,
                        }],
                    })],
                },
            ))
            .await
            .unwrap();

        // Verify that we get any remaining matches that were received before cancel,
        // but nothing new after that
        assert_eq!(
            searcher.lock().await.next().await,
            Some(SearchQueryMatch::Path(SearchQueryPathMatch {
                path: PathBuf::from("/some/path/2"),
                submatches: vec![SearchQuerySubmatch {
                    r#match: SearchQueryMatchData::Text("test match 2".to_string()),
                    start: 88,
                    end: 99,
                }],
            }))
        );
        assert_eq!(searcher.lock().await.next().await, None);
    }
}

@ -1,16 +1,14 @@
use crate::{
client::{DistantChannel, DistantChannelExt},
constants::CLIENT_WATCHER_CAPACITY,
data::{Change, ChangeKindSet, DistantRequestData, DistantResponseData},
DistantMsg,
};
use distant_net::Request;
use std::path::{Path, PathBuf};
use std::{fmt, io};
use distant_net::common::Request;
use log::*;
use std::{
fmt, io,
path::{Path, PathBuf},
};
use tokio::{sync::mpsc, task::JoinHandle};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use crate::client::{DistantChannel, DistantChannelExt};
use crate::constants::CLIENT_WATCHER_CAPACITY;
use crate::protocol::{self, Change, ChangeKindSet};
/// Represents a watcher of some path on a remote machine
pub struct Watcher {
@ -46,23 +44,23 @@ impl Watcher {
if only.is_empty() {
String::new()
} else {
format!(" (only = {})", only)
format!(" (only = {only})")
},
if except.is_empty() {
String::new()
} else {
format!(" (except = {})", except)
format!(" (except = {except})")
},
);
// Submit our run request and get back a mailbox for responses
let mut mailbox = channel
.mail(Request::new(DistantMsg::Single(
DistantRequestData::Watch {
.mail(Request::new(protocol::Msg::Single(
protocol::Request::Watch {
path: path.to_path_buf(),
recursive,
only: only.into_vec(),
except: except.into_vec(),
only: only.into_sorted_vec(),
except: except.into_sorted_vec(),
},
)))
.await?;
@ -75,15 +73,15 @@ impl Watcher {
while let Some(res) = mailbox.next().await {
for data in res.payload.into_vec() {
match data {
DistantResponseData::Changed(change) => queue.push(change),
DistantResponseData::Ok => {
protocol::Response::Changed(change) => queue.push(change),
protocol::Response::Ok => {
confirmed = true;
}
DistantResponseData::Error(x) => return Err(io::Error::from(x)),
protocol::Response::Error(x) => return Err(io::Error::from(x)),
x => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Unexpected response: {:?}", x),
format!("Unexpected response: {x:?}"),
))
}
}
@ -119,7 +117,7 @@ impl Watcher {
while let Some(res) = mailbox.next().await {
for data in res.payload.into_vec() {
match data {
DistantResponseData::Changed(change) => {
protocol::Response::Changed(change) => {
// If we can't queue up a change anymore, we've
// been closed and therefore want to quit
if tx.is_closed() {
@ -181,26 +179,23 @@ impl Watcher {
#[cfg(test)]
mod tests {
use super::*;
use crate::data::ChangeKind;
use crate::DistantClient;
use distant_net::{
Client, FramedTransport, InmemoryTransport, IntoSplit, PlainCodec, Response,
TypedAsyncRead, TypedAsyncWrite,
};
use std::sync::Arc;
use distant_net::common::{FramedTransport, InmemoryTransport, Response};
use distant_net::Client;
use test_log::test;
use tokio::sync::Mutex;
fn make_session() -> (
FramedTransport<InmemoryTransport, PlainCodec>,
DistantClient,
) {
use super::*;
use crate::protocol::ChangeKind;
use crate::DistantClient;
fn make_session() -> (FramedTransport<InmemoryTransport>, DistantClient) {
let (t1, t2) = FramedTransport::pair(100);
let (writer, reader) = t2.into_split();
(t1, Client::new(writer, reader).unwrap())
(t1, Client::spawn_inmemory(t2, Default::default()))
}
#[tokio::test]
#[test(tokio::test)]
async fn watcher_should_have_path_reflect_watched_path() {
let (mut transport, session) = make_session();
let test_path = Path::new("/some/test/path");
@ -219,11 +214,11 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
// Send back an acknowledgement that a watcher was created
transport
.write(Response::new(req.id, DistantResponseData::Ok))
.write_frame_for(&Response::new(req.id, protocol::Response::Ok))
.await
.unwrap();
@ -232,7 +227,7 @@ mod tests {
assert_eq!(watcher.path(), test_path);
}
#[tokio::test]
#[test(tokio::test)]
async fn watcher_should_support_getting_next_change() {
let (mut transport, session) = make_session();
let test_path = Path::new("/some/test/path");
@ -251,11 +246,11 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
// Send back an acknowledgement that a watcher was created
transport
.write(Response::new(req.id.clone(), DistantResponseData::Ok))
.write_frame_for(&Response::new(req.id.clone(), protocol::Response::Ok))
.await
.unwrap();
@ -264,16 +259,20 @@ mod tests {
// Send some changes related to the file
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
vec![
DistantResponseData::Changed(Change {
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
DistantResponseData::Changed(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()],
protocol::Response::Changed(Change {
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}),
],
))
@ -285,8 +284,10 @@ mod tests {
assert_eq!(
change,
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()]
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -294,13 +295,15 @@ mod tests {
assert_eq!(
change,
Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()]
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}
);
}
#[tokio::test]
#[test(tokio::test)]
async fn watcher_should_distinguish_change_events_and_only_receive_changes_for_itself() {
let (mut transport, session) = make_session();
let test_path = Path::new("/some/test/path");
@ -319,11 +322,11 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
// Send back an acknowledgement that a watcher was created
transport
.write(Response::new(req.id.clone(), DistantResponseData::Ok))
.write_frame_for(&Response::new(req.id.clone(), protocol::Response::Ok))
.await
.unwrap();
@ -332,11 +335,13 @@ mod tests {
// Send a change from the appropriate origin
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone(),
DistantResponseData::Changed(Change {
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -344,11 +349,13 @@ mod tests {
// Send a change from a different origin
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id.clone() + "1",
DistantResponseData::Changed(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()],
protocol::Response::Changed(Change {
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -356,11 +363,13 @@ mod tests {
// Send a change from the appropriate origin
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantResponseData::Changed(Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()],
protocol::Response::Changed(Change {
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -371,8 +380,10 @@ mod tests {
assert_eq!(
change,
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()]
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -380,13 +391,15 @@ mod tests {
assert_eq!(
change,
Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()]
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
}
);
}
#[tokio::test]
#[test(tokio::test)]
async fn watcher_should_stop_receiving_events_if_unwatched() {
let (mut transport, session) = make_session();
let test_path = Path::new("/some/test/path");
@ -405,30 +418,36 @@ mod tests {
});
// Wait until we get the request from the session
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
// Send back an acknowledgement that a watcher was created
transport
.write(Response::new(req.id.clone(), DistantResponseData::Ok))
.write_frame_for(&Response::new(req.id.clone(), protocol::Response::Ok))
.await
.unwrap();
// Send some changes from the appropriate origin
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
vec![
DistantResponseData::Changed(Change {
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
DistantResponseData::Changed(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()],
protocol::Response::Changed(Change {
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}),
DistantResponseData::Changed(Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()],
protocol::Response::Changed(Change {
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
}),
],
))
@ -452,8 +471,10 @@ mod tests {
assert_eq!(
change,
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()]
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -461,10 +482,10 @@ mod tests {
let watcher_2 = Arc::clone(&watcher);
let unwatch_task = tokio::spawn(async move { watcher_2.lock().await.unwatch().await });
let req: Request<DistantRequestData> = transport.read().await.unwrap().unwrap();
let req: Request<protocol::Request> = transport.read_frame_as().await.unwrap().unwrap();
transport
.write(Response::new(req.id.clone(), DistantResponseData::Ok))
.write_frame_for(&Response::new(req.id.clone(), protocol::Response::Ok))
.await
.unwrap();
@ -472,11 +493,13 @@ mod tests {
unwatch_task.await.unwrap().unwrap();
transport
.write(Response::new(
.write_frame_for(&Response::new(
req.id,
DistantResponseData::Changed(Change {
protocol::Response::Changed(Change {
timestamp: 3,
kind: ChangeKind::Unknown,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -487,15 +510,19 @@ mod tests {
assert_eq!(
watcher.lock().await.next().await,
Some(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()]
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
})
);
assert_eq!(
watcher.lock().await.next().await,
Some(Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()]
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
})
);
assert_eq!(watcher.lock().await.next().await, None);

@ -4,15 +4,5 @@ pub const CLIENT_PIPE_CAPACITY: usize = 10000;
/// Capacity associated with a client watcher receiving changes
pub const CLIENT_WATCHER_CAPACITY: usize = 100;
/// Capacity associated with the server's file watcher to pass events outbound
pub const SERVER_WATCHER_CAPACITY: usize = 10000;
/// Represents the maximum size (in bytes) that data will be read from pipes
/// per individual `read` call
///
/// Current setting is 16k size
pub const MAX_PIPE_CHUNK_SIZE: usize = 16384;
/// Duration in milliseconds to sleep between reading stdout/stderr chunks
/// to avoid sending many small messages to clients
pub const READ_PAUSE_MILLIS: u64 = 50;
/// Capacity associated with a client searcher receiving matches
pub const CLIENT_SEARCHER_CAPACITY: usize = 10000;

@ -1,10 +1,13 @@
use crate::{
serde_str::{deserialize_from_str, serialize_to_str},
Destination, Host,
};
use distant_net::SecretKey32;
use serde::{de::Deserializer, ser::Serializer, Deserialize, Serialize};
use std::{convert::TryFrom, fmt, io, str::FromStr};
use std::convert::TryFrom;
use std::str::FromStr;
use std::{fmt, io};
use distant_net::common::{Destination, Host, SecretKey32};
use serde::de::Deserializer;
use serde::ser::Serializer;
use serde::{Deserialize, Serialize};
use crate::serde_str::{deserialize_from_str, serialize_to_str};
const SCHEME: &str = "distant";
const SCHEME_WITH_SEP: &str = "distant://";
@ -25,7 +28,7 @@ impl fmt::Display for DistantSingleKeyCredentials {
write!(f, "{SCHEME}://")?;
if let Some(username) = self.username.as_ref() {
write!(f, "{}", username)?;
write!(f, "{username}")?;
}
write!(f, ":{}@", self.key)?;
@ -98,8 +101,11 @@ impl<'de> Deserialize<'de> for DistantSingleKeyCredentials {
impl DistantSingleKeyCredentials {
/// Searches a str for `distant://[username]:{key}@{host}:{port}`, returning the first matching
/// credentials set if found
pub fn find(s: &str) -> Option<DistantSingleKeyCredentials> {
/// credentials set if found, failing if anything is found immediately before or after the
/// credentials that is not whitespace or control characters
///
/// If `strict` is false, then the scheme can be preceded by any character
pub fn find(s: &str, strict: bool) -> Option<DistantSingleKeyCredentials> {
let is_boundary = |c| char::is_whitespace(c) || char::is_control(c);
for (i, _) in s.match_indices(SCHEME_WITH_SEP) {
@ -108,11 +114,11 @@ impl DistantSingleKeyCredentials {
// Check character preceding the scheme to make sure it isn't a different scheme
// Only whitespace or control characters preceding are okay, anything else is skipped
if !before.is_empty() && !before.ends_with(is_boundary) {
if strict && !before.is_empty() && !before.ends_with(is_boundary) {
continue;
}
// Consume until we reach whitespace, which indicates the potential end
// Consume until we reach whitespace or control, which indicates the potential end
let s = match s.find(is_boundary) {
Some(i) => &s[..i],
None => s,
@ -127,6 +133,22 @@ impl DistantSingleKeyCredentials {
None
}
/// Equivalent to [`find(s, true)`].
///
/// [`find(s, true)`]: DistantSingleKeyCredentials::find
#[inline]
pub fn find_strict(s: &str) -> Option<DistantSingleKeyCredentials> {
Self::find(s, true)
}
/// Equivalent to [`find(s, false)`].
///
/// [`find(s, false)`]: DistantSingleKeyCredentials::find
#[inline]
pub fn find_lax(s: &str) -> Option<DistantSingleKeyCredentials> {
Self::find(s, false)
}
/// Converts credentials into a [`Destination`] of the form
/// `distant://[username]:{key}@{host}:{port}`, failing if the credentials would not produce a
/// valid [`Destination`]
@ -151,10 +173,13 @@ impl TryFrom<DistantSingleKeyCredentials> for Destination {
#[cfg(test)]
mod tests {
use super::*;
use once_cell::sync::Lazy;
use std::net::{Ipv4Addr, Ipv6Addr};
use once_cell::sync::Lazy;
use test_log::test;
use super::*;
const HOST: &str = "testhost";
const PORT: u16 = 12345;
@ -177,29 +202,29 @@ mod tests {
#[test]
fn find_should_return_some_key_if_string_is_exact_match() {
let credentials = DistantSingleKeyCredentials::find(CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(CREDENTIALS_STR_NO_USER.as_str(), true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
let credentials = DistantSingleKeyCredentials::find(CREDENTIALS_STR_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(CREDENTIALS_STR_USER.as_str(), true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_USER);
}
#[test]
fn find_should_return_some_key_if_there_is_a_match_with_only_whitespace_on_either_side() {
let s = format!(" {} ", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
let s = format!("\r{}\r", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
let s = format!("\t{}\t", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
let s = format!("\n{}\n", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
}
@ -207,7 +232,7 @@ mod tests {
fn find_should_return_some_key_if_there_is_a_match_with_only_control_characters_on_either_side()
{
let s = format!("\x1b{} \x1b", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
}
@ -218,7 +243,7 @@ mod tests {
CREDENTIALS_STR_NO_USER.as_str(),
CREDENTIALS_STR_USER.as_str()
);
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
}
@ -230,14 +255,29 @@ mod tests {
CREDENTIALS_STR_NO_USER.as_str(),
CREDENTIALS_STR_NO_USER.as_str()
);
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
}
#[test]
fn find_should_return_none_if_no_match_found() {
fn find_with_strict_false_should_ignore_any_character_preceding_scheme() {
let s = format!("a{}", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s, false);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
let s = format!(
"a{} b{}",
CREDENTIALS_STR_NO_USER.as_str(),
CREDENTIALS_STR_NO_USER.as_str()
);
let credentials = DistantSingleKeyCredentials::find(&s, false);
assert_eq!(credentials.unwrap(), *CREDENTIALS_NO_USER);
}
#[test]
fn find_with_strict_true_should_not_find_if_non_whitespace_and_control_preceding_scheme() {
let s = format!("a{}", CREDENTIALS_STR_NO_USER.as_str());
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials, None);
let s = format!(
@ -245,7 +285,18 @@ mod tests {
CREDENTIALS_STR_NO_USER.as_str(),
CREDENTIALS_STR_NO_USER.as_str()
);
let credentials = DistantSingleKeyCredentials::find(&s);
let credentials = DistantSingleKeyCredentials::find(&s, true);
assert_eq!(credentials, None);
}
#[test]
fn find_should_return_none_if_no_match_found() {
let s = "abc";
let credentials = DistantSingleKeyCredentials::find(s, true);
assert_eq!(credentials, None);
let s = "abc";
let credentials = DistantSingleKeyCredentials::find(s, false);
assert_eq!(credentials, None);
}

@ -1,515 +0,0 @@
use derive_more::{From, IsVariant};
use serde::{Deserialize, Serialize};
use std::{io, path::PathBuf};
use strum::AsRefStr;
#[cfg(feature = "clap")]
use strum::VariantNames;
mod change;
pub use change::*;
mod cmd;
pub use cmd::*;
#[cfg(feature = "clap")]
mod clap_impl;
mod error;
pub use error::*;
mod filesystem;
pub use filesystem::*;
mod map;
pub use map::Map;
mod metadata;
pub use metadata::*;
mod pty;
pub use pty::*;
mod system;
pub use system::*;
mod utils;
pub(crate) use utils::*;
/// Id for a remote process
pub type ProcessId = u32;
/// Mapping of environment variables
pub type Environment = Map;
/// Type alias for a vec of bytes
///
/// NOTE: This only exists to support properly parsing a Vec<u8> from an entire string
/// with clap rather than trying to parse a string as a singular u8
pub type ByteVec = Vec<u8>;
#[cfg(feature = "clap")]
fn parse_byte_vec(src: &str) -> ByteVec {
src.as_bytes().to_vec()
}
/// Represents a wrapper around a distant message, supporting single and batch requests
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(untagged)]
pub enum DistantMsg<T> {
Single(T),
Batch(Vec<T>),
}
impl<T> DistantMsg<T> {
/// Returns true if msg has a single payload
pub fn is_single(&self) -> bool {
matches!(self, Self::Single(_))
}
/// Returns reference to single value if msg is single variant
pub fn as_single(&self) -> Option<&T> {
match self {
Self::Single(x) => Some(x),
_ => None,
}
}
/// Returns mutable reference to single value if msg is single variant
pub fn as_mut_single(&mut self) -> Option<&T> {
match self {
Self::Single(x) => Some(x),
_ => None,
}
}
/// Returns the single value if msg is single variant
pub fn into_single(self) -> Option<T> {
match self {
Self::Single(x) => Some(x),
_ => None,
}
}
/// Returns true if msg has a batch of payloads
pub fn is_batch(&self) -> bool {
matches!(self, Self::Batch(_))
}
/// Returns reference to batch value if msg is batch variant
pub fn as_batch(&self) -> Option<&[T]> {
match self {
Self::Batch(x) => Some(x),
_ => None,
}
}
/// Returns mutable reference to batch value if msg is batch variant
pub fn as_mut_batch(&mut self) -> Option<&mut [T]> {
match self {
Self::Batch(x) => Some(x),
_ => None,
}
}
/// Returns the batch value if msg is batch variant
pub fn into_batch(self) -> Option<Vec<T>> {
match self {
Self::Batch(x) => Some(x),
_ => None,
}
}
/// Convert into a collection of payload data
pub fn into_vec(self) -> Vec<T> {
match self {
Self::Single(x) => vec![x],
Self::Batch(x) => x,
}
}
}
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> DistantMsg<T> {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(DistantMsg<T>)
}
}
/// Represents the payload of a request to be performed on the remote machine
#[derive(Clone, Debug, PartialEq, Eq, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "clap", derive(clap::Subcommand))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
#[cfg_attr(feature = "clap", clap(rename_all = "kebab-case"))]
pub enum DistantRequestData {
/// Reads a file from the specified path on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["cat"]))]
FileRead {
/// The path to the file on the remote machine
path: PathBuf,
},
/// Reads a file from the specified path on the remote machine
/// and treats the contents as text
FileReadText {
/// The path to the file on the remote machine
path: PathBuf,
},
/// Writes a file, creating it if it does not exist, and overwriting any existing content
/// on the remote machine
FileWrite {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
#[cfg_attr(feature = "clap", clap(parse(from_str = parse_byte_vec)))]
data: ByteVec,
},
/// Writes a file using text instead of bytes, creating it if it does not exist,
/// and overwriting any existing content on the remote machine
FileWriteText {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
text: String,
},
/// Appends to a file, creating it if it does not exist, on the remote machine
FileAppend {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
#[cfg_attr(feature = "clap", clap(parse(from_str = parse_byte_vec)))]
data: ByteVec,
},
/// Appends text to a file, creating it if it does not exist, on the remote machine
FileAppendText {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
text: String,
},
/// Reads a directory from the specified path on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["ls"]))]
DirRead {
/// The path to the directory on the remote machine
path: PathBuf,
/// Maximum depth to traverse with 0 indicating there is no maximum
/// depth and 1 indicating the most immediate children within the
/// directory
#[serde(default = "one")]
#[cfg_attr(feature = "clap", clap(long, default_value = "1"))]
depth: usize,
/// Whether or not to return absolute or relative paths
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
absolute: bool,
/// Whether or not to canonicalize the resulting paths, meaning
/// returning the canonical, absolute form of a path with all
/// intermediate components normalized and symbolic links resolved
///
/// Note that the flag absolute must be true to have absolute paths
/// returned, even if canonicalize is flagged as true
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
canonicalize: bool,
/// Whether or not to include the root directory in the retrieved
/// entries
///
/// If included, the root directory will also be a canonicalized,
/// absolute path and will not follow any of the other flags
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
include_root: bool,
},
/// Creates a directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["mkdir"]))]
DirCreate {
/// The path to the directory on the remote machine
path: PathBuf,
/// Whether or not to create all parent directories
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
all: bool,
},
/// Removes a file or directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["rm"]))]
Remove {
/// The path to the file or directory on the remote machine
path: PathBuf,
/// Whether or not to remove all contents within directory if is a directory.
/// Does nothing different for files
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
force: bool,
},
/// Copies a file or directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["cp"]))]
Copy {
/// The path to the file or directory on the remote machine
src: PathBuf,
/// New location on the remote machine for copy of file or directory
dst: PathBuf,
},
/// Moves/renames a file or directory on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["mv"]))]
Rename {
/// The path to the file or directory on the remote machine
src: PathBuf,
/// New location on the remote machine for the file or directory
dst: PathBuf,
},
/// Watches a path for changes
Watch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
/// If true, will recursively watch for changes within directories, othewise
/// will only watch for changes immediately within directories
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
recursive: bool,
/// Filter to only report back specified changes
#[serde(default)]
#[cfg_attr(
feature = "clap",
clap(long, possible_values = ChangeKind::VARIANTS)
)]
only: Vec<ChangeKind>,
/// Filter to report back changes except these specified changes
#[serde(default)]
#[cfg_attr(
feature = "clap",
clap(long, possible_values = ChangeKind::VARIANTS)
)]
except: Vec<ChangeKind>,
},
/// Unwatches a path for changes, meaning no additional changes will be reported
Unwatch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
},
/// Checks whether the given path exists
Exists {
/// The path to the file or directory on the remote machine
path: PathBuf,
},
/// Retrieves filesystem metadata for the specified path on the remote machine
Metadata {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
/// Whether or not to include a canonicalized version of the path, meaning
/// returning the canonical, absolute form of a path with all
/// intermediate components normalized and symbolic links resolved
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
canonicalize: bool,
/// Whether or not to follow symlinks to determine absolute file type (dir/file)
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
resolve_file_type: bool,
},
/// Spawns a new process on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["spawn", "run"]))]
ProcSpawn {
/// The full command to run including arguments
#[cfg_attr(feature = "clap", clap(flatten))]
cmd: Cmd,
/// Environment to provide to the remote process
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long, default_value_t = Environment::default()))]
environment: Environment,
/// Alternative current directory for the remote process
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
current_dir: Option<PathBuf>,
/// Whether or not the process should be persistent, meaning that the process will not be
/// killed when the associated client disconnects
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
persist: bool,
/// If provided, will spawn process in a pty, otherwise spawns directly
#[serde(default)]
#[cfg_attr(feature = "clap", clap(long))]
pty: Option<PtySize>,
},
/// Kills a process running on the remote machine
#[cfg_attr(feature = "clap", clap(visible_aliases = &["kill"]))]
ProcKill {
/// Id of the actively-running process
id: ProcessId,
},
/// Sends additional data to stdin of running process
ProcStdin {
/// Id of the actively-running process to send stdin data
id: ProcessId,
/// Data to send to a process's stdin pipe
#[serde(with = "serde_bytes")]
#[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
data: Vec<u8>,
},
/// Resize pty of remote process
ProcResizePty {
/// Id of the actively-running process whose pty to resize
id: ProcessId,
/// The new pty dimensions
size: PtySize,
},
/// Retrieve information about the server and the system it is on
SystemInfo {},
}
#[cfg(feature = "schemars")]
impl DistantRequestData {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(DistantRequestData)
}
}
/// Represents the payload of a successful response
///
/// Serialized with an internal `type` tag and snake_case variant names
/// (see the `serde` attributes below)
#[derive(Clone, Debug, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
#[strum(serialize_all = "snake_case")]
pub enum DistantResponseData {
    /// General okay with no extra data, returned in cases like
    /// creating or removing a directory, copying a file, or renaming
    /// a file
    Ok,
    /// General-purpose failure that occurred from some request
    Error(Error),
    /// Response containing some arbitrary, binary data
    Blob {
        /// Binary data associated with the response
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },
    /// Response containing some arbitrary, text data
    Text {
        /// Text data associated with the response
        data: String,
    },
    /// Response to reading a directory
    DirEntries {
        /// Entries contained within the requested directory
        entries: Vec<DirEntry>,
        /// Errors encountered while scanning for entries
        errors: Vec<Error>,
    },
    /// Response to a filesystem change for some watched file, directory, or symlink
    Changed(Change),
    /// Response to checking if a path exists
    Exists { value: bool },
    /// Represents metadata about some filesystem object (file, directory, symlink) on remote machine
    Metadata(Metadata),
    /// Response to starting a new process
    ProcSpawned {
        /// Arbitrary id associated with running process
        id: ProcessId,
    },
    /// Actively-transmitted stdout as part of running process
    ProcStdout {
        /// Arbitrary id associated with running process
        id: ProcessId,
        /// Data read from a process' stdout pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },
    /// Actively-transmitted stderr as part of running process
    ProcStderr {
        /// Arbitrary id associated with running process
        id: ProcessId,
        /// Data read from a process' stderr pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },
    /// Response to a process finishing
    ProcDone {
        /// Arbitrary id associated with running process
        id: ProcessId,
        /// Whether or not termination was successful
        success: bool,
        /// Exit code associated with termination, will be missing if terminated by signal
        code: Option<i32>,
    },
    /// Response to retrieving information about the server and the system it is on
    SystemInfo(SystemInfo),
}

#[cfg(feature = "schemars")]
impl DistantResponseData {
    /// Produces the root JSON schema describing [`DistantResponseData`],
    /// used to document the response wire format
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(DistantResponseData)
    }
}

impl From<io::Error> for DistantResponseData {
    /// Wraps an I/O error into an [`DistantResponseData::Error`] payload
    fn from(x: io::Error) -> Self {
        Self::Error(Error::from(x))
    }
}
/// Used to provide a default serde value of 1
/// (for use in `#[serde(default = "one")]` attributes on numeric fields)
const fn one() -> usize {
    1
}

@ -1,506 +0,0 @@
use derive_more::{Deref, DerefMut, IntoIterator};
use notify::{event::Event as NotifyEvent, EventKind as NotifyEventKind};
use serde::{Deserialize, Serialize};
use std::{
collections::HashSet,
fmt,
hash::{Hash, Hasher},
iter::FromIterator,
ops::{BitOr, Sub},
path::PathBuf,
str::FromStr,
};
use strum::{EnumString, EnumVariantNames};
/// Change to one or more paths on the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Change {
    /// Label describing the kind of change
    pub kind: ChangeKind,
    /// Paths that were changed
    pub paths: Vec<PathBuf>,
}

#[cfg(feature = "schemars")]
impl Change {
    /// Produces the root JSON schema describing [`Change`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Change)
    }
}

impl From<NotifyEvent> for Change {
    /// Maps a `notify` crate event into a [`Change`], converting the
    /// event kind and carrying over the affected paths as-is
    fn from(x: NotifyEvent) -> Self {
        Self {
            kind: x.kind.into(),
            paths: x.paths,
        }
    }
}
#[derive(
    Copy,
    Clone,
    Debug,
    strum::Display,
    EnumString,
    EnumVariantNames,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Serialize,
    Deserialize,
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "clap", clap(rename_all = "snake_case"))]
pub enum ChangeKind {
    /// Something about a file or directory was accessed, but
    /// no specific details were known
    Access,
    /// A file was closed for executing
    AccessCloseExecute,
    /// A file was closed for reading
    AccessCloseRead,
    /// A file was closed for writing
    AccessCloseWrite,
    /// A file was opened for executing
    AccessOpenExecute,
    /// A file was opened for reading
    AccessOpenRead,
    /// A file was opened for writing
    AccessOpenWrite,
    /// A file or directory was read
    AccessRead,
    /// The access time of a file or directory was changed
    AccessTime,
    /// A file, directory, or something else was created
    Create,
    /// The content of a file or directory changed
    Content,
    /// The data of a file or directory was modified, but
    /// no specific details were known
    Data,
    /// The metadata of a file or directory was modified, but
    /// no specific details were known
    Metadata,
    /// Something about a file or directory was modified, but
    /// no specific details were known
    Modify,
    /// A file, directory, or something else was removed
    Remove,
    /// A file or directory was renamed, but no specific details were known
    Rename,
    /// A file or directory was renamed, and the provided paths
    /// are the source and target in that order (from, to)
    RenameBoth,
    /// A file or directory was renamed, and the provided path
    /// is the origin of the rename (before being renamed)
    RenameFrom,
    /// A file or directory was renamed, and the provided path
    /// is the result of the rename
    RenameTo,
    /// A file's size changed
    Size,
    /// The ownership of a file or directory was changed
    Ownership,
    /// The permissions of a file or directory was changed
    Permissions,
    /// The write or modify time of a file or directory was changed
    WriteTime,
    /// Catch-all in case we have no insight as to the type of change
    Unknown,
}
impl ChangeKind {
    /// Returns true if the change is a kind of access
    pub fn is_access_kind(&self) -> bool {
        matches!(self, Self::Access | Self::AccessRead)
            || self.is_open_access_kind()
            || self.is_close_access_kind()
    }

    /// Returns true if the change is a kind of open access
    pub fn is_open_access_kind(&self) -> bool {
        matches!(
            self,
            Self::AccessOpenRead | Self::AccessOpenWrite | Self::AccessOpenExecute
        )
    }

    /// Returns true if the change is a kind of close access
    pub fn is_close_access_kind(&self) -> bool {
        matches!(
            self,
            Self::AccessCloseRead | Self::AccessCloseWrite | Self::AccessCloseExecute
        )
    }

    /// Returns true if the change is a kind of creation
    pub fn is_create_kind(&self) -> bool {
        matches!(self, Self::Create)
    }

    /// Returns true if the change is a kind of modification
    pub fn is_modify_kind(&self) -> bool {
        matches!(self, Self::Modify) || self.is_data_modify_kind() || self.is_metadata_modify_kind()
    }

    /// Returns true if the change is a kind of data modification
    pub fn is_data_modify_kind(&self) -> bool {
        matches!(self, Self::Content | Self::Size | Self::Data)
    }

    /// Returns true if the change is a kind of metadata modification
    pub fn is_metadata_modify_kind(&self) -> bool {
        matches!(
            self,
            Self::Metadata
                | Self::AccessTime
                | Self::WriteTime
                | Self::Ownership
                | Self::Permissions
        )
    }

    /// Returns true if the change is a kind of rename
    pub fn is_rename_kind(&self) -> bool {
        matches!(
            self,
            Self::Rename | Self::RenameFrom | Self::RenameTo | Self::RenameBoth
        )
    }

    /// Returns true if the change is a kind of removal
    pub fn is_remove_kind(&self) -> bool {
        matches!(self, Self::Remove)
    }

    /// Returns true if the change kind is unknown
    pub fn is_unknown_kind(&self) -> bool {
        matches!(self, Self::Unknown)
    }
}
#[cfg(feature = "schemars")]
impl ChangeKind {
    /// Produces the root JSON schema describing [`ChangeKind`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(ChangeKind)
    }
}

impl BitOr for ChangeKind {
    type Output = ChangeKindSet;

    /// Combines two change kinds into a [`ChangeKindSet`] holding both
    /// (a single-element set when the two kinds are equal)
    fn bitor(self, rhs: Self) -> Self::Output {
        ChangeKindSet::from(self) | rhs
    }
}
impl From<NotifyEventKind> for ChangeKind {
    /// Collapses the `notify` crate's richer event taxonomy into a single
    /// [`ChangeKind`]; arm order matters as the `_` catch-alls for each
    /// category must come after the specific variants they generalize
    fn from(x: NotifyEventKind) -> Self {
        use notify::event::{
            AccessKind, AccessMode, DataChange, MetadataKind, ModifyKind, RenameMode,
        };
        match x {
            // File/directory access events
            NotifyEventKind::Access(AccessKind::Read) => Self::AccessRead,
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Execute)) => {
                Self::AccessOpenExecute
            }
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Read)) => Self::AccessOpenRead,
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Write)) => Self::AccessOpenWrite,
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Execute)) => {
                Self::AccessCloseExecute
            }
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Read)) => Self::AccessCloseRead,
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Write)) => Self::AccessCloseWrite,
            NotifyEventKind::Access(_) => Self::Access,
            // File/directory creation events
            NotifyEventKind::Create(_) => Self::Create,
            // Rename-oriented events
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::Both)) => Self::RenameBoth,
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::From)) => Self::RenameFrom,
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::To)) => Self::RenameTo,
            NotifyEventKind::Modify(ModifyKind::Name(_)) => Self::Rename,
            // Data-modification events
            NotifyEventKind::Modify(ModifyKind::Data(DataChange::Content)) => Self::Content,
            NotifyEventKind::Modify(ModifyKind::Data(DataChange::Size)) => Self::Size,
            NotifyEventKind::Modify(ModifyKind::Data(_)) => Self::Data,
            // Metadata-modification events
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::AccessTime)) => {
                Self::AccessTime
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
                Self::WriteTime
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => {
                Self::Permissions
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => {
                Self::Ownership
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(_)) => Self::Metadata,
            // General modification events
            NotifyEventKind::Modify(_) => Self::Modify,
            // File/directory removal events
            NotifyEventKind::Remove(_) => Self::Remove,
            // Catch-all for other events
            NotifyEventKind::Any | NotifyEventKind::Other => Self::Unknown,
        }
    }
}
/// Represents a distinct set of different change kinds
// Deref/DerefMut expose the inner HashSet's API (insert, extend, iter, ...)
#[derive(Clone, Debug, Deref, DerefMut, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ChangeKindSet(HashSet<ChangeKind>);
impl ChangeKindSet {
    /// Produces an empty set of [`ChangeKind`]
    pub fn empty() -> Self {
        Self(HashSet::new())
    }

    /// Produces a set of all [`ChangeKind`]
    pub fn all() -> Self {
        vec![
            ChangeKind::Access,
            ChangeKind::AccessCloseExecute,
            ChangeKind::AccessCloseRead,
            ChangeKind::AccessCloseWrite,
            ChangeKind::AccessOpenExecute,
            ChangeKind::AccessOpenRead,
            ChangeKind::AccessOpenWrite,
            ChangeKind::AccessRead,
            ChangeKind::AccessTime,
            ChangeKind::Create,
            ChangeKind::Content,
            ChangeKind::Data,
            ChangeKind::Metadata,
            ChangeKind::Modify,
            ChangeKind::Remove,
            ChangeKind::Rename,
            ChangeKind::RenameBoth,
            ChangeKind::RenameFrom,
            ChangeKind::RenameTo,
            ChangeKind::Size,
            ChangeKind::Ownership,
            ChangeKind::Permissions,
            ChangeKind::WriteTime,
            ChangeKind::Unknown,
        ]
        .into_iter()
        .collect()
    }

    /// Produces a changeset containing all of the access kinds
    pub fn access_set() -> Self {
        Self::access_open_set()
            | Self::access_close_set()
            | ChangeKind::AccessRead
            | ChangeKind::Access
    }

    /// Produces a changeset containing all of the open access kinds
    pub fn access_open_set() -> Self {
        ChangeKind::AccessOpenExecute | ChangeKind::AccessOpenRead | ChangeKind::AccessOpenWrite
    }

    /// Produces a changeset containing all of the close access kinds
    pub fn access_close_set() -> Self {
        ChangeKind::AccessCloseExecute | ChangeKind::AccessCloseRead | ChangeKind::AccessCloseWrite
    }

    /// Produces a changeset containing all of the modification kinds
    pub fn modify_set() -> Self {
        Self::modify_data_set() | Self::modify_metadata_set() | ChangeKind::Modify
    }

    /// Produces a changeset containing all of the data modification kinds
    pub fn modify_data_set() -> Self {
        ChangeKind::Content | ChangeKind::Data | ChangeKind::Size
    }

    /// Produces a changeset containing all of the metadata modification kinds
    pub fn modify_metadata_set() -> Self {
        ChangeKind::AccessTime
            | ChangeKind::Metadata
            | ChangeKind::Ownership
            | ChangeKind::Permissions
            | ChangeKind::WriteTime
    }

    /// Produces a changeset containing all of the rename kinds
    pub fn rename_set() -> Self {
        ChangeKind::Rename | ChangeKind::RenameBoth | ChangeKind::RenameFrom | ChangeKind::RenameTo
    }

    /// Consumes set and returns a vec of the kinds of changes
    /// (iteration order of the underlying HashSet, i.e. unspecified)
    pub fn into_vec(self) -> Vec<ChangeKind> {
        self.0.into_iter().collect()
    }
}
#[cfg(feature = "schemars")]
impl ChangeKindSet {
    /// Produces the root JSON schema describing [`ChangeKindSet`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(ChangeKindSet)
    }
}

impl fmt::Display for ChangeKindSet {
    /// Outputs a comma-separated series of [`ChangeKind`] as string that are sorted
    /// such that this will always be consistent output
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Sort the rendered names so output does not depend on HashSet order
        let mut kinds = self
            .0
            .iter()
            .map(ToString::to_string)
            .collect::<Vec<String>>();
        kinds.sort_unstable();
        write!(f, "{}", kinds.join(","))
    }
}
impl PartialEq for ChangeKindSet {
    /// Two sets are equal when they contain exactly the same kinds.
    ///
    /// Compares the inner sets directly rather than rendering both to
    /// strings: the [`fmt::Display`] output is uniquely determined by the
    /// set contents (sorted, distinct names), so this is observably the
    /// same relation while avoiding two allocations and sorts per compare.
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl Eq for ChangeKindSet {}
impl Hash for ChangeKindSet {
    /// Hashes based on the output of [`fmt::Display`]
    // The inner HashSet has no Hash impl; the Display form is sorted, so it
    // yields an order-independent hash consistent with equality
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.to_string().hash(state);
    }
}
impl BitOr<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Union of two sets
    fn bitor(mut self, rhs: ChangeKindSet) -> Self::Output {
        // extend is available through the DerefMut to the inner HashSet
        self.extend(rhs.0);
        self
    }
}

impl BitOr<ChangeKind> for ChangeKindSet {
    type Output = Self;

    /// Union of a set with a single kind
    fn bitor(mut self, rhs: ChangeKind) -> Self::Output {
        self.0.insert(rhs);
        self
    }
}

impl BitOr<ChangeKindSet> for ChangeKind {
    type Output = ChangeKindSet;

    /// Union of a single kind with a set (delegates to set | kind)
    fn bitor(self, rhs: ChangeKindSet) -> Self::Output {
        rhs | self
    }
}

impl Sub<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Set difference: kinds in `self` that are not in `other`
    fn sub(self, other: Self) -> Self::Output {
        ChangeKindSet(&self.0 - &other.0)
    }
}

impl Sub<&'_ ChangeKindSet> for &ChangeKindSet {
    type Output = ChangeKindSet;

    /// Set difference by reference, producing a new set
    fn sub(self, other: &ChangeKindSet) -> Self::Output {
        ChangeKindSet(&self.0 - &other.0)
    }
}
impl FromStr for ChangeKindSet {
type Err = strum::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut change_set = HashSet::new();
for word in s.split(',') {
change_set.insert(ChangeKind::from_str(word.trim())?);
}
Ok(ChangeKindSet(change_set))
}
}
impl FromIterator<ChangeKind> for ChangeKindSet {
    /// Collects kinds from the iterator into a set (duplicates collapse)
    fn from_iter<I: IntoIterator<Item = ChangeKind>>(iter: I) -> Self {
        ChangeKindSet(iter.into_iter().collect())
    }
}
impl From<ChangeKind> for ChangeKindSet {
    /// Produces a set containing only the given kind
    fn from(change_kind: ChangeKind) -> Self {
        std::iter::once(change_kind).collect()
    }
}

impl From<Vec<ChangeKind>> for ChangeKindSet {
    /// Produces a set from the kinds in the vec (duplicates collapse)
    fn from(changes: Vec<ChangeKind>) -> Self {
        Self::from_iter(changes)
    }
}

impl Default for ChangeKindSet {
    /// Defaults to an empty set
    fn default() -> Self {
        Self::empty()
    }
}

@ -1,106 +0,0 @@
use crate::{data::Cmd, DistantMsg, DistantRequestData};
use clap::{
error::{Error, ErrorKind},
Arg, ArgAction, ArgMatches, Args, Command, FromArgMatches, Subcommand,
};
impl FromArgMatches for Cmd {
    fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
        let mut matches = matches.clone();
        Self::from_arg_matches_mut(&mut matches)
    }

    /// Builds a [`Cmd`] from the `cmd` argument plus any trailing `arg`
    /// values, joined with single spaces.
    ///
    /// Returns a `MissingRequiredArgument` error when `cmd` is absent.
    fn from_arg_matches_mut(matches: &mut ArgMatches) -> Result<Self, Error> {
        let cmd = matches.get_one::<String>("cmd").ok_or_else(|| {
            Error::raw(
                ErrorKind::MissingRequiredArgument,
                "program must be specified",
            )
        })?;
        let args: Vec<String> = matches
            .get_many::<String>("arg")
            .unwrap_or_default()
            .map(ToString::to_string)
            .collect();
        // Only append the argument list when present; unconditionally using
        // `format!("{cmd} {}", args.join(" "))` left a trailing space on the
        // stored command whenever no arguments were given, which leaked into
        // the Display/Eq/Hash of Cmd
        let cmd = if args.is_empty() {
            cmd.to_string()
        } else {
            format!("{cmd} {}", args.join(" "))
        };
        Ok(Self::new(cmd))
    }

    fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
        let mut matches = matches.clone();
        self.update_from_arg_matches_mut(&mut matches)
    }

    /// Updating is a no-op: a Cmd is fully reconstructed from matches instead
    fn update_from_arg_matches_mut(&mut self, _matches: &mut ArgMatches) -> Result<(), Error> {
        Ok(())
    }
}
impl Args for Cmd {
    /// Registers a required positional `CMD` argument followed by zero or
    /// more trailing `ARGS` values (trailing_var_arg lets flags after the
    /// program name pass through untouched)
    fn augment_args(cmd: Command<'_>) -> Command<'_> {
        cmd.arg(
            Arg::new("cmd")
                .required(true)
                .value_name("CMD")
                .action(ArgAction::Set),
        )
        .trailing_var_arg(true)
        .arg(
            Arg::new("arg")
                .value_name("ARGS")
                .multiple_values(true)
                .action(ArgAction::Append),
        )
    }

    /// No update-specific arguments are registered
    fn augment_args_for_update(cmd: Command<'_>) -> Command<'_> {
        cmd
    }
}
impl FromArgMatches for DistantMsg<DistantRequestData> {
    /// Builds a single-request message from the `single` subcommand;
    /// any other (or missing) subcommand is an error
    fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
        match matches.subcommand() {
            Some(("single", args)) => Ok(Self::Single(DistantRequestData::from_arg_matches(args)?)),
            Some((_, _)) => Err(Error::raw(
                ErrorKind::UnrecognizedSubcommand,
                "Valid subcommand is `single`",
            )),
            None => Err(Error::raw(
                ErrorKind::MissingSubcommand,
                "Valid subcommand is `single`",
            )),
        }
    }

    /// Replaces self with a freshly-parsed single request when the `single`
    /// subcommand is present; leaves self untouched when none is given
    fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
        match matches.subcommand() {
            Some(("single", args)) => {
                *self = Self::Single(DistantRequestData::from_arg_matches(args)?)
            }
            Some((_, _)) => {
                return Err(Error::raw(
                    ErrorKind::UnrecognizedSubcommand,
                    "Valid subcommand is `single`",
                ))
            }
            None => (),
        };
        Ok(())
    }
}
impl Subcommand for DistantMsg<DistantRequestData> {
    /// Registers the required `single` subcommand whose arguments mirror
    /// [`DistantRequestData`]
    fn augment_subcommands(cmd: Command<'_>) -> Command<'_> {
        cmd.subcommand(DistantRequestData::augment_subcommands(Command::new(
            "single",
        )))
        .subcommand_required(true)
    }

    /// Identical registration to [`Self::augment_subcommands`]; delegate
    /// instead of duplicating the builder chain
    fn augment_subcommands_for_update(cmd: Command<'_>) -> Command<'_> {
        Self::augment_subcommands(cmd)
    }

    /// Only the `single` subcommand is recognized
    fn has_subcommand(name: &str) -> bool {
        matches!(name, "single")
    }
}

@ -1,52 +0,0 @@
use derive_more::{Display, From, Into};
use serde::{Deserialize, Serialize};
use std::ops::{Deref, DerefMut};
/// Represents some command with arguments to execute
// Stored as a single space-separated string: program first, then arguments
#[derive(Clone, Debug, Display, From, Into, Hash, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Cmd(String);
impl Cmd {
    /// Creates a new command from the given `cmd`
    pub fn new(cmd: impl Into<String>) -> Self {
        Self(cmd.into())
    }

    /// Returns reference to the program portion of the command
    /// (everything before the first space, trimmed)
    pub fn program(&self) -> &str {
        self.0
            .split_once(' ')
            .map(|(program, _)| program)
            .unwrap_or(&self.0)
            .trim()
    }

    /// Returns reference to the arguments portion of the command
    /// (everything after the first space, trimmed; empty when none)
    pub fn arguments(&self) -> &str {
        self.0
            .split_once(' ')
            .map_or("", |(_, arguments)| arguments.trim())
    }
}
#[cfg(feature = "schemars")]
impl Cmd {
    /// Produces the root JSON schema describing [`Cmd`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Cmd)
    }
}

impl Deref for Cmd {
    type Target = String;

    /// Exposes the underlying command string for read access
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Cmd {
    /// Exposes the underlying command string for mutation
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

@ -1,45 +0,0 @@
use derive_more::IsVariant;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use strum::AsRefStr;
/// Represents information about a single entry within a directory
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct DirEntry {
    /// Represents the full path to the entry
    pub path: PathBuf,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Depth at which this entry was created relative to the root (0 being immediately within
    /// root)
    pub depth: usize,
}

#[cfg(feature = "schemars")]
impl DirEntry {
    /// Produces the root JSON schema describing [`DirEntry`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(DirEntry)
    }
}

/// Represents the type associated with a dir entry
#[derive(Copy, Clone, Debug, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum FileType {
    /// Entry is a directory
    Dir,
    /// Entry is a regular file
    File,
    /// Entry is a symbolic link
    Symlink,
}

#[cfg(feature = "schemars")]
impl FileType {
    /// Produces the root JSON schema describing [`FileType`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(FileType)
    }
}

@ -1,404 +0,0 @@
use super::{deserialize_u128_option, serialize_u128_option, FileType};
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use std::{
io,
path::{Path, PathBuf},
time::SystemTime,
};
/// Represents metadata about some path on a remote machine
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Metadata {
    /// Canonicalized path to the file or directory, resolving symlinks, only included
    /// if flagged during the request
    pub canonicalized_path: Option<PathBuf>,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Size of the file/directory/symlink in bytes
    pub len: u64,
    /// Whether or not the file/directory/symlink is marked as unwriteable
    pub readonly: bool,
    /// Represents the last time (in milliseconds) when the file/directory/symlink was accessed;
    /// can be optional as certain systems don't support this
    // u128 exceeds what most serde formats handle natively, hence custom (de)serializers
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub accessed: Option<u128>,
    /// Represents when (in milliseconds) the file/directory/symlink was created;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub created: Option<u128>,
    /// Represents the last time (in milliseconds) when the file/directory/symlink was modified;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub modified: Option<u128>,
    /// Represents metadata that is specific to a unix remote machine
    pub unix: Option<UnixMetadata>,
    /// Represents metadata that is specific to a windows remote machine
    pub windows: Option<WindowsMetadata>,
}
impl Metadata {
    /// Reads metadata for `path` from the local filesystem.
    ///
    /// * `canonicalize` - when true, also resolves and stores the canonical path
    /// * `resolve_file_type` - when true and the path is a symlink, reports the
    ///   file type of the link target instead of `Symlink`
    ///
    /// Returns an error if any underlying filesystem query fails.
    pub async fn read(
        path: impl AsRef<Path>,
        canonicalize: bool,
        resolve_file_type: bool,
    ) -> io::Result<Self> {
        // symlink_metadata inspects the symlink itself rather than its target
        let metadata = tokio::fs::symlink_metadata(path.as_ref()).await?;
        let canonicalized_path = if canonicalize {
            Some(tokio::fs::canonicalize(path.as_ref()).await?)
        } else {
            None
        };
        // If asking for resolved file type and current type is symlink, then we want to refresh
        // our metadata to get the filetype for the resolved link
        // NOTE(review): this second lookup can race with concurrent filesystem
        // changes (TOCTOU); it only affects the reported file type
        let file_type = if resolve_file_type && metadata.file_type().is_symlink() {
            tokio::fs::metadata(path).await?.file_type()
        } else {
            metadata.file_type()
        };
        Ok(Self {
            canonicalized_path,
            // Timestamps become milliseconds since UNIX epoch; None when the
            // platform does not supply them (or they predate the epoch)
            accessed: metadata
                .accessed()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            created: metadata
                .created()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            modified: metadata
                .modified()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            len: metadata.len(),
            readonly: metadata.permissions().readonly(),
            file_type: if file_type.is_dir() {
                FileType::Dir
            } else if file_type.is_file() {
                FileType::File
            } else {
                FileType::Symlink
            },
            // Platform-specific permission/attribute details, populated only
            // on the matching target platform
            #[cfg(unix)]
            unix: Some({
                use std::os::unix::prelude::*;
                let mode = metadata.mode();
                crate::data::UnixMetadata::from(mode)
            }),
            #[cfg(not(unix))]
            unix: None,
            #[cfg(windows)]
            windows: Some({
                use std::os::windows::prelude::*;
                let attributes = metadata.file_attributes();
                crate::data::WindowsMetadata::from(attributes)
            }),
            #[cfg(not(windows))]
            windows: None,
        })
    }
}
#[cfg(feature = "schemars")]
impl Metadata {
    /// Produces the root JSON schema describing [`Metadata`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Metadata)
    }
}

/// Represents unix-specific metadata about some path on a remote machine
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct UnixMetadata {
    /// Represents whether or not owner can read from the file
    pub owner_read: bool,
    /// Represents whether or not owner can write to the file
    pub owner_write: bool,
    /// Represents whether or not owner can execute the file
    pub owner_exec: bool,
    /// Represents whether or not associated group can read from the file
    pub group_read: bool,
    /// Represents whether or not associated group can write to the file
    pub group_write: bool,
    /// Represents whether or not associated group can execute the file
    pub group_exec: bool,
    /// Represents whether or not other can read from the file
    pub other_read: bool,
    /// Represents whether or not other can write to the file
    pub other_write: bool,
    /// Represents whether or not other can execute the file
    pub other_exec: bool,
}

#[cfg(feature = "schemars")]
impl UnixMetadata {
    /// Produces the root JSON schema describing [`UnixMetadata`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(UnixMetadata)
    }
}

impl From<u32> for UnixMetadata {
    /// Create from a unix mode bitset
    // Extra bits beyond the nine rwx permission flags are silently dropped
    // by from_bits_truncate
    fn from(mode: u32) -> Self {
        let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
        Self {
            owner_read: flags.contains(UnixFilePermissionFlags::OWNER_READ),
            owner_write: flags.contains(UnixFilePermissionFlags::OWNER_WRITE),
            owner_exec: flags.contains(UnixFilePermissionFlags::OWNER_EXEC),
            group_read: flags.contains(UnixFilePermissionFlags::GROUP_READ),
            group_write: flags.contains(UnixFilePermissionFlags::GROUP_WRITE),
            group_exec: flags.contains(UnixFilePermissionFlags::GROUP_EXEC),
            other_read: flags.contains(UnixFilePermissionFlags::OTHER_READ),
            other_write: flags.contains(UnixFilePermissionFlags::OTHER_WRITE),
            other_exec: flags.contains(UnixFilePermissionFlags::OTHER_EXEC),
        }
    }
}
impl From<UnixMetadata> for u32 {
    /// Convert to a unix mode bitset
    fn from(metadata: UnixMetadata) -> Self {
        // Table of (field, flag) pairs; a flag is set iff its field is true
        let pairs = [
            (metadata.owner_read, UnixFilePermissionFlags::OWNER_READ),
            (metadata.owner_write, UnixFilePermissionFlags::OWNER_WRITE),
            (metadata.owner_exec, UnixFilePermissionFlags::OWNER_EXEC),
            (metadata.group_read, UnixFilePermissionFlags::GROUP_READ),
            (metadata.group_write, UnixFilePermissionFlags::GROUP_WRITE),
            (metadata.group_exec, UnixFilePermissionFlags::GROUP_EXEC),
            (metadata.other_read, UnixFilePermissionFlags::OTHER_READ),
            (metadata.other_write, UnixFilePermissionFlags::OTHER_WRITE),
            (metadata.other_exec, UnixFilePermissionFlags::OTHER_EXEC),
        ];
        let mut flags = UnixFilePermissionFlags::empty();
        for (enabled, flag) in pairs {
            if enabled {
                flags.insert(flag);
            }
        }
        flags.bits
    }
}
impl UnixMetadata {
    /// Returns true if no write permission bit is set for owner, group,
    /// or others, meaning the file cannot be modified by anyone.
    pub fn is_readonly(self) -> bool {
        // Readonly is the absence of *write* permission; the previous check
        // of the read bits answered "unreadable by everyone" instead
        !(self.owner_write || self.group_write || self.other_write)
    }
}
bitflags! {
    /// Standard unix rwxrwxrwx permission bits (octal), owner/group/other
    struct UnixFilePermissionFlags: u32 {
        const OWNER_READ = 0o400;
        const OWNER_WRITE = 0o200;
        const OWNER_EXEC = 0o100;
        const GROUP_READ = 0o40;
        const GROUP_WRITE = 0o20;
        const GROUP_EXEC = 0o10;
        const OTHER_READ = 0o4;
        const OTHER_WRITE = 0o2;
        const OTHER_EXEC = 0o1;
    }
}
/// Represents windows-specific metadata about some path on a remote machine
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct WindowsMetadata {
    /// Represents whether or not a file or directory is an archive
    pub archive: bool,
    /// Represents whether or not a file or directory is compressed
    pub compressed: bool,
    /// Represents whether or not the file or directory is encrypted
    pub encrypted: bool,
    /// Represents whether or not a file or directory is hidden
    pub hidden: bool,
    /// Represents whether or not a directory or user data stream is configured with integrity
    pub integrity_stream: bool,
    /// Represents whether or not a file does not have other attributes set
    pub normal: bool,
    /// Represents whether or not a file or directory is not to be indexed by content indexing
    /// service
    pub not_content_indexed: bool,
    /// Represents whether or not a user data stream is not to be read by the background data
    /// integrity scanner
    pub no_scrub_data: bool,
    /// Represents whether or not the data of a file is not available immediately
    pub offline: bool,
    /// Represents whether or not a file or directory is not fully present locally
    pub recall_on_data_access: bool,
    /// Represents whether or not a file or directory has no physical representation on the local
    /// system (is virtual)
    pub recall_on_open: bool,
    /// Represents whether or not a file or directory has an associated reparse point, or a file is
    /// a symbolic link
    pub reparse_point: bool,
    /// Represents whether or not a file is a sparse file
    pub sparse_file: bool,
    /// Represents whether or not a file or directory is used partially or exclusively by the
    /// operating system
    pub system: bool,
    /// Represents whether or not a file is being used for temporary storage
    pub temporary: bool,
}

#[cfg(feature = "schemars")]
impl WindowsMetadata {
    /// Produces the root JSON schema describing [`WindowsMetadata`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(WindowsMetadata)
    }
}

impl From<u32> for WindowsMetadata {
    /// Create from a windows file attribute bitset
    // Unrecognized attribute bits are silently dropped by from_bits_truncate
    fn from(file_attributes: u32) -> Self {
        let flags = WindowsFileAttributeFlags::from_bits_truncate(file_attributes);
        Self {
            archive: flags.contains(WindowsFileAttributeFlags::ARCHIVE),
            compressed: flags.contains(WindowsFileAttributeFlags::COMPRESSED),
            encrypted: flags.contains(WindowsFileAttributeFlags::ENCRYPTED),
            hidden: flags.contains(WindowsFileAttributeFlags::HIDDEN),
            integrity_stream: flags.contains(WindowsFileAttributeFlags::INTEGRITY_SYSTEM),
            normal: flags.contains(WindowsFileAttributeFlags::NORMAL),
            not_content_indexed: flags.contains(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED),
            no_scrub_data: flags.contains(WindowsFileAttributeFlags::NO_SCRUB_DATA),
            offline: flags.contains(WindowsFileAttributeFlags::OFFLINE),
            recall_on_data_access: flags.contains(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS),
            recall_on_open: flags.contains(WindowsFileAttributeFlags::RECALL_ON_OPEN),
            reparse_point: flags.contains(WindowsFileAttributeFlags::REPARSE_POINT),
            sparse_file: flags.contains(WindowsFileAttributeFlags::SPARSE_FILE),
            system: flags.contains(WindowsFileAttributeFlags::SYSTEM),
            temporary: flags.contains(WindowsFileAttributeFlags::TEMPORARY),
        }
    }
}
impl From<WindowsMetadata> for u32 {
    /// Convert to a windows file attribute bitset
    fn from(metadata: WindowsMetadata) -> Self {
        // Table of (field, flag) pairs; a flag is set iff its field is true
        let pairs = [
            (metadata.archive, WindowsFileAttributeFlags::ARCHIVE),
            (metadata.compressed, WindowsFileAttributeFlags::COMPRESSED),
            (metadata.encrypted, WindowsFileAttributeFlags::ENCRYPTED),
            (metadata.hidden, WindowsFileAttributeFlags::HIDDEN),
            (
                metadata.integrity_stream,
                WindowsFileAttributeFlags::INTEGRITY_SYSTEM,
            ),
            (metadata.normal, WindowsFileAttributeFlags::NORMAL),
            (
                metadata.not_content_indexed,
                WindowsFileAttributeFlags::NOT_CONTENT_INDEXED,
            ),
            (
                metadata.no_scrub_data,
                WindowsFileAttributeFlags::NO_SCRUB_DATA,
            ),
            (metadata.offline, WindowsFileAttributeFlags::OFFLINE),
            (
                metadata.recall_on_data_access,
                WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS,
            ),
            (
                metadata.recall_on_open,
                WindowsFileAttributeFlags::RECALL_ON_OPEN,
            ),
            (
                metadata.reparse_point,
                WindowsFileAttributeFlags::REPARSE_POINT,
            ),
            (metadata.sparse_file, WindowsFileAttributeFlags::SPARSE_FILE),
            (metadata.system, WindowsFileAttributeFlags::SYSTEM),
            (metadata.temporary, WindowsFileAttributeFlags::TEMPORARY),
        ];
        let mut flags = WindowsFileAttributeFlags::empty();
        for (enabled, flag) in pairs {
            if enabled {
                flags.insert(flag);
            }
        }
        flags.bits
    }
}
bitflags! {
    /// Windows FILE_ATTRIBUTE_* bit values
    // NOTE(review): VIRTUAL has no corresponding WindowsMetadata field and
    // is never produced by the conversions above — confirm if intentional
    struct WindowsFileAttributeFlags: u32 {
        const ARCHIVE = 0x20;
        const COMPRESSED = 0x800;
        const ENCRYPTED = 0x4000;
        const HIDDEN = 0x2;
        const INTEGRITY_SYSTEM = 0x8000;
        const NORMAL = 0x80;
        const NOT_CONTENT_INDEXED = 0x2000;
        const NO_SCRUB_DATA = 0x20000;
        const OFFLINE = 0x1000;
        const RECALL_ON_DATA_ACCESS = 0x400000;
        const RECALL_ON_OPEN = 0x40000;
        const REPARSE_POINT = 0x400;
        const SPARSE_FILE = 0x200;
        const SYSTEM = 0x4;
        const TEMPORARY = 0x100;
        const VIRTUAL = 0x10000;
    }
}

@ -1,137 +0,0 @@
use derive_more::{Display, Error};
use portable_pty::PtySize as PortablePtySize;
use serde::{Deserialize, Serialize};
use std::{fmt, num::ParseIntError, str::FromStr};
/// Represents the size associated with a remote PTY
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct PtySize {
/// Number of lines of text
pub rows: u16,
/// Number of columns of text
pub cols: u16,
/// Width of a cell in pixels. Note that some systems never fill this value and ignore it.
#[serde(default)]
pub pixel_width: u16,
/// Height of a cell in pixels. Note that some systems never fill this value and ignore it.
#[serde(default)]
pub pixel_height: u16,
}
impl PtySize {
/// Creates new size using just rows and columns
pub fn from_rows_and_cols(rows: u16, cols: u16) -> Self {
Self {
rows,
cols,
..Default::default()
}
}
}
#[cfg(feature = "schemars")]
impl PtySize {
    /// Produces the JSON schema describing [`PtySize`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(PtySize)
    }
}
impl From<PortablePtySize> for PtySize {
    /// Converts a `portable_pty` size into this crate's [`PtySize`],
    /// copying each dimension field across verbatim.
    fn from(size: PortablePtySize) -> Self {
        let (rows, cols) = (size.rows, size.cols);
        let (pixel_width, pixel_height) = (size.pixel_width, size.pixel_height);
        Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        }
    }
}
impl From<PtySize> for PortablePtySize {
    /// Converts this crate's [`PtySize`] back into the `portable_pty`
    /// representation, copying each dimension field across verbatim.
    fn from(size: PtySize) -> Self {
        let (rows, cols) = (size.rows, size.cols);
        let (pixel_width, pixel_height) = (size.pixel_width, size.pixel_height);
        Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        }
    }
}
impl fmt::Display for PtySize {
    /// Prints out `rows,cols[,pixel_width,pixel_height]` where the
    /// pixel width and pixel height are only included if either
    /// one of them is not zero
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        } = *self;
        if pixel_width == 0 && pixel_height == 0 {
            write!(f, "{},{}", rows, cols)
        } else {
            write!(f, "{},{},{},{}", rows, cols, pixel_width, pixel_height)
        }
    }
}
impl Default for PtySize {
fn default() -> Self {
PtySize {
rows: 24,
cols: 80,
pixel_width: 0,
pixel_height: 0,
}
}
}
/// Errors that can arise when parsing a [`PtySize`] from a string
#[derive(Clone, Debug, PartialEq, Eq, Display, Error)]
pub enum PtySizeParseError {
    /// The rows token was absent from the input
    MissingRows,
    /// The columns token was absent from the input
    MissingColumns,
    /// The rows token was present but was not a valid number
    InvalidRows(ParseIntError),
    /// The columns token was present but was not a valid number
    InvalidColumns(ParseIntError),
    /// The pixel width token was present but was not a valid number
    InvalidPixelWidth(ParseIntError),
    /// The pixel height token was present but was not a valid number
    InvalidPixelHeight(ParseIntError),
}
impl FromStr for PtySize {
    type Err = PtySizeParseError;

    /// Attempts to parse a str into PtySize using one of the following formats:
    ///
    /// * rows,cols (defaults to 0 for pixel_width & pixel_height)
    /// * rows,cols,pixel_width,pixel_height
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.split(',');

        // Rows and columns are mandatory; each missing/bad token reports a
        // distinct error so the caller knows which field was at fault
        let rows = parts
            .next()
            .ok_or(PtySizeParseError::MissingRows)?
            .trim()
            .parse()
            .map_err(PtySizeParseError::InvalidRows)?;
        let cols = parts
            .next()
            .ok_or(PtySizeParseError::MissingColumns)?
            .trim()
            .parse()
            .map_err(PtySizeParseError::InvalidColumns)?;

        // Pixel dimensions are optional and fall back to zero when absent
        let pixel_width = match parts.next() {
            Some(token) => token
                .trim()
                .parse()
                .map_err(PtySizeParseError::InvalidPixelWidth)?,
            None => 0,
        };
        let pixel_height = match parts.next() {
            Some(token) => token
                .trim()
                .parse()
                .map_err(PtySizeParseError::InvalidPixelHeight)?,
            None => 0,
        };

        Ok(Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        })
    }
}

@ -1,45 +0,0 @@
use serde::{Deserialize, Serialize};
use std::{env, path::PathBuf};
/// Represents information about a system
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SystemInfo {
    /// Family of the operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.FAMILY.html
    pub family: String,
    /// Name of the specific operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.OS.html
    pub os: String,
    /// Architecture of the CPU as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.ARCH.html
    pub arch: String,
    /// Current working directory of the running server process
    pub current_dir: PathBuf,
    /// Primary separator for path components for the current platform
    /// as defined in https://doc.rust-lang.org/std/path/constant.MAIN_SEPARATOR.html
    pub main_separator: char,
}
#[cfg(feature = "schemars")]
impl SystemInfo {
    /// Produces the JSON schema describing [`SystemInfo`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SystemInfo)
    }
}
impl Default for SystemInfo {
    /// Builds a [`SystemInfo`] describing the machine this process is running
    /// on, using the compile-time constants from [`std::env::consts`]. The
    /// current directory falls back to an empty path when it cannot be read.
    fn default() -> Self {
        let current_dir = env::current_dir().unwrap_or_default();
        Self {
            family: String::from(env::consts::FAMILY),
            os: String::from(env::consts::OS),
            arch: String::from(env::consts::ARCH),
            current_dir,
            main_separator: std::path::MAIN_SEPARATOR,
        }
    }
}

@ -1,27 +0,0 @@
use serde::{Deserialize, Serialize};
pub(crate) fn deserialize_u128_option<'de, D>(deserializer: D) -> Result<Option<u128>, D::Error>
where
D: serde::Deserializer<'de>,
{
match Option::<String>::deserialize(deserializer)? {
Some(s) => match s.parse::<u128>() {
Ok(value) => Ok(Some(value)),
Err(error) => Err(serde::de::Error::custom(format!(
"Cannot convert to u128 with error: {:?}",
error
))),
},
None => Ok(None),
}
}
/// Serializes an optional `u128` as its decimal string form, or as a unit
/// value when absent (the counterpart of `deserialize_u128_option`).
pub(crate) fn serialize_u128_option<S: serde::Serializer>(
    val: &Option<u128>,
    s: S,
) -> Result<S::Ok, S::Error> {
    if let Some(v) = val.as_ref() {
        v.to_string().serialize(s)
    } else {
        s.serialize_unit()
    }
}

@ -1,3 +1,9 @@
#![doc = include_str!("../README.md")]
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
mod api;
pub use api::*;
@ -7,14 +13,10 @@ pub use client::*;
mod credentials;
pub use credentials::*;
pub mod data;
pub use data::{DistantMsg, DistantRequestData, DistantResponseData, Map};
mod manager;
pub use manager::*;
mod constants;
mod serde_str;
/// Re-export of `distant-net` as `net`
/// Network functionality.
pub use distant_net as net;
/// Protocol structures.
pub use distant_protocol as protocol;

@ -1,7 +0,0 @@
mod client;
mod data;
mod server;
pub use client::*;
pub use data::*;
pub use server::*;

@ -1,761 +0,0 @@
use super::data::{
ConnectionId, ConnectionInfo, ConnectionList, Destination, Extra, ManagerRequest,
ManagerResponse,
};
use crate::{DistantChannel, DistantClient, DistantMsg, DistantRequestData, DistantResponseData};
use distant_net::{
router, Auth, AuthServer, Client, IntoSplit, MpscTransport, OneshotListener, Request, Response,
ServerExt, ServerRef, UntypedTransportRead, UntypedTransportWrite,
};
use log::*;
use std::{
collections::HashMap,
io,
ops::{Deref, DerefMut},
};
use tokio::task::JoinHandle;
mod config;
pub use config::*;
mod ext;
pub use ext::*;
// Router that splits a single underlying transport into two logical channels
// for the client side: one for authentication exchanges and one for manager
// requests/responses.
router!(DistantManagerClientRouter {
    auth_transport: Request<Auth> => Response<Auth>,
    manager_transport: Response<ManagerResponse> => Request<ManagerRequest>,
});
/// Represents a client that can connect to a remote distant manager
pub struct DistantManagerClient {
    // Locally-running server handling the manager's authentication exchanges
    auth: Box<dyn ServerRef>,
    // Typed client used to exchange manager requests/responses
    client: Client<ManagerRequest, ManagerResponse>,
    // Cache of per-connection distant clients so repeated `open_channel`
    // calls reuse the same underlying client instead of reconnecting
    distant_clients: HashMap<ConnectionId, ClientHandle>,
}
impl Drop for DistantManagerClient {
    fn drop(&mut self) {
        // Abort the auth server and the underlying client so their tasks do
        // not linger after this manager client goes away
        self.auth.abort();
        self.client.abort();
    }
}
/// Represents a raw channel between a manager client and some remote server
pub struct RawDistantChannel {
    /// Transport carrying distant requests/responses for this channel
    pub transport: MpscTransport<
        Request<DistantMsg<DistantRequestData>>,
        Response<DistantMsg<DistantResponseData>>,
    >,
    // Task forwarding outbound requests through the manager channel
    forward_task: JoinHandle<()>,
    // Task delivering inbound manager responses into the transport
    mailbox_task: JoinHandle<()>,
}
impl RawDistantChannel {
    /// Aborts both background tasks, severing the channel's request
    /// forwarding and response delivery
    pub fn abort(&self) {
        self.forward_task.abort();
        self.mailbox_task.abort();
    }
}
// Deref to the inner transport so the channel can be used directly as one
impl Deref for RawDistantChannel {
    type Target = MpscTransport<
        Request<DistantMsg<DistantRequestData>>,
        Response<DistantMsg<DistantResponseData>>,
    >;

    fn deref(&self) -> &Self::Target {
        &self.transport
    }
}
impl DerefMut for RawDistantChannel {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.transport
    }
}
// Cached distant client for an open connection, bundled with the background
// tasks that keep its channel alive
struct ClientHandle {
    client: DistantClient,
    forward_task: JoinHandle<()>,
    mailbox_task: JoinHandle<()>,
}
impl Drop for ClientHandle {
    fn drop(&mut self) {
        // Stop the forwarding tasks once the cached client is discarded
        self.forward_task.abort();
        self.mailbox_task.abort();
    }
}
impl DistantManagerClient {
    /// Initializes a client using the provided [`UntypedTransport`]
    ///
    /// The transport is routed into an auth channel (served locally using the
    /// config's callbacks) and a manager request/response channel that backs
    /// all the methods below.
    pub fn new<T>(config: DistantManagerClientConfig, transport: T) -> io::Result<Self>
    where
        T: IntoSplit + 'static,
        T::Read: UntypedTransportRead + 'static,
        T::Write: UntypedTransportWrite + 'static,
    {
        let DistantManagerClientRouter {
            auth_transport,
            manager_transport,
            ..
        } = DistantManagerClientRouter::new(transport);

        // Initialize our client with manager request/response transport
        let (writer, reader) = manager_transport.into_split();
        let client = Client::new(writer, reader)?;

        // Initialize our auth handler with auth/auth transport
        let auth = AuthServer {
            on_challenge: config.on_challenge,
            on_verify: config.on_verify,
            on_info: config.on_info,
            on_error: config.on_error,
        }
        .start(OneshotListener::from_value(auth_transport.into_split()))?;

        Ok(Self {
            auth,
            client,
            distant_clients: HashMap::new(),
        })
    }

    /// Request that the manager launches a new server at the given `destination`
    /// with `extra` being passed for destination-specific details, returning the new
    /// `destination` of the spawned server to connect to
    pub async fn launch(
        &mut self,
        destination: impl Into<Destination>,
        extra: impl Into<Extra>,
    ) -> io::Result<Destination> {
        let destination = Box::new(destination.into());
        let extra = extra.into();
        trace!("launch({}, {})", destination, extra);

        let res = self
            .client
            .send(ManagerRequest::Launch { destination, extra })
            .await?;
        // Any payload other than Launched/Error means the manager misbehaved
        match res.payload {
            ManagerResponse::Launched { destination } => Ok(destination),
            ManagerResponse::Error(x) => Err(x.into()),
            x => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Got unexpected response: {:?}", x),
            )),
        }
    }

    /// Request that the manager establishes a new connection at the given `destination`
    /// with `extra` being passed for destination-specific details
    pub async fn connect(
        &mut self,
        destination: impl Into<Destination>,
        extra: impl Into<Extra>,
    ) -> io::Result<ConnectionId> {
        let destination = Box::new(destination.into());
        let extra = extra.into();
        trace!("connect({}, {})", destination, extra);

        let res = self
            .client
            .send(ManagerRequest::Connect { destination, extra })
            .await?;
        match res.payload {
            ManagerResponse::Connected { id } => Ok(id),
            ManagerResponse::Error(x) => Err(x.into()),
            x => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Got unexpected response: {:?}", x),
            )),
        }
    }

    /// Establishes a channel with the server represented by the `connection_id`,
    /// returning a [`DistantChannel`] acting as the connection
    ///
    /// ### Note
    ///
    /// Multiple calls to open a channel against the same connection will result in
    /// clones of the same [`DistantChannel`] rather than establishing a duplicate
    /// remote connection to the same server
    pub async fn open_channel(
        &mut self,
        connection_id: ConnectionId,
    ) -> io::Result<DistantChannel> {
        trace!("open_channel({})", connection_id);
        // Reuse the cached client for this connection when one exists
        if let Some(handle) = self.distant_clients.get(&connection_id) {
            Ok(handle.client.clone_channel())
        } else {
            // Otherwise open a fresh raw channel, wrap it in a client, and
            // cache the client plus its tasks for subsequent calls
            let RawDistantChannel {
                transport,
                forward_task,
                mailbox_task,
            } = self.open_raw_channel(connection_id).await?;
            let (writer, reader) = transport.into_split();
            let client = DistantClient::new(writer, reader)?;
            let channel = client.clone_channel();
            self.distant_clients.insert(
                connection_id,
                ClientHandle {
                    client,
                    forward_task,
                    mailbox_task,
                },
            );
            Ok(channel)
        }
    }

    /// Establishes a channel with the server represented by the `connection_id`,
    /// returning a [`Transport`] acting as the connection
    ///
    /// ### Note
    ///
    /// Multiple calls to open a channel against the same connection will result in establishing a
    /// duplicate remote connections to the same server, so take care when using this method
    pub async fn open_raw_channel(
        &mut self,
        connection_id: ConnectionId,
    ) -> io::Result<RawDistantChannel> {
        trace!("open_raw_channel({})", connection_id);
        let mut mailbox = self
            .client
            .mail(ManagerRequest::OpenChannel { id: connection_id })
            .await?;

        // Wait for the first response, which should be channel confirmation
        let channel_id = match mailbox.next().await {
            Some(response) => match response.payload {
                ManagerResponse::ChannelOpened { id } => Ok(id),
                ManagerResponse::Error(x) => Err(x.into()),
                x => Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Got unexpected response: {:?}", x),
                )),
            },
            None => Err(io::Error::new(
                io::ErrorKind::ConnectionAborted,
                "open_channel mailbox aborted",
            )),
        }?;

        // Spawn reader and writer tasks to forward requests and replies
        // using our opened channel
        let (t1, t2) = MpscTransport::pair(1);
        let (mut writer, mut reader) = t1.into_split();

        // Delivers channel responses from the manager into our transport,
        // exiting once the manager reports the channel closed
        let mailbox_task = tokio::spawn(async move {
            use distant_net::TypedAsyncWrite;
            while let Some(response) = mailbox.next().await {
                match response.payload {
                    ManagerResponse::Channel { response, .. } => {
                        if let Err(x) = writer.write(response).await {
                            error!("[Conn {}] {}", connection_id, x);
                        }
                    }
                    ManagerResponse::ChannelClosed { .. } => break,
                    _ => continue,
                }
            }
        });

        // Forwards requests written to our transport through the manager
        let mut manager_channel = self.client.clone_channel();
        let forward_task = tokio::spawn(async move {
            use distant_net::TypedAsyncRead;
            loop {
                match reader.read().await {
                    Ok(Some(request)) => {
                        // NOTE: In this situation, we do not expect a response to this
                        // request (even if the server sends something back)
                        if let Err(x) = manager_channel
                            .fire(ManagerRequest::Channel {
                                id: channel_id,
                                request,
                            })
                            .await
                        {
                            error!("[Conn {}] {}", connection_id, x);
                        }
                    }
                    Ok(None) => break,
                    Err(x) => {
                        // NOTE(review): read errors are logged and retried rather
                        // than terminating the loop — confirm this cannot spin on
                        // a persistent transport error
                        error!("[Conn {}] {}", connection_id, x);
                        continue;
                    }
                }
            }
        });

        Ok(RawDistantChannel {
            transport: t2,
            forward_task,
            mailbox_task,
        })
    }

    /// Retrieves information about a specific connection
    pub async fn info(&mut self, id: ConnectionId) -> io::Result<ConnectionInfo> {
        trace!("info({})", id);
        let res = self.client.send(ManagerRequest::Info { id }).await?;
        match res.payload {
            ManagerResponse::Info(info) => Ok(info),
            ManagerResponse::Error(x) => Err(x.into()),
            x => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Got unexpected response: {:?}", x),
            )),
        }
    }

    /// Kills the specified connection
    pub async fn kill(&mut self, id: ConnectionId) -> io::Result<()> {
        trace!("kill({})", id);
        let res = self.client.send(ManagerRequest::Kill { id }).await?;
        match res.payload {
            ManagerResponse::Killed => Ok(()),
            ManagerResponse::Error(x) => Err(x.into()),
            x => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Got unexpected response: {:?}", x),
            )),
        }
    }

    /// Retrieves a list of active connections
    pub async fn list(&mut self) -> io::Result<ConnectionList> {
        trace!("list()");
        let res = self.client.send(ManagerRequest::List).await?;
        match res.payload {
            ManagerResponse::List(list) => Ok(list),
            ManagerResponse::Error(x) => Err(x.into()),
            x => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Got unexpected response: {:?}", x),
            )),
        }
    }

    /// Requests that the manager shuts down
    pub async fn shutdown(&mut self) -> io::Result<()> {
        trace!("shutdown()");
        let res = self.client.send(ManagerRequest::Shutdown).await?;
        match res.payload {
            ManagerResponse::Shutdown => Ok(()),
            ManagerResponse::Error(x) => Err(x.into()),
            x => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Got unexpected response: {:?}", x),
            )),
        }
    }
}
#[cfg(test)]
mod tests {
    // Each test drives DistantManagerClient over an in-memory framed
    // transport: a spawned task plays the remote manager by reading the
    // request off the other end and writing back a canned response.
    use super::*;
    use crate::data::{Error, ErrorKind};
    use distant_net::{
        FramedTransport, InmemoryTransport, PlainCodec, UntypedTransportRead, UntypedTransportWrite,
    };

    // Creates a client along with the transport representing the manager side
    fn setup() -> (
        DistantManagerClient,
        FramedTransport<InmemoryTransport, PlainCodec>,
    ) {
        let (t1, t2) = FramedTransport::pair(100);
        let client =
            DistantManagerClient::new(DistantManagerClientConfig::with_empty_prompts(), t1)
                .unwrap();
        (client, t2)
    }

    // Canonical protocol-level error used across the error-path tests
    #[inline]
    fn test_error() -> Error {
        Error {
            kind: ErrorKind::Interrupted,
            description: "test error".to_string(),
        }
    }

    // Same error converted to io::Error for comparisons against client output
    #[inline]
    fn test_io_error() -> io::Error {
        test_error().into()
    }

    #[tokio::test]
    async fn connect_should_report_error_if_receives_error_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Error(test_error()),
                ))
                .await
                .unwrap();
        });

        let err = client
            .connect(
                "scheme://host".parse::<Destination>().unwrap(),
                "key=value".parse::<Extra>().unwrap(),
            )
            .await
            .unwrap_err();
        assert_eq!(err.kind(), test_io_error().kind());
        assert_eq!(err.to_string(), test_io_error().to_string());
    }

    #[tokio::test]
    async fn connect_should_report_error_if_receives_unexpected_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            // Shutdown is a valid payload but not a valid reply to connect
            transport
                .write(Response::new(request.id, ManagerResponse::Shutdown))
                .await
                .unwrap();
        });

        let err = client
            .connect(
                "scheme://host".parse::<Destination>().unwrap(),
                "key=value".parse::<Extra>().unwrap(),
            )
            .await
            .unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    #[tokio::test]
    async fn connect_should_return_id_from_successful_response() {
        let (mut client, mut transport) = setup();

        let expected_id = 999;
        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Connected { id: expected_id },
                ))
                .await
                .unwrap();
        });

        let id = client
            .connect(
                "scheme://host".parse::<Destination>().unwrap(),
                "key=value".parse::<Extra>().unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(id, expected_id);
    }

    #[tokio::test]
    async fn info_should_report_error_if_receives_error_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Error(test_error()),
                ))
                .await
                .unwrap();
        });

        let err = client.info(123).await.unwrap_err();
        assert_eq!(err.kind(), test_io_error().kind());
        assert_eq!(err.to_string(), test_io_error().to_string());
    }

    #[tokio::test]
    async fn info_should_report_error_if_receives_unexpected_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(request.id, ManagerResponse::Shutdown))
                .await
                .unwrap();
        });

        let err = client.info(123).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    #[tokio::test]
    async fn info_should_return_connection_info_from_successful_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            let info = ConnectionInfo {
                id: 123,
                destination: "scheme://host".parse::<Destination>().unwrap(),
                extra: "key=value".parse::<Extra>().unwrap(),
            };
            transport
                .write(Response::new(request.id, ManagerResponse::Info(info)))
                .await
                .unwrap();
        });

        let info = client.info(123).await.unwrap();
        assert_eq!(info.id, 123);
        assert_eq!(
            info.destination,
            "scheme://host".parse::<Destination>().unwrap()
        );
        assert_eq!(info.extra, "key=value".parse::<Extra>().unwrap());
    }

    #[tokio::test]
    async fn list_should_report_error_if_receives_error_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Error(test_error()),
                ))
                .await
                .unwrap();
        });

        let err = client.list().await.unwrap_err();
        assert_eq!(err.kind(), test_io_error().kind());
        assert_eq!(err.to_string(), test_io_error().to_string());
    }

    #[tokio::test]
    async fn list_should_report_error_if_receives_unexpected_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(request.id, ManagerResponse::Shutdown))
                .await
                .unwrap();
        });

        let err = client.list().await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    #[tokio::test]
    async fn list_should_return_connection_list_from_successful_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            let mut list = ConnectionList::new();
            list.insert(123, "scheme://host".parse::<Destination>().unwrap());
            transport
                .write(Response::new(request.id, ManagerResponse::List(list)))
                .await
                .unwrap();
        });

        let list = client.list().await.unwrap();
        assert_eq!(list.len(), 1);
        assert_eq!(
            list.get(&123).expect("Connection list missing item"),
            &"scheme://host".parse::<Destination>().unwrap()
        );
    }

    #[tokio::test]
    async fn kill_should_report_error_if_receives_error_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Error(test_error()),
                ))
                .await
                .unwrap();
        });

        let err = client.kill(123).await.unwrap_err();
        assert_eq!(err.kind(), test_io_error().kind());
        assert_eq!(err.to_string(), test_io_error().to_string());
    }

    #[tokio::test]
    async fn kill_should_report_error_if_receives_unexpected_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(request.id, ManagerResponse::Shutdown))
                .await
                .unwrap();
        });

        let err = client.kill(123).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    #[tokio::test]
    async fn kill_should_return_success_from_successful_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(request.id, ManagerResponse::Killed))
                .await
                .unwrap();
        });

        client.kill(123).await.unwrap();
    }

    #[tokio::test]
    async fn shutdown_should_report_error_if_receives_error_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Connected { id: 0 },
                ))
                .await
                .unwrap();
        });

        let err = client.shutdown().await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    #[tokio::test]
    async fn shutdown_should_report_error_if_receives_unexpected_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(
                    request.id,
                    ManagerResponse::Error(test_error()),
                ))
                .await
                .unwrap();
        });

        let err = client.shutdown().await.unwrap_err();
        assert_eq!(err.kind(), test_io_error().kind());
        assert_eq!(err.to_string(), test_io_error().to_string());
    }

    #[tokio::test]
    async fn shutdown_should_return_success_from_successful_response() {
        let (mut client, mut transport) = setup();

        tokio::spawn(async move {
            let request = transport
                .read::<Request<ManagerRequest>>()
                .await
                .unwrap()
                .unwrap();
            transport
                .write(Response::new(request.id, ManagerResponse::Shutdown))
                .await
                .unwrap();
        });

        client.shutdown().await.unwrap();
    }
}

@ -1,85 +0,0 @@
use distant_net::{AuthChallengeFn, AuthErrorFn, AuthInfoFn, AuthVerifyFn, AuthVerifyKind};
use log::*;
use std::io;
/// Configuration to use when creating a new [`DistantManagerClient`](super::DistantManagerClient)
pub struct DistantManagerClientConfig {
    /// Invoked to answer authentication challenge questions
    pub on_challenge: Box<AuthChallengeFn>,
    /// Invoked to confirm/deny a verification request (e.g. host approval)
    pub on_verify: Box<AuthVerifyFn>,
    /// Invoked with informational text from the server
    pub on_info: Box<AuthInfoFn>,
    /// Invoked with error details from the server
    pub on_error: Box<AuthErrorFn>,
}
impl DistantManagerClientConfig {
    /// Creates a new config with prompts that return empty strings
    pub fn with_empty_prompts() -> Self {
        Self::with_prompts(|_| Ok("".to_string()), |_| Ok("".to_string()))
    }

    /// Creates a new config with two prompts
    ///
    /// * `password_prompt` - used for prompting for a secret, and should not display what is typed
    /// * `text_prompt` - used for general text, and is okay to display what is typed
    pub fn with_prompts<PP, PT>(password_prompt: PP, text_prompt: PT) -> Self
    where
        PP: Fn(&str) -> io::Result<String> + Send + Sync + 'static,
        PT: Fn(&str) -> io::Result<String> + Send + Sync + 'static,
    {
        Self {
            on_challenge: Box::new(move |questions, _extra| {
                trace!("[manager client] on_challenge({questions:?}, {_extra:?})");
                let mut answers = Vec::new();
                for question in questions.iter() {
                    // Contains all prompt lines including same line
                    let mut lines = question.text.split('\n').collect::<Vec<_>>();

                    // Line that is prompt on same line as answer
                    // (split always yields at least one element, so pop cannot fail)
                    let line = lines.pop().unwrap();

                    // Go ahead and display all other lines
                    for line in lines.into_iter() {
                        eprintln!("{}", line);
                    }

                    // Get an answer from user input, or use a blank string as an answer
                    // if we fail to get input from the user
                    let answer = password_prompt(line).unwrap_or_default();
                    answers.push(answer);
                }
                answers
            }),
            on_verify: Box::new(move |kind, text| {
                trace!("[manager client] on_verify({kind}, {text})");
                match kind {
                    AuthVerifyKind::Host => {
                        eprintln!("{}", text);
                        // Only an explicit yes confirms; any error or other
                        // input is treated as a denial
                        match text_prompt("Enter [y/N]> ") {
                            Ok(answer) => {
                                trace!("Verify? Answer = '{answer}'");
                                matches!(answer.trim(), "y" | "Y" | "yes" | "YES")
                            }
                            Err(x) => {
                                error!("Failed verification: {x}");
                                false
                            }
                        }
                    }
                    x => {
                        error!("Unsupported verify kind: {x}");
                        false
                    }
                }
            }),
            on_info: Box::new(|text| {
                trace!("[manager client] on_info({text})");
                println!("{}", text);
            }),
            on_error: Box::new(|kind, text| {
                trace!("[manager client] on_error({kind}, {text})");
                eprintln!("{}: {}", kind, text);
            }),
        }
    }
}

@ -1,14 +0,0 @@
mod tcp;
pub use tcp::*;
#[cfg(unix)]
mod unix;
#[cfg(unix)]
pub use unix::*;
#[cfg(windows)]
mod windows;
#[cfg(windows)]
pub use windows::*;

@ -1,50 +0,0 @@
use crate::{DistantManagerClient, DistantManagerClientConfig};
use async_trait::async_trait;
use distant_net::{Codec, FramedTransport, TcpTransport};
use std::{convert, net::SocketAddr};
use tokio::{io, time::Duration};
#[async_trait]
pub trait TcpDistantManagerClientExt {
    /// Connect to a remote TCP server using the provided information
    async fn connect<C>(
        config: DistantManagerClientConfig,
        addr: SocketAddr,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        C: Codec + Send + 'static;

    /// Connect to a remote TCP server, timing out after duration has passed
    ///
    /// Default implementation wraps [`Self::connect`] in a timeout, mapping
    /// expiry to an `io::ErrorKind::TimedOut` error.
    async fn connect_timeout<C>(
        config: DistantManagerClientConfig,
        addr: SocketAddr,
        codec: C,
        duration: Duration,
    ) -> io::Result<DistantManagerClient>
    where
        C: Codec + Send + 'static,
    {
        tokio::time::timeout(duration, Self::connect(config, addr, codec))
            .await
            .map_err(|x| io::Error::new(io::ErrorKind::TimedOut, x))
            // Flatten Result<Result<..>> from the timeout wrapper
            .and_then(convert::identity)
    }
}
#[async_trait]
impl TcpDistantManagerClientExt for DistantManagerClient {
    /// Connect to a remote TCP server using the provided information
    async fn connect<C>(
        config: DistantManagerClientConfig,
        addr: SocketAddr,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        C: Codec + Send + 'static,
    {
        // Establish the raw TCP transport, frame it with the codec, and hand
        // it off to the client constructor
        let transport = FramedTransport::new(TcpTransport::connect(addr).await?, codec);
        Self::new(config, transport)
    }
}

@ -1,54 +0,0 @@
use crate::{DistantManagerClient, DistantManagerClientConfig};
use async_trait::async_trait;
use distant_net::{Codec, FramedTransport, UnixSocketTransport};
use std::{convert, path::Path};
use tokio::{io, time::Duration};
#[async_trait]
pub trait UnixSocketDistantManagerClientExt {
    /// Connect to a proxy unix socket
    async fn connect<P, C>(
        config: DistantManagerClientConfig,
        path: P,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        P: AsRef<Path> + Send,
        C: Codec + Send + 'static;

    /// Connect to a proxy unix socket, timing out after duration has passed
    ///
    /// Default implementation wraps [`Self::connect`] in a timeout, mapping
    /// expiry to an `io::ErrorKind::TimedOut` error.
    async fn connect_timeout<P, C>(
        config: DistantManagerClientConfig,
        path: P,
        codec: C,
        duration: Duration,
    ) -> io::Result<DistantManagerClient>
    where
        P: AsRef<Path> + Send,
        C: Codec + Send + 'static,
    {
        tokio::time::timeout(duration, Self::connect(config, path, codec))
            .await
            .map_err(|x| io::Error::new(io::ErrorKind::TimedOut, x))
            // Flatten Result<Result<..>> from the timeout wrapper
            .and_then(convert::identity)
    }
}
#[async_trait]
impl UnixSocketDistantManagerClientExt for DistantManagerClient {
    /// Connect to a proxy unix socket
    ///
    /// Establishes the socket transport, frames it with the provided codec,
    /// and constructs the manager client, propagating any I/O error.
    async fn connect<P, C>(
        config: DistantManagerClientConfig,
        path: P,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        P: AsRef<Path> + Send,
        C: Codec + Send + 'static,
    {
        let p = path.as_ref();
        let transport = UnixSocketTransport::connect(p).await?;
        let transport = FramedTransport::new(transport, codec);
        // `new` already returns io::Result, so wrapping in Ok(...?) was
        // redundant (clippy::needless_question_mark); this also matches the
        // TCP extension impl
        DistantManagerClient::new(config, transport)
    }
}

@ -1,91 +0,0 @@
use crate::{DistantManagerClient, DistantManagerClientConfig};
use async_trait::async_trait;
use distant_net::{Codec, FramedTransport, WindowsPipeTransport};
use std::{
convert,
ffi::{OsStr, OsString},
};
use tokio::{io, time::Duration};
#[async_trait]
pub trait WindowsPipeDistantManagerClientExt {
    /// Connect to a server listening on a Windows pipe at the specified address
    /// using the given codec
    async fn connect<A, C>(
        config: DistantManagerClientConfig,
        addr: A,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        A: AsRef<OsStr> + Send,
        C: Codec + Send + 'static;

    /// Connect to a server listening on a Windows pipe at the specified address
    /// via `\\.\pipe\{name}` using the given codec
    async fn connect_local<N, C>(
        config: DistantManagerClientConfig,
        name: N,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        N: AsRef<OsStr> + Send,
        C: Codec + Send + 'static,
    {
        // Prefix the bare name with the local pipe namespace
        let mut addr = OsString::from(r"\\.\pipe\");
        addr.push(name.as_ref());
        Self::connect(config, addr, codec).await
    }

    /// Connect to a server listening on a Windows pipe at the specified address
    /// using the given codec, timing out after duration has passed
    async fn connect_timeout<A, C>(
        config: DistantManagerClientConfig,
        addr: A,
        codec: C,
        duration: Duration,
    ) -> io::Result<DistantManagerClient>
    where
        A: AsRef<OsStr> + Send,
        C: Codec + Send + 'static,
    {
        tokio::time::timeout(duration, Self::connect(config, addr, codec))
            .await
            .map_err(|x| io::Error::new(io::ErrorKind::TimedOut, x))
            // Flatten Result<Result<..>> from the timeout wrapper
            .and_then(convert::identity)
    }

    /// Connect to a server listening on a Windows pipe at the specified address
    /// via `\\.\pipe\{name}` using the given codec, timing out after duration has passed
    async fn connect_local_timeout<N, C>(
        config: DistantManagerClientConfig,
        name: N,
        codec: C,
        duration: Duration,
    ) -> io::Result<DistantManagerClient>
    where
        N: AsRef<OsStr> + Send,
        C: Codec + Send + 'static,
    {
        // Prefix the bare name with the local pipe namespace
        let mut addr = OsString::from(r"\\.\pipe\");
        addr.push(name.as_ref());
        Self::connect_timeout(config, addr, codec, duration).await
    }
}
#[async_trait]
impl WindowsPipeDistantManagerClientExt for DistantManagerClient {
    /// Connect to a server listening on a Windows pipe at the specified
    /// address using the given codec, propagating any I/O error.
    async fn connect<A, C>(
        config: DistantManagerClientConfig,
        addr: A,
        codec: C,
    ) -> io::Result<DistantManagerClient>
    where
        A: AsRef<OsStr> + Send,
        C: Codec + Send + 'static,
    {
        let a = addr.as_ref();
        let transport = WindowsPipeTransport::connect(a).await?;
        let transport = FramedTransport::new(transport, codec);
        // `new` already returns io::Result, so wrapping in Ok(...?) was
        // redundant (clippy::needless_question_mark); this also matches the
        // TCP extension impl
        DistantManagerClient::new(config, transport)
    }
}

@ -1,2 +0,0 @@
/// Represents extra data included for connections (alias of the generic
/// string-to-string [`Map`](crate::data::Map) used across the protocol)
pub type Extra = crate::data::Map;

@ -1,5 +0,0 @@
/// Id associated with an active connection
pub type ConnectionId = u64;

/// Id associated with an open channel
pub type ChannelId = u64;

@ -1,72 +0,0 @@
use super::{ChannelId, ConnectionId, Destination, Extra};
use crate::{DistantMsg, DistantRequestData};
use distant_net::Request;
use serde::{Deserialize, Serialize};
/// Requests that can be sent to a distant manager; doubles as a clap
/// subcommand when the `clap` feature is enabled
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::Subcommand))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum ManagerRequest {
    /// Launch a server using the manager
    Launch {
        // NOTE: Boxed per clippy's large_enum_variant warning
        destination: Box<Destination>,

        /// Extra details specific to the connection
        #[cfg_attr(feature = "clap", clap(short, long, action = clap::ArgAction::Append))]
        extra: Extra,
    },

    /// Initiate a connection through the manager
    Connect {
        // NOTE: Boxed per clippy's large_enum_variant warning
        destination: Box<Destination>,

        /// Extra details specific to the connection
        #[cfg_attr(feature = "clap", clap(short, long, action = clap::ArgAction::Append))]
        extra: Extra,
    },

    /// Opens a channel for communication with a server
    // Not exposed on the CLI; only meaningful over the wire
    #[cfg_attr(feature = "clap", clap(skip))]
    OpenChannel {
        /// Id of the connection
        id: ConnectionId,
    },

    /// Sends data through channel
    #[cfg_attr(feature = "clap", clap(skip))]
    Channel {
        /// Id of the channel
        id: ChannelId,

        /// Request to send to through the channel
        #[cfg_attr(feature = "clap", clap(skip = skipped_request()))]
        request: Request<DistantMsg<DistantRequestData>>,
    },

    /// Closes an open channel
    #[cfg_attr(feature = "clap", clap(skip))]
    CloseChannel {
        /// Id of the channel to close
        id: ChannelId,
    },

    /// Retrieve information about a specific connection
    Info { id: ConnectionId },

    /// Kill a specific connection
    Kill { id: ConnectionId },

    /// Retrieve list of connections being managed
    List,

    /// Signals the manager to shutdown
    Shutdown,
}
/// Produces some default request, purely to satisfy clap
///
/// Used as the placeholder value for the `request` field skipped by the clap
/// derive above; never sent over the wire by the CLI.
#[cfg(feature = "clap")]
fn skipped_request() -> Request<DistantMsg<DistantRequestData>> {
    Request::new(DistantMsg::Single(DistantRequestData::SystemInfo {}))
}

@ -1,698 +0,0 @@
use crate::{
ChannelId, ConnectionId, ConnectionInfo, ConnectionList, Destination, Extra, ManagerRequest,
ManagerResponse,
};
use async_trait::async_trait;
use distant_net::{
router, Auth, AuthClient, Client, IntoSplit, Listener, MpscListener, Request, Response, Server,
ServerCtx, ServerExt, UntypedTransportRead, UntypedTransportWrite,
};
use log::*;
use std::{collections::HashMap, io, sync::Arc};
use tokio::{
sync::{mpsc, Mutex, RwLock},
task::JoinHandle,
};
mod config;
pub use config::*;
mod connection;
pub use connection::*;
mod ext;
pub use ext::*;
mod handler;
pub use handler::*;
mod r#ref;
pub use r#ref::*;
// Generates the `DistantManagerRouter` type, which multiplexes a single raw
// transport into two logical transports: one carrying the auth exchange and
// one carrying manager requests/responses (see its use in `DistantManager::start`)
router!(DistantManagerRouter {
    auth_transport: Response<Auth> => Request<Auth>,
    manager_transport: Request<ManagerRequest> => Response<ManagerResponse>,
});
/// Represents a manager of multiple distant server connections
pub struct DistantManager {
    /// Receives authentication clients to feed into local data of server
    auth_client_rx: Mutex<mpsc::Receiver<AuthClient>>,

    /// Configuration settings for the server
    config: DistantManagerConfig,

    /// Mapping of connection id -> connection
    connections: RwLock<HashMap<ConnectionId, DistantManagerConnection>>,

    /// Handlers for launch requests
    launch_handlers: Arc<RwLock<HashMap<String, BoxedLaunchHandler>>>,

    /// Handlers for connect requests
    connect_handlers: Arc<RwLock<HashMap<String, BoxedConnectHandler>>>,

    /// Primary task of server — the accept loop spawned in [`DistantManager::start`];
    /// aborted when a shutdown request is processed
    task: JoinHandle<()>,
}
impl DistantManager {
    /// Initializes a new instance of [`DistantManager`] using the provided listener,
    /// returning a [`DistantManagerRef`] that can be used to register launch/connect
    /// handlers while the server runs
    pub fn start<L, T>(
        mut config: DistantManagerConfig,
        mut listener: L,
    ) -> io::Result<DistantManagerRef>
    where
        L: Listener<Output = T> + 'static,
        T: IntoSplit + Send + 'static,
        T::Read: UntypedTransportRead + 'static,
        T::Write: UntypedTransportWrite + 'static,
    {
        let (conn_tx, mpsc_listener) = MpscListener::channel(config.connection_buffer_size);
        let (auth_client_tx, auth_client_rx) = mpsc::channel(1);

        // Spawn task that uses our input listener to get both auth and manager events,
        // forwarding manager events to the internal mpsc listener
        let task = tokio::spawn(async move {
            while let Ok(transport) = listener.accept().await {
                // Split the raw transport into separate auth and manager transports
                let DistantManagerRouter {
                    auth_transport,
                    manager_transport,
                    ..
                } = DistantManagerRouter::new(transport);

                let (writer, reader) = auth_transport.into_split();
                let client = match Client::new(writer, reader) {
                    Ok(client) => client,
                    Err(x) => {
                        // Skip this connection but keep accepting others
                        error!("Creating auth client failed: {}", x);
                        continue;
                    }
                };
                let auth_client = AuthClient::from(client);

                // Forward auth client for new connection in server
                if auth_client_tx.send(auth_client).await.is_err() {
                    break;
                }

                // Forward connected and routed transport to server
                if conn_tx.send(manager_transport.into_split()).await.is_err() {
                    break;
                }
            }
        });

        // Handlers live behind an Arc so the returned ref can hold Weak pointers,
        // letting callers register handlers without keeping the server alive
        let launch_handlers = Arc::new(RwLock::new(config.launch_handlers.drain().collect()));
        let weak_launch_handlers = Arc::downgrade(&launch_handlers);
        let connect_handlers = Arc::new(RwLock::new(config.connect_handlers.drain().collect()));
        let weak_connect_handlers = Arc::downgrade(&connect_handlers);

        let server_ref = Self {
            auth_client_rx: Mutex::new(auth_client_rx),
            config,
            launch_handlers,
            connect_handlers,
            connections: RwLock::new(HashMap::new()),
            task,
        }
        .start(mpsc_listener)?;

        Ok(DistantManagerRef {
            launch_handlers: weak_launch_handlers,
            connect_handlers: weak_connect_handlers,
            inner: server_ref,
        })
    }

    /// Launches a new server at the specified `destination` using the given `extra` information
    /// and authentication client (if needed) to retrieve additional information needed to
    /// enter the destination prior to starting the server, returning the destination of the
    /// launched server
    async fn launch(
        &self,
        destination: Destination,
        extra: Extra,
        auth: Option<&mut AuthClient>,
    ) -> io::Result<Destination> {
        let auth = auth.ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::Other,
                "Authentication client not initialized",
            )
        })?;

        // Prefer the destination's explicit scheme, else the configured fallback;
        // lowercased so handler lookup is effectively case-insensitive
        let scheme = match destination.scheme.as_deref() {
            Some(scheme) => {
                trace!("Using scheme {}", scheme);
                scheme
            }
            None => {
                trace!(
                    "Using fallback scheme of {}",
                    self.config.launch_fallback_scheme.as_str()
                );
                self.config.launch_fallback_scheme.as_str()
            }
        }
        .to_lowercase();

        let credentials = {
            // Read lock is held only for the duration of the handler invocation
            let lock = self.launch_handlers.read().await;
            let handler = lock.get(&scheme).ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("No launch handler registered for {}", scheme),
                )
            })?;
            handler.launch(&destination, &extra, auth).await?
        };

        Ok(credentials)
    }

    /// Connects to a new server at the specified `destination` using the given `extra` information
    /// and authentication client (if needed) to retrieve additional information needed to
    /// establish the connection to the server
    async fn connect(
        &self,
        destination: Destination,
        extra: Extra,
        auth: Option<&mut AuthClient>,
    ) -> io::Result<ConnectionId> {
        let auth = auth.ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::Other,
                "Authentication client not initialized",
            )
        })?;

        // Same scheme-resolution strategy as `launch`, but with the connect fallback
        let scheme = match destination.scheme.as_deref() {
            Some(scheme) => {
                trace!("Using scheme {}", scheme);
                scheme
            }
            None => {
                trace!(
                    "Using fallback scheme of {}",
                    self.config.connect_fallback_scheme.as_str()
                );
                self.config.connect_fallback_scheme.as_str()
            }
        }
        .to_lowercase();

        let (writer, reader) = {
            let lock = self.connect_handlers.read().await;
            let handler = lock.get(&scheme).ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("No connect handler registered for {}", scheme),
                )
            })?;
            handler.connect(&destination, &extra, auth).await?
        };

        // Track the new connection so it can be queried/killed later by id
        let connection = DistantManagerConnection::new(destination, extra, writer, reader);
        let id = connection.id;
        self.connections.write().await.insert(id, connection);
        Ok(id)
    }

    /// Retrieves information about the connection to the server with the specified `id`
    async fn info(&self, id: ConnectionId) -> io::Result<ConnectionInfo> {
        match self.connections.read().await.get(&id) {
            Some(connection) => Ok(ConnectionInfo {
                id: connection.id,
                destination: connection.destination.clone(),
                extra: connection.extra.clone(),
            }),
            None => Err(io::Error::new(
                io::ErrorKind::NotConnected,
                "No connection found",
            )),
        }
    }

    /// Retrieves a list of connections to servers
    async fn list(&self) -> io::Result<ConnectionList> {
        Ok(ConnectionList(
            self.connections
                .read()
                .await
                .values()
                .map(|conn| (conn.id, conn.destination.clone()))
                .collect(),
        ))
    }

    /// Kills the connection to the server with the specified `id`
    ///
    /// Removing the connection drops it, which aborts its reader/writer tasks
    /// (see `DistantManagerConnection`'s `Drop` impl)
    async fn kill(&self, id: ConnectionId) -> io::Result<()> {
        match self.connections.write().await.remove(&id) {
            Some(_) => Ok(()),
            None => Err(io::Error::new(
                io::ErrorKind::NotConnected,
                "No connection found",
            )),
        }
    }
}
/// Per-connection local data maintained by the manager server for each client
#[derive(Default)]
pub struct DistantManagerServerConnection {
    /// Authentication client that manager can use when establishing a new connection
    /// and needing to get authentication details from the client to move forward
    auth_client: Option<Mutex<AuthClient>>,

    /// Holds on to open channels feeding data back from a server to some connected client,
    /// enabling us to cancel the tasks on demand
    channels: RwLock<HashMap<ChannelId, DistantManagerChannel>>,
}
#[async_trait]
impl Server for DistantManager {
    type Request = ManagerRequest;
    type Response = ManagerResponse;
    type LocalData = DistantManagerServerConnection;

    /// Pairs the newly-accepted server connection with the auth client that the
    /// accept-loop task in [`DistantManager::start`] forwarded for it
    async fn on_accept(&self, local_data: &mut Self::LocalData) {
        local_data.auth_client = self
            .auth_client_rx
            .lock()
            .await
            .recv()
            .await
            .map(Mutex::new);

        // Enable jit handshake
        if let Some(auth_client) = local_data.auth_client.as_ref() {
            auth_client.lock().await.set_jit_handshake(true);
        }
    }

    /// Dispatches each manager request to the appropriate operation, converting
    /// failures into `ManagerResponse::Error` replies rather than dropping them
    async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
        let ServerCtx {
            connection_id,
            request,
            reply,
            local_data,
        } = ctx;

        let response = match request.payload {
            ManagerRequest::Launch { destination, extra } => {
                // Lock the auth client (if any) for the duration of the launch
                let mut auth = match local_data.auth_client.as_ref() {
                    Some(client) => Some(client.lock().await),
                    None => None,
                };

                match self.launch(*destination, extra, auth.as_deref_mut()).await {
                    Ok(destination) => ManagerResponse::Launched { destination },
                    Err(x) => ManagerResponse::Error(x.into()),
                }
            }
            ManagerRequest::Connect { destination, extra } => {
                let mut auth = match local_data.auth_client.as_ref() {
                    Some(client) => Some(client.lock().await),
                    None => None,
                };

                match self.connect(*destination, extra, auth.as_deref_mut()).await {
                    Ok(id) => ManagerResponse::Connected { id },
                    Err(x) => ManagerResponse::Error(x.into()),
                }
            }
            ManagerRequest::OpenChannel { id } => match self.connections.read().await.get(&id) {
                // The reply handle is cloned into the channel so responses from the
                // server can be forwarded back to this client
                Some(connection) => match connection.open_channel(reply.clone()).await {
                    Ok(channel) => {
                        let id = channel.id();
                        local_data.channels.write().await.insert(id, channel);
                        ManagerResponse::ChannelOpened { id }
                    }
                    Err(x) => ManagerResponse::Error(x.into()),
                },
                None => ManagerResponse::Error(
                    io::Error::new(io::ErrorKind::NotConnected, "Connection does not exist").into(),
                ),
            },
            ManagerRequest::Channel { id, request } => {
                match local_data.channels.read().await.get(&id) {
                    // TODO: For now, we are NOT sending back a response to acknowledge
                    //       a successful channel send. We could do this in order for
                    //       the client to listen for a complete send, but is it worth it?
                    Some(channel) => match channel.send(request).await {
                        Ok(_) => return,
                        Err(x) => ManagerResponse::Error(x.into()),
                    },
                    None => ManagerResponse::Error(
                        io::Error::new(
                            io::ErrorKind::NotConnected,
                            "Channel is not open or does not exist",
                        )
                        .into(),
                    ),
                }
            }
            ManagerRequest::CloseChannel { id } => {
                match local_data.channels.write().await.remove(&id) {
                    Some(channel) => match channel.close().await {
                        Ok(_) => ManagerResponse::ChannelClosed { id },
                        Err(x) => ManagerResponse::Error(x.into()),
                    },
                    None => ManagerResponse::Error(
                        io::Error::new(
                            io::ErrorKind::NotConnected,
                            "Channel is not open or does not exist",
                        )
                        .into(),
                    ),
                }
            }
            ManagerRequest::Info { id } => match self.info(id).await {
                Ok(info) => ManagerResponse::Info(info),
                Err(x) => ManagerResponse::Error(x.into()),
            },
            ManagerRequest::List => match self.list().await {
                Ok(list) => ManagerResponse::List(list),
                Err(x) => ManagerResponse::Error(x.into()),
            },
            ManagerRequest::Kill { id } => match self.kill(id).await {
                Ok(()) => ManagerResponse::Killed,
                Err(x) => ManagerResponse::Error(x.into()),
            },
            ManagerRequest::Shutdown => {
                // Acknowledge before tearing the process down
                if let Err(x) = reply.send(ManagerResponse::Shutdown).await {
                    error!("[Conn {}] {}", connection_id, x);
                }

                // Clear out handler state in order to trigger drops
                self.launch_handlers.write().await.clear();
                self.connect_handlers.write().await.clear();

                // Shutdown the primary server task
                self.task.abort();

                // TODO: Perform a graceful shutdown instead of this?
                //       Review https://tokio.rs/tokio/topics/shutdown
                std::process::exit(0);
            }
        };

        if let Err(x) = reply.send(response).await {
            error!("[Conn {}] {}", connection_id, x);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use distant_net::{
        AuthClient, FramedTransport, HeapAuthServer, InmemoryTransport, IntoSplit, MappedListener,
        OneshotListener, PlainCodec, ServerExt, ServerRef,
    };

    /// Create a new server, bypassing the start loop
    fn setup() -> DistantManager {
        let (_, rx) = mpsc::channel(1);
        DistantManager {
            auth_client_rx: Mutex::new(rx),
            config: Default::default(),
            connections: RwLock::new(HashMap::new()),
            launch_handlers: Arc::new(RwLock::new(HashMap::new())),
            connect_handlers: Arc::new(RwLock::new(HashMap::new())),
            // Dummy task; never doing real accept work in these tests
            task: tokio::spawn(async move {}),
        }
    }

    /// Creates a connected [`AuthClient`] with a launched auth server that blindly responds
    fn auth_client_server() -> (AuthClient, Box<dyn ServerRef>) {
        let (t1, t2) = FramedTransport::pair(1);
        let client = AuthClient::from(Client::from_framed_transport(t1).unwrap());

        // Auth server whose handlers return trivial values, so auth requests
        // made during tests never hang
        let server = HeapAuthServer {
            on_challenge: Box::new(|_, _| Vec::new()),
            on_verify: Box::new(|_, _| false),
            on_info: Box::new(|_| ()),
            on_error: Box::new(|_, _| ()),
        }
        .start(MappedListener::new(OneshotListener::from_value(t2), |t| {
            t.into_split()
        }))
        .unwrap();

        (client, server)
    }

    // Convenience wrapper that discards the peer transport half
    fn dummy_distant_writer_reader() -> (BoxedDistantWriter, BoxedDistantReader) {
        setup_distant_writer_reader().0
    }

    /// Creates a writer & reader with a connected transport
    fn setup_distant_writer_reader() -> (
        (BoxedDistantWriter, BoxedDistantReader),
        FramedTransport<InmemoryTransport, PlainCodec>,
    ) {
        let (t1, t2) = FramedTransport::pair(1);
        let (writer, reader) = t1.into_split();
        ((Box::new(writer), Box::new(reader)), t2)
    }

    #[tokio::test]
    async fn launch_should_fail_if_destination_scheme_is_unsupported() {
        let server = setup();

        let destination = "scheme://host".parse::<Destination>().unwrap();
        let extra = "".parse::<Extra>().unwrap();
        let (mut auth, _auth_server) = auth_client_server();
        let err = server
            .launch(destination, extra, Some(&mut auth))
            .await
            .unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidInput, "{:?}", err);
    }

    #[tokio::test]
    async fn launch_should_fail_if_handler_tied_to_scheme_fails() {
        let server = setup();

        let handler: Box<dyn LaunchHandler> = Box::new(|_: &_, _: &_, _: &mut _| async {
            Err(io::Error::new(io::ErrorKind::Other, "test failure"))
        });

        server
            .launch_handlers
            .write()
            .await
            .insert("scheme".to_string(), handler);

        let destination = "scheme://host".parse::<Destination>().unwrap();
        let extra = "".parse::<Extra>().unwrap();
        let (mut auth, _auth_server) = auth_client_server();
        let err = server
            .launch(destination, extra, Some(&mut auth))
            .await
            .unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::Other);
        assert_eq!(err.to_string(), "test failure");
    }

    #[tokio::test]
    async fn launch_should_return_new_destination_on_success() {
        let server = setup();

        let handler: Box<dyn LaunchHandler> = {
            Box::new(|_: &_, _: &_, _: &mut _| async {
                Ok("scheme2://host2".parse::<Destination>().unwrap())
            })
        };

        server
            .launch_handlers
            .write()
            .await
            .insert("scheme".to_string(), handler);

        let destination = "scheme://host".parse::<Destination>().unwrap();
        let extra = "key=value".parse::<Extra>().unwrap();
        let (mut auth, _auth_server) = auth_client_server();
        let destination = server
            .launch(destination, extra, Some(&mut auth))
            .await
            .unwrap();
        assert_eq!(
            destination,
            "scheme2://host2".parse::<Destination>().unwrap()
        );
    }

    #[tokio::test]
    async fn connect_should_fail_if_destination_scheme_is_unsupported() {
        let server = setup();

        let destination = "scheme://host".parse::<Destination>().unwrap();
        let extra = "".parse::<Extra>().unwrap();
        let (mut auth, _auth_server) = auth_client_server();
        let err = server
            .connect(destination, extra, Some(&mut auth))
            .await
            .unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidInput, "{:?}", err);
    }

    #[tokio::test]
    async fn connect_should_fail_if_handler_tied_to_scheme_fails() {
        let server = setup();

        let handler: Box<dyn ConnectHandler> = Box::new(|_: &_, _: &_, _: &mut _| async {
            Err(io::Error::new(io::ErrorKind::Other, "test failure"))
        });

        server
            .connect_handlers
            .write()
            .await
            .insert("scheme".to_string(), handler);

        let destination = "scheme://host".parse::<Destination>().unwrap();
        let extra = "".parse::<Extra>().unwrap();
        let (mut auth, _auth_server) = auth_client_server();
        let err = server
            .connect(destination, extra, Some(&mut auth))
            .await
            .unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::Other);
        assert_eq!(err.to_string(), "test failure");
    }

    #[tokio::test]
    async fn connect_should_return_id_of_new_connection_on_success() {
        let server = setup();

        let handler: Box<dyn ConnectHandler> =
            Box::new(|_: &_, _: &_, _: &mut _| async { Ok(dummy_distant_writer_reader()) });

        server
            .connect_handlers
            .write()
            .await
            .insert("scheme".to_string(), handler);

        let destination = "scheme://host".parse::<Destination>().unwrap();
        let extra = "key=value".parse::<Extra>().unwrap();
        let (mut auth, _auth_server) = auth_client_server();
        let id = server
            .connect(destination, extra, Some(&mut auth))
            .await
            .unwrap();

        // The new connection should be registered under the returned id
        let lock = server.connections.read().await;
        let connection = lock.get(&id).unwrap();
        assert_eq!(connection.id, id);
        assert_eq!(connection.destination, "scheme://host");
        assert_eq!(connection.extra, "key=value".parse().unwrap());
    }

    #[tokio::test]
    async fn info_should_fail_if_no_connection_found_for_specified_id() {
        let server = setup();

        let err = server.info(999).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::NotConnected, "{:?}", err);
    }

    #[tokio::test]
    async fn info_should_return_information_about_established_connection() {
        let server = setup();

        let (writer, reader) = dummy_distant_writer_reader();
        let connection = DistantManagerConnection::new(
            "scheme://host".parse().unwrap(),
            "key=value".parse().unwrap(),
            writer,
            reader,
        );
        let id = connection.id;
        server.connections.write().await.insert(id, connection);

        let info = server.info(id).await.unwrap();
        assert_eq!(
            info,
            ConnectionInfo {
                id,
                destination: "scheme://host".parse().unwrap(),
                extra: "key=value".parse().unwrap(),
            }
        );
    }

    #[tokio::test]
    async fn list_should_return_empty_connection_list_if_no_established_connections() {
        let server = setup();

        let list = server.list().await.unwrap();
        assert_eq!(list, ConnectionList(HashMap::new()));
    }

    #[tokio::test]
    async fn list_should_return_a_list_of_established_connections() {
        let server = setup();

        let (writer, reader) = dummy_distant_writer_reader();
        let connection = DistantManagerConnection::new(
            "scheme://host".parse().unwrap(),
            "key=value".parse().unwrap(),
            writer,
            reader,
        );
        let id_1 = connection.id;
        server.connections.write().await.insert(id_1, connection);

        let (writer, reader) = dummy_distant_writer_reader();
        let connection = DistantManagerConnection::new(
            "other://host2".parse().unwrap(),
            "key=value".parse().unwrap(),
            writer,
            reader,
        );
        let id_2 = connection.id;
        server.connections.write().await.insert(id_2, connection);

        let list = server.list().await.unwrap();
        assert_eq!(
            list.get(&id_1).unwrap(),
            &"scheme://host".parse::<Destination>().unwrap()
        );
        assert_eq!(
            list.get(&id_2).unwrap(),
            &"other://host2".parse::<Destination>().unwrap()
        );
    }

    #[tokio::test]
    async fn kill_should_fail_if_no_connection_found_for_specified_id() {
        let server = setup();

        let err = server.kill(999).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::NotConnected, "{:?}", err);
    }

    #[tokio::test]
    async fn kill_should_terminate_established_connection_and_remove_it_from_the_list() {
        let server = setup();

        let (writer, reader) = dummy_distant_writer_reader();
        let connection = DistantManagerConnection::new(
            "scheme://host".parse().unwrap(),
            "key=value".parse().unwrap(),
            writer,
            reader,
        );
        let id = connection.id;
        server.connections.write().await.insert(id, connection);

        server.kill(id).await.unwrap();

        let lock = server.connections.read().await;
        assert!(!lock.contains_key(&id), "Connection still exists");
    }
}

@ -1,201 +0,0 @@
use crate::{
manager::{
data::{ChannelId, ConnectionId, Destination, Extra},
BoxedDistantReader, BoxedDistantWriter,
},
DistantMsg, DistantRequestData, DistantResponseData, ManagerResponse,
};
use distant_net::{Request, Response, ServerReply};
use log::*;
use std::{collections::HashMap, io};
use tokio::{sync::mpsc, task::JoinHandle};
/// Represents a connection a distant manager has with some distant-compatible server
pub struct DistantManagerConnection {
    pub id: ConnectionId,
    pub destination: Destination,
    pub extra: Extra,
    // Feeds state-machine events into the writer task spawned in `new`
    tx: mpsc::Sender<StateMachine>,
    // Background tasks spawned in `new`; aborted when this connection is dropped
    reader_task: JoinHandle<()>,
    writer_task: JoinHandle<()>,
}
/// Cheap, cloneable handle to a channel registered with a connection's writer
/// task; used to forward requests and to unregister the channel on close
#[derive(Clone)]
pub struct DistantManagerChannel {
    channel_id: ChannelId,
    tx: mpsc::Sender<StateMachine>,
}
impl DistantManagerChannel {
    /// Returns the id of this channel
    pub fn id(&self) -> ChannelId {
        self.channel_id
    }

    /// Forwards `request` to the connection's writer task to be sent over this
    /// channel, failing with `BrokenPipe` if the writer task has gone away
    pub async fn send(&self, request: Request<DistantMsg<DistantRequestData>>) -> io::Result<()> {
        let id = self.channel_id;
        let event = StateMachine::Write { id, request };
        match self.tx.send(event).await {
            Ok(()) => Ok(()),
            Err(x) => Err(io::Error::new(
                io::ErrorKind::BrokenPipe,
                format!("channel {} send failed: {}", id, x),
            )),
        }
    }

    /// Unregisters this channel from the connection's writer task, failing with
    /// `BrokenPipe` if the writer task has gone away
    pub async fn close(&self) -> io::Result<()> {
        let id = self.channel_id;
        match self.tx.send(StateMachine::Unregister { id }).await {
            Ok(()) => Ok(()),
            Err(x) => Err(io::Error::new(
                io::ErrorKind::BrokenPipe,
                format!("channel {} close failed: {}", id, x),
            )),
        }
    }
}
/// Events processed by a connection's writer task
enum StateMachine {
    /// Register a channel's reply handle so responses can be routed back to it
    Register {
        id: ChannelId,
        reply: ServerReply<ManagerResponse>,
    },

    /// Remove a previously registered channel
    Unregister {
        id: ChannelId,
    },

    /// A response arrived from the server (forwarded by the reader task)
    Read {
        response: Response<DistantMsg<DistantResponseData>>,
    },

    /// A request should be written to the server on behalf of a channel
    Write {
        id: ChannelId,
        request: Request<DistantMsg<DistantRequestData>>,
    },
}
impl DistantManagerConnection {
    /// Creates a new connection wrapper around the given writer/reader pair,
    /// spawning a reader task (server -> state machine) and a writer task
    /// (state machine -> server + channel routing)
    pub fn new(
        destination: Destination,
        extra: Extra,
        mut writer: BoxedDistantWriter,
        mut reader: BoxedDistantReader,
    ) -> Self {
        let connection_id = rand::random();
        let (tx, mut rx) = mpsc::channel(1);

        // Reader task: pump responses from the server into the state machine
        let reader_task = {
            let tx = tx.clone();
            tokio::spawn(async move {
                loop {
                    match reader.read().await {
                        Ok(Some(response)) => {
                            // Writer task gone -> nothing left to route to
                            if tx.send(StateMachine::Read { response }).await.is_err() {
                                break;
                            }
                        }
                        // EOF from the server ends the task
                        Ok(None) => break,
                        Err(x) => {
                            error!("[Conn {}] {}", connection_id, x);
                            continue;
                        }
                    }
                }
            })
        };

        // Writer task: owns the writer half and the channel registry
        let writer_task = tokio::spawn(async move {
            let mut registered = HashMap::new();
            while let Some(state_machine) = rx.recv().await {
                match state_machine {
                    StateMachine::Register { id, reply } => {
                        registered.insert(id, reply);
                    }
                    StateMachine::Unregister { id } => {
                        registered.remove(&id);
                    }
                    StateMachine::Read { mut response } => {
                        // Split {channel id}_{request id} back into pieces and
                        // update the origin id to match the request id only
                        let channel_id = match response.origin_id.split_once('_') {
                            Some((cid_str, oid_str)) => {
                                if let Ok(cid) = cid_str.parse::<ChannelId>() {
                                    response.origin_id = oid_str.to_string();
                                    cid
                                } else {
                                    // Prefix is not a channel id; silently drop
                                    continue;
                                }
                            }
                            // No separator -> not a channel-originated response; drop
                            None => continue,
                        };

                        // Only forward if the channel is still registered
                        if let Some(reply) = registered.get(&channel_id) {
                            let response = ManagerResponse::Channel {
                                id: channel_id,
                                response,
                            };
                            if let Err(x) = reply.send(response).await {
                                error!("[Conn {}] {}", connection_id, x);
                            }
                        }
                    }
                    StateMachine::Write { id, request } => {
                        // Combine channel id with request id so we can properly forward
                        // the response containing this in the origin id
                        let request = Request {
                            id: format!("{}_{}", id, request.id),
                            payload: request.payload,
                        };
                        if let Err(x) = writer.write(request).await {
                            error!("[Conn {}] {}", connection_id, x);
                        }
                    }
                }
            }
        });

        Self {
            id: connection_id,
            destination,
            extra,
            tx,
            reader_task,
            writer_task,
        }
    }

    /// Registers a fresh channel id with the writer task and returns a handle
    /// that can send requests through this connection
    pub async fn open_channel(
        &self,
        reply: ServerReply<ManagerResponse>,
    ) -> io::Result<DistantManagerChannel> {
        let channel_id = rand::random();
        self.tx
            .send(StateMachine::Register {
                id: channel_id,
                reply,
            })
            .await
            .map_err(|x| {
                io::Error::new(
                    io::ErrorKind::BrokenPipe,
                    format!("open_channel failed: {}", x),
                )
            })?;
        Ok(DistantManagerChannel {
            channel_id,
            tx: self.tx.clone(),
        })
    }
}
impl Drop for DistantManagerConnection {
    // Abort both background tasks so they do not outlive the connection
    fn drop(&mut self) {
        for task in [&self.reader_task, &self.writer_task] {
            task.abort();
        }
    }
}

@ -1,30 +0,0 @@
use crate::{DistantManager, DistantManagerConfig};
use distant_net::{
Codec, FramedTransport, IntoSplit, MappedListener, PortRange, TcpListener, TcpServerRef,
};
use std::{io, net::IpAddr};
impl DistantManager {
    /// Start a new server by binding to the given IP address and one of the ports in the
    /// specified range, mapping all connections to use the given codec
    pub async fn start_tcp<P, C>(
        config: DistantManagerConfig,
        addr: IpAddr,
        port: P,
        codec: C,
    ) -> io::Result<TcpServerRef>
    where
        P: Into<PortRange> + Send,
        C: Codec + Send + Sync + 'static,
    {
        // Bind first so we can report the concrete port that got selected
        let tcp_listener = TcpListener::bind(addr, port).await?;
        let bound_port = tcp_listener.port();

        // Frame each accepted transport with the codec and split it into halves
        let mapped = MappedListener::new(tcp_listener, move |raw| {
            FramedTransport::new(raw, codec.clone()).into_split()
        });

        let server = DistantManager::start(config, mapped)?;
        Ok(TcpServerRef::new(addr, bound_port, Box::new(server)))
    }
}

@ -1,50 +0,0 @@
use crate::{DistantManager, DistantManagerConfig};
use distant_net::{
Codec, FramedTransport, IntoSplit, MappedListener, UnixSocketListener, UnixSocketServerRef,
};
use std::{io, path::Path};
impl DistantManager {
    /// Start a new server using the specified path as a unix socket using default unix socket file
    /// permissions
    pub async fn start_unix_socket<P, C>(
        config: DistantManagerConfig,
        path: P,
        codec: C,
    ) -> io::Result<UnixSocketServerRef>
    where
        P: AsRef<Path> + Send,
        C: Codec + Send + Sync + 'static,
    {
        // Delegate to the permission-aware variant with the library default mode
        let mode = UnixSocketListener::default_unix_socket_file_permissions();
        Self::start_unix_socket_with_permissions(config, path, codec, mode).await
    }

    /// Start a new server using the specified path as a unix socket and `mode` as the unix socket
    /// file permissions
    pub async fn start_unix_socket_with_permissions<P, C>(
        config: DistantManagerConfig,
        path: P,
        codec: C,
        mode: u32,
    ) -> io::Result<UnixSocketServerRef>
    where
        P: AsRef<Path> + Send,
        C: Codec + Send + Sync + 'static,
    {
        let socket_listener = UnixSocketListener::bind_with_permissions(path, mode).await?;

        // Capture the resolved socket path before the listener is consumed
        let socket_path = socket_listener.path().to_path_buf();

        let mapped = MappedListener::new(socket_listener, move |raw| {
            FramedTransport::new(raw, codec.clone()).into_split()
        });

        let server = DistantManager::start(config, mapped)?;
        Ok(UnixSocketServerRef::new(socket_path, Box::new(server)))
    }
}

@ -1,48 +0,0 @@
use crate::{DistantManager, DistantManagerConfig};
use distant_net::{
Codec, FramedTransport, IntoSplit, MappedListener, WindowsPipeListener, WindowsPipeServerRef,
};
use std::{
ffi::{OsStr, OsString},
io,
};
impl DistantManager {
    /// Start a new server at the specified address via `\\.\pipe\{name}` using the given codec
    pub async fn start_local_named_pipe<N, C>(
        config: DistantManagerConfig,
        name: N,
        codec: C,
    ) -> io::Result<WindowsPipeServerRef>
    where
        Self: Sized,
        N: AsRef<OsStr> + Send,
        C: Codec + Send + Sync + 'static,
    {
        // Local pipes all live under the \\.\pipe\ namespace
        let mut full_addr = OsString::from(r"\\.\pipe\");
        full_addr.push(name.as_ref());
        Self::start_named_pipe(config, full_addr, codec).await
    }

    /// Start a new server at the specified pipe address using the given codec
    pub async fn start_named_pipe<A, C>(
        config: DistantManagerConfig,
        addr: A,
        codec: C,
    ) -> io::Result<WindowsPipeServerRef>
    where
        A: AsRef<OsStr> + Send,
        C: Codec + Send + Sync + 'static,
    {
        let pipe_listener = WindowsPipeListener::bind(addr.as_ref())?;

        // Capture the resolved pipe address before the listener is consumed
        let pipe_addr = pipe_listener.addr().to_os_string();

        let mapped = MappedListener::new(pipe_listener, move |raw| {
            FramedTransport::new(raw, codec.clone()).into_split()
        });

        let server = DistantManager::start(config, mapped)?;
        Ok(WindowsPipeServerRef::new(pipe_addr, Box::new(server)))
    }
}

@ -1,69 +0,0 @@
use crate::{
manager::data::{Destination, Extra},
DistantMsg, DistantRequestData, DistantResponseData,
};
use async_trait::async_trait;
use distant_net::{AuthClient, Request, Response, TypedAsyncRead, TypedAsyncWrite};
use std::{future::Future, io};
/// Boxed writer half for sending requests to a distant-compatible server
pub type BoxedDistantWriter =
    Box<dyn TypedAsyncWrite<Request<DistantMsg<DistantRequestData>>> + Send>;

/// Boxed reader half for receiving responses from a distant-compatible server
pub type BoxedDistantReader =
    Box<dyn TypedAsyncRead<Response<DistantMsg<DistantResponseData>>> + Send>;

/// Connected writer/reader pair as produced by a [`ConnectHandler`]
pub type BoxedDistantWriterReader = (BoxedDistantWriter, BoxedDistantReader);

/// Boxed handler used to launch servers for a given scheme
pub type BoxedLaunchHandler = Box<dyn LaunchHandler>;

/// Boxed handler used to establish connections for a given scheme
pub type BoxedConnectHandler = Box<dyn ConnectHandler>;
/// Used to launch a server at the specified destination, returning the destination
/// of the launched server; `auth_client` is available for any authentication the
/// launch process requires
#[async_trait]
pub trait LaunchHandler: Send + Sync {
    async fn launch(
        &self,
        destination: &Destination,
        extra: &Extra,
        auth_client: &mut AuthClient,
    ) -> io::Result<Destination>;
}
// Blanket impl so plain async closures/functions can serve as launch handlers
#[async_trait]
impl<F, R> LaunchHandler for F
where
    F: for<'a> Fn(&'a Destination, &'a Extra, &'a mut AuthClient) -> R + Send + Sync + 'static,
    R: Future<Output = io::Result<Destination>> + Send + 'static,
{
    async fn launch(
        &self,
        destination: &Destination,
        extra: &Extra,
        auth_client: &mut AuthClient,
    ) -> io::Result<Destination> {
        self(destination, extra, auth_client).await
    }
}
/// Used to connect to a destination, returning a connected reader and writer pair
#[async_trait]
pub trait ConnectHandler: Send + Sync {
    async fn connect(
        &self,
        destination: &Destination,
        extra: &Extra,
        auth_client: &mut AuthClient,
    ) -> io::Result<BoxedDistantWriterReader>;
}
// Blanket impl so plain async closures/functions can serve as connect handlers
#[async_trait]
impl<F, R> ConnectHandler for F
where
    F: for<'a> Fn(&'a Destination, &'a Extra, &'a mut AuthClient) -> R + Send + Sync + 'static,
    R: Future<Output = io::Result<BoxedDistantWriterReader>> + Send + 'static,
{
    async fn connect(
        &self,
        destination: &Destination,
        extra: &Extra,
        auth_client: &mut AuthClient,
    ) -> io::Result<BoxedDistantWriterReader> {
        self(destination, extra, auth_client).await
    }
}

@ -1,73 +0,0 @@
use super::{BoxedConnectHandler, BoxedLaunchHandler, ConnectHandler, LaunchHandler};
use distant_net::{ServerRef, ServerState};
use std::{collections::HashMap, io, sync::Weak};
use tokio::sync::RwLock;
/// Reference to a distant manager's server instance
///
/// Holds only weak references to the handler maps, so keeping this ref alive
/// does not keep the server itself alive
pub struct DistantManagerRef {
    /// Mapping of "scheme" -> handler
    pub(crate) launch_handlers: Weak<RwLock<HashMap<String, BoxedLaunchHandler>>>,

    /// Mapping of "scheme" -> handler
    pub(crate) connect_handlers: Weak<RwLock<HashMap<String, BoxedConnectHandler>>>,

    pub(crate) inner: Box<dyn ServerRef>,
}
impl DistantManagerRef {
    /// Registers a new [`LaunchHandler`] for the specified scheme (e.g. "distant" or "ssh")
    ///
    /// Fails if the server (and thus the handler map) has already been dropped
    pub async fn register_launch_handler(
        &self,
        scheme: impl Into<String>,
        handler: impl LaunchHandler + 'static,
    ) -> io::Result<()> {
        let handlers = match Weak::upgrade(&self.launch_handlers) {
            Some(handlers) => handlers,
            None => {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Handler reference is no longer available",
                ))
            }
        };

        let mut map = handlers.write().await;
        map.insert(scheme.into(), Box::new(handler));
        Ok(())
    }

    /// Registers a new [`ConnectHandler`] for the specified scheme (e.g. "distant" or "ssh")
    ///
    /// Fails if the server (and thus the handler map) has already been dropped
    pub async fn register_connect_handler(
        &self,
        scheme: impl Into<String>,
        handler: impl ConnectHandler + 'static,
    ) -> io::Result<()> {
        let handlers = match Weak::upgrade(&self.connect_handlers) {
            Some(handlers) => handlers,
            None => {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Handler reference is no longer available",
                ))
            }
        };

        let mut map = handlers.write().await;
        map.insert(scheme.into(), Box::new(handler));
        Ok(())
    }
}
// Delegate all ServerRef operations to the underlying server reference
impl ServerRef for DistantManagerRef {
    fn state(&self) -> &ServerState {
        self.inner.state()
    }

    fn is_finished(&self) -> bool {
        self.inner.is_finished()
    }

    fn abort(&self) {
        self.inner.abort();
    }
}

@ -1,8 +1,9 @@
use serde::{
de::{Deserializer, Error as SerdeError, Visitor},
ser::Serializer,
};
use std::{fmt, marker::PhantomData, str::FromStr};
use std::fmt;
use std::marker::PhantomData;
use std::str::FromStr;
use serde::de::{Deserializer, Error as SerdeError, Visitor};
use serde::ser::Serializer;
/// From https://docs.rs/serde_with/1.14.0/src/serde_with/rust.rs.html#90-118
pub fn deserialize_from_str<'de, D, T>(deserializer: D) -> Result<T, D::Error>

@ -0,0 +1,325 @@
use std::io;
use std::path::PathBuf;
use async_trait::async_trait;
use distant_core::{
DistantApi, DistantApiServerHandler, DistantChannelExt, DistantClient, DistantCtx,
};
use distant_net::auth::{DummyAuthHandler, Verifier};
use distant_net::client::Client;
use distant_net::common::{InmemoryTransport, OneshotListener, Version};
use distant_net::server::{Server, ServerRef};
use distant_protocol::PROTOCOL_VERSION;
/// Stands up an inmemory client and server using the given api.
///
/// Both sides advertise the same protocol version so the version handshake
/// succeeds; the server uses no verification and the client a dummy auth handler.
async fn setup(api: impl DistantApi + Send + Sync + 'static) -> (DistantClient, ServerRef) {
    let (t1, t2) = InmemoryTransport::pair(100);

    let server = Server::new()
        .handler(DistantApiServerHandler::new(api))
        .verifier(Verifier::none())
        .version(Version::new(
            PROTOCOL_VERSION.major,
            PROTOCOL_VERSION.minor,
            PROTOCOL_VERSION.patch,
        ))
        .start(OneshotListener::from_value(t2))
        .expect("Failed to start server");

    let client: DistantClient = Client::build()
        .auth_handler(DummyAuthHandler)
        .connector(t1)
        .version(Version::new(
            PROTOCOL_VERSION.major,
            PROTOCOL_VERSION.minor,
            PROTOCOL_VERSION.patch,
        ))
        .connect()
        .await
        .expect("Failed to connect to server");

    (client, server)
}
// Tests covering a single (non-batch) request/response round trip
mod single {
    use test_log::test;

    use super::*;

    #[test(tokio::test)]
    async fn should_support_single_request_returning_error() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, _path: PathBuf) -> io::Result<Vec<u8>> {
                Err(io::Error::new(io::ErrorKind::NotFound, "test error"))
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // The server-side error should surface as the client call's error
        let error = client.read_file(PathBuf::from("file")).await.unwrap_err();
        assert_eq!(error.kind(), io::ErrorKind::NotFound);
        assert_eq!(error.to_string(), "test error");
    }

    #[test(tokio::test)]
    async fn should_support_single_request_returning_success() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, _path: PathBuf) -> io::Result<Vec<u8>> {
                Ok(b"hello world".to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let contents = client.read_file(PathBuf::from("file")).await.unwrap();
        assert_eq!(contents, b"hello world");
    }
}
/// Tests covering batch requests whose individual operations run in parallel,
/// which is the default behavior when no "sequence" header is attached.
mod batch_parallel {
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    use distant_net::common::Request;
    use distant_protocol::{Msg, Request as RequestPayload};
    use test_log::test;

    use super::*;

    #[test(tokio::test)]
    async fn should_support_multiple_requests_running_in_parallel() {
        // Api reporting the current time for each read; the special path
        // "slow" sleeps 500ms before responding
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "slow" {
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }

                let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                Ok((time.as_millis() as u64).to_be_bytes().to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // Batch of three reads with the slow one in the middle
        let request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("slow"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Collect our times from the reading
        let mut times = Vec::new();
        for payload in payloads {
            match payload {
                distant_protocol::Response::Blob { data } => {
                    // Each blob is a big-endian u64 millisecond timestamp
                    let mut buf = [0u8; 8];
                    buf.copy_from_slice(&data[..8]);
                    times.push(u64::from_be_bytes(buf));
                }
                x => panic!("Unexpected payload: {x:?}"),
            }
        }

        // Verify that these ran in parallel as the first and third requests should not be
        // over 500 milliseconds apart due to the sleep in the middle!
        let diff = times[0].abs_diff(times[2]);
        assert!(diff <= 500, "Sequential ordering detected");
    }

    #[test(tokio::test)]
    async fn should_run_all_requests_even_if_some_fail() {
        // Api whose read fails only for the special path "fail"
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "fail" {
                    return Err(io::Error::new(io::ErrorKind::Other, "test error"));
                }

                Ok(Vec::new())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // Batch of three reads with the failing one in the middle
        let request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("fail"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Should be a success, error, and success
        assert!(
            matches!(payloads[0], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[0]: {:?}",
            payloads[0]
        );
        assert!(
            matches!(
                &payloads[1],
                distant_protocol::Response::Error(distant_protocol::Error { kind, description })
                    if matches!(kind, distant_protocol::ErrorKind::Other) && description == "test error"
            ),
            "Unexpected payloads[1]: {:?}",
            payloads[1]
        );
        assert!(
            matches!(payloads[2], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[2]: {:?}",
            payloads[2]
        );
    }
}
/// Tests covering batch requests whose individual operations run strictly in
/// sequence, opted into by the "sequence" header on the request.
mod batch_sequence {
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    use distant_net::common::Request;
    use distant_protocol::{Msg, Request as RequestPayload};
    use test_log::test;

    use super::*;

    #[test(tokio::test)]
    async fn should_support_multiple_requests_running_in_sequence() {
        // Api reporting the current time for each read; the special path
        // "slow" sleeps 500ms before responding
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "slow" {
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }

                let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                Ok((time.as_millis() as u64).to_be_bytes().to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // Batch of three reads with the slow one in the middle
        let mut request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("slow"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        // Mark as running in sequence
        request.header.insert("sequence", true);

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Collect our times from the reading
        let mut times = Vec::new();
        for payload in payloads {
            match payload {
                distant_protocol::Response::Blob { data } => {
                    // Each blob is a big-endian u64 millisecond timestamp
                    let mut buf = [0u8; 8];
                    buf.copy_from_slice(&data[..8]);
                    times.push(u64::from_be_bytes(buf));
                }
                x => panic!("Unexpected payload: {x:?}"),
            }
        }

        // Verify that these ran in sequence as the first and third requests should be
        // over 500 milliseconds apart due to the sleep in the middle!
        let diff = times[0].abs_diff(times[2]);
        assert!(diff > 500, "Parallel ordering detected");
    }

    #[test(tokio::test)]
    async fn should_interrupt_any_requests_following_a_failure() {
        // Api whose read fails only for the special path "fail"
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "fail" {
                    return Err(io::Error::new(io::ErrorKind::Other, "test error"));
                }

                Ok(Vec::new())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // Batch of three reads with the failing one in the middle
        let mut request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("fail"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        // Mark as running in sequence
        request.header.insert("sequence", true);

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Should be a success, error, and interrupt
        assert!(
            matches!(payloads[0], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[0]: {:?}",
            payloads[0]
        );
        assert!(
            matches!(
                &payloads[1],
                distant_protocol::Response::Error(distant_protocol::Error { kind, description })
                    if matches!(kind, distant_protocol::ErrorKind::Other) && description == "test error"
            ),
            "Unexpected payloads[1]: {:?}",
            payloads[1]
        );
        assert!(
            matches!(
                &payloads[2],
                distant_protocol::Response::Error(distant_protocol::Error { kind, .. })
                    if matches!(kind, distant_protocol::ErrorKind::Interrupted)
            ),
            "Unexpected payloads[2]: {:?}",
            payloads[2]
        );
    }
}

@ -1,96 +0,0 @@
use distant_core::{
net::{FramedTransport, InmemoryTransport, IntoSplit, OneshotListener, PlainCodec},
BoxedDistantReader, BoxedDistantWriter, Destination, DistantApiServer, DistantChannelExt,
DistantManager, DistantManagerClient, DistantManagerClientConfig, DistantManagerConfig, Extra,
};
use std::io;
/// Builds a connected pair for tests: the client-side framed transport and a
/// one-shot listener wrapping the matching server-side transport.
async fn setup() -> (
    FramedTransport<InmemoryTransport, PlainCodec>,
    OneshotListener<FramedTransport<InmemoryTransport, PlainCodec>>,
) {
    // Both halves of the in-memory channel, buffered at 100 messages
    let (client_side, server_side) = InmemoryTransport::pair(100);

    let client_transport = FramedTransport::new(client_side, PlainCodec);
    let server_listener =
        OneshotListener::from_value(FramedTransport::new(server_side, PlainCodec));

    (client_transport, server_listener)
}
/// End-to-end manager test: registers a connect handler, establishes a
/// connection through the manager, exercises list/info/channel operations,
/// kills the connection, and verifies that querying the dead connection
/// yields a NotConnected error.
#[tokio::test]
async fn should_be_able_to_establish_a_single_connection_and_communicate() {
    let (transport, listener) = setup().await;

    let config = DistantManagerConfig::default();
    let manager_ref = DistantManager::start(config, listener).expect("Failed to start manager");

    // NOTE: To pass in a raw function, we HAVE to specify the types of the parameters manually,
    // otherwise we get a compilation error about lifetime mismatches
    manager_ref
        .register_connect_handler("scheme", |_: &_, _: &_, _: &mut _| async {
            use distant_core::net::ServerExt;
            let (t1, t2) = FramedTransport::pair(100);

            // Spawn a server on one end
            let _ = DistantApiServer::local()
                .unwrap()
                .start(OneshotListener::from_value(t2.into_split()))?;

            // Create a reader/writer pair on the other end
            let (writer, reader) = t1.into_split();
            let writer: BoxedDistantWriter = Box::new(writer);
            let reader: BoxedDistantReader = Box::new(reader);
            Ok((writer, reader))
        })
        .await
        .expect("Failed to register handler");

    let config = DistantManagerClientConfig::with_empty_prompts();
    let mut client =
        DistantManagerClient::new(config, transport).expect("Failed to connect to manager");

    // Test establishing a connection to some remote server
    let id = client
        .connect(
            "scheme://host".parse::<Destination>().unwrap(),
            "key=value".parse::<Extra>().unwrap(),
        )
        .await
        .expect("Failed to connect to a remote server");

    // Test retrieving list of connections
    let list = client
        .list()
        .await
        .expect("Failed to get list of connections");
    assert_eq!(list.len(), 1);
    assert_eq!(list.get(&id).unwrap().to_string(), "scheme://host");

    // Test retrieving information
    let info = client
        .info(id)
        .await
        .expect("Failed to get info about connection");
    assert_eq!(info.id, id);
    assert_eq!(info.destination.to_string(), "scheme://host");
    assert_eq!(info.extra, "key=value".parse::<Extra>().unwrap());

    // Create a new channel and request some data
    let mut channel = client
        .open_channel(id)
        .await
        .expect("Failed to open channel");

    let _ = channel
        .system_info()
        .await
        .expect("Failed to get system information");

    // Test killing a connection
    client.kill(id).await.expect("Failed to kill connection");

    // Test getting an error to ensure that serialization of that data works,
    // which we do by trying to access a connection that no longer exists
    let err = client.info(id).await.unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::NotConnected);
}

@ -1,68 +0,0 @@
use crate::stress::utils;
use distant_core::{DistantApiServer, DistantClient, LocalDistantApi};
use distant_net::{
PortRange, SecretKey, SecretKey32, TcpClientExt, TcpServerExt, XChaCha20Poly1305Codec,
};
use rstest::*;
use std::time::Duration;
use tokio::sync::mpsc;
/// Where the stress-test server writes its log output
const LOG_PATH: &str = "/tmp/test.distant.server.log";
/// Context bundling a connected client with a handle that keeps the backing
/// server task alive for the duration of a test.
pub struct DistantClientCtx {
    // Client connected to the locally-spawned server
    pub client: DistantClient,
    // Dropping this sender closes the channel, which lets the spawned server
    // task's recv() complete so it can shut down
    _done_tx: mpsc::Sender<()>,
}
impl DistantClientCtx {
    /// Spawns a local distant server bound to an ephemeral port on 127.0.0.1
    /// and connects a client to it, returning both bundled together.
    ///
    /// NOTE(review): if `LocalDistantApi::initialize()` fails, `started_tx` is
    /// dropped without sending and the `recv().unwrap()` below panics —
    /// presumably acceptable for stress tests; confirm.
    pub async fn initialize() -> Self {
        let ip_addr = "127.0.0.1".parse().unwrap();
        // Channel used to keep the server task alive until the ctx is dropped
        let (done_tx, mut done_rx) = mpsc::channel::<()>(1);
        // Channel used by the server task to report its bound port and key
        let (started_tx, mut started_rx) = mpsc::channel::<(u16, SecretKey32)>(1);

        tokio::spawn(async move {
            let logger = utils::init_logging(LOG_PATH);
            let key = SecretKey::default();
            let codec = XChaCha20Poly1305Codec::from(key.clone());

            if let Ok(api) = LocalDistantApi::initialize() {
                // "0" requests an OS-assigned ephemeral port
                let port: PortRange = "0".parse().unwrap();
                let port = {
                    let server_ref = DistantApiServer::new(api)
                        .start(ip_addr, port, codec)
                        .await
                        .unwrap();
                    server_ref.port()
                };

                started_tx.send((port, key)).await.unwrap();

                // Block here until the ctx (holding done_tx) is dropped
                let _ = done_rx.recv().await;
            }

            logger.flush();
            logger.shutdown();
        });

        // Extract our server startup data if we succeeded
        let (port, key) = started_rx.recv().await.unwrap();

        // Now initialize our client
        let client = DistantClient::connect_timeout(
            format!("{}:{}", ip_addr, port).parse().unwrap(),
            XChaCha20Poly1305Codec::from(key),
            Duration::from_secs(1),
        )
        .await
        .unwrap();

        DistantClientCtx {
            client,
            _done_tx: done_tx,
        }
    }
}
/// rstest fixture providing a fresh client/server context per test.
#[fixture]
pub async fn ctx() -> DistantClientCtx {
    DistantClientCtx::initialize().await
}

@ -1,23 +0,0 @@
use std::path::PathBuf;
/// Initializes logging to the given file path (should only call once).
///
/// All modules are silenced except our own crates, which log at trace level.
/// Returns the handle so the caller can flush/shut down the logger.
pub fn init_logging(path: impl Into<PathBuf>) -> flexi_logger::LoggerHandle {
    use flexi_logger::{FileSpec, LevelFilter, LogSpecification, Logger};

    // Disable logging for everything but our binary, which is based on verbosity
    let mut spec = LogSpecification::builder();
    spec.default(LevelFilter::Off);

    // For each of our own crates, enable full trace logging
    for module in ["distant", "distant_core", "distant_ssh2"] {
        spec.module(module, LevelFilter::Trace);
    }

    // Build the logger against the requested file and start it immediately
    Logger::with(spec.build())
        .format_for_files(flexi_logger::opt_format)
        .log_to_file(FileSpec::try_from(path).expect("Failed to create log file spec"))
        .start()
        .expect("Failed to initialize logger")
}

@ -0,0 +1,46 @@
[package]
name = "distant-local"
description = "Library implementing distant API for local interactions"
categories = ["network-programming"]
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
default = ["macos-fsevent"]
# If specified, will use MacOS FSEvent for file watching
macos-fsevent = ["notify/macos_fsevent"]
# If specified, will use MacOS kqueue for file watching
macos-kqueue = ["notify/macos_kqueue"]
[dependencies]
async-trait = "0.1.68"
distant-core = { version = "=0.20.0", path = "../distant-core" }
grep = "0.2.12"
ignore = "0.4.20"
log = "0.4.18"
notify = { version = "6.0.0", default-features = false, features = ["macos_fsevent"] }
notify-debouncer-full = { version = "0.1.0", default-features = false }
num_cpus = "1.15.0"
portable-pty = "0.8.1"
rand = { version = "0.8.5", features = ["getrandom"] }
shell-words = "1.1.0"
tokio = { version = "1.28.2", features = ["full"] }
walkdir = "2.3.3"
whoami = "1.4.0"
winsplit = "0.1.0"
[dev-dependencies]
assert_fs = "1.0.13"
env_logger = "0.10.0"
indoc = "2.0.1"
once_cell = "1.17.2"
predicates = "3.0.3"
rstest = "0.17.0"
test-log = "0.2.11"

@ -0,0 +1,45 @@
# distant local
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-local.svg
[distant_crates_lnk]: https://crates.io/crates/distant-local
[distant_doc_img]: https://docs.rs/distant-local/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-local
[distant_rustc_img]: https://img.shields.io/badge/distant_local-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
The `distant-local` library acts as the primary implementation of a distant
server that powers the CLI. The logic acts on the local machine of the server
and is designed to be used as the foundation for distant operation handling.
## Installation
You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-local = "0.20"
```
## Examples
```rust,no_run
use distant_local::{Config, new_handler};
// Create a server API handler to be used with the server
let handler = new_handler(Config::default()).unwrap();
```
## License
This project is licensed under either of

- Apache License, Version 2.0 (LICENSE-APACHE or [apache-license][apache-license])
- MIT license (LICENSE-MIT or [mit-license][mit-license])

at your option.
[apache-license]: http://www.apache.org/licenses/LICENSE-2.0
[mit-license]: http://opensource.org/licenses/MIT

File diff suppressed because it is too large Load Diff

@ -1,6 +1,9 @@
use crate::data::{ProcessId, PtySize};
use std::{future::Future, pin::Pin};
use tokio::{io, sync::mpsc};
use std::future::Future;
use std::pin::Pin;
use distant_core::protocol::{ProcessId, PtySize};
use tokio::io;
use tokio::sync::mpsc;
mod pty;
pub use pty::*;

@ -1,24 +1,24 @@
use std::ffi::OsStr;
use std::io::{self, Read, Write};
use std::path::PathBuf;
use std::sync::{Arc, Mutex, Weak};
use distant_core::protocol::Environment;
use log::*;
use portable_pty::{CommandBuilder, MasterPty, PtySize as PortablePtySize};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use super::{
wait, ExitStatus, FutureReturn, InputChannel, OutputChannel, Process, ProcessId, ProcessKiller,
ProcessPty, PtySize, WaitRx,
};
use crate::{
constants::{MAX_PIPE_CHUNK_SIZE, READ_PAUSE_MILLIS},
data::Environment,
};
use portable_pty::{CommandBuilder, MasterPty, PtySize as PortablePtySize};
use std::{
ffi::OsStr,
io::{self, Read, Write},
path::PathBuf,
sync::{Arc, Mutex},
};
use tokio::{sync::mpsc, task::JoinHandle};
use crate::constants::{MAX_PIPE_CHUNK_SIZE, READ_PAUSE_DURATION};
/// Represents a process that is associated with a pty
pub struct PtyProcess {
id: ProcessId,
pty_master: PtyProcessMaster,
pty_master: Option<Arc<Mutex<Box<dyn MasterPty + Send>>>>,
stdin: Option<Box<dyn InputChannel>>,
stdout: Option<Box<dyn OutputChannel>>,
stdin_task: Option<JoinHandle<()>>,
@ -41,6 +41,8 @@ impl PtyProcess {
I: IntoIterator<Item = S2>,
S2: AsRef<OsStr>,
{
let id = rand::random();
// Establish our new pty for the given size
let pty_system = portable_pty::native_pty_system();
let pty_pair = pty_system
@ -74,7 +76,7 @@ impl PtyProcess {
// Spawn a blocking task to process submitting stdin async
let (stdin_tx, mut stdin_rx) = mpsc::channel::<Vec<u8>>(1);
let mut stdin_writer = pty_master
.try_clone_writer()
.take_writer()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
let stdin_task = tokio::task::spawn_blocking(move || {
while let Some(input) = stdin_rx.blocking_recv() {
@ -111,28 +113,43 @@ impl PtyProcess {
loop {
match (child.try_wait(), kill_rx.try_recv()) {
(Ok(Some(status)), _) => {
// TODO: Keep track of io error
let _ = wait_tx
trace!(
"Pty process {id} has exited: success = {}",
status.success()
);
if let Err(x) = wait_tx
.send(ExitStatus {
success: status.success(),
code: None,
})
.await;
.await
{
error!("Pty process {id} exit status lost: {x}");
}
break;
}
(_, Ok(_)) => {
// TODO: Keep track of io error
let _ = wait_tx.kill().await;
trace!("Pty process {id} received kill request");
if let Err(x) = wait_tx.kill().await {
error!("Pty process {id} exit status lost: {x}");
}
break;
}
(Err(x), _) => {
// TODO: Keep track of io error
let _ = wait_tx.send(x).await;
trace!("Pty process {id} failed to wait");
if let Err(x) = wait_tx.send(x).await {
error!("Pty process {id} exit status lost: {x}");
}
break;
}
_ => {
tokio::time::sleep(tokio::time::Duration::from_millis(READ_PAUSE_MILLIS))
.await;
tokio::time::sleep(READ_PAUSE_DURATION).await;
continue;
}
}
@ -140,8 +157,8 @@ impl PtyProcess {
});
Ok(Self {
id: rand::random(),
pty_master: PtyProcessMaster(Arc::new(Mutex::new(pty_master))),
id,
pty_master: Some(Arc::new(Mutex::new(pty_master))),
stdin: Some(Box::new(stdin_tx)),
stdout: Some(Box::new(stdout_rx)),
stdin_task: Some(stdin_task),
@ -150,6 +167,14 @@ impl PtyProcess {
wait: wait_rx,
})
}
/// Return a weak reference to the pty master
fn pty_master(&self) -> Weak<Mutex<Box<dyn MasterPty + Send>>> {
self.pty_master
.as_ref()
.map(Arc::downgrade)
.unwrap_or_default()
}
}
impl Process for PtyProcess {
@ -161,6 +186,9 @@ impl Process for PtyProcess {
async fn inner(this: &mut PtyProcess) -> io::Result<ExitStatus> {
let mut status = this.wait.recv().await?;
// Drop our master once we have finished
let _ = this.pty_master.take();
if let Some(task) = this.stdin_task.take() {
task.abort();
}
@ -171,6 +199,7 @@ impl Process for PtyProcess {
if status.success && status.code.is_none() {
status.code = Some(0);
}
Ok(status)
}
Box::pin(inner(self))
@ -250,42 +279,53 @@ impl ProcessKiller for PtyProcessKiller {
impl ProcessPty for PtyProcess {
fn pty_size(&self) -> Option<PtySize> {
self.pty_master.pty_size()
PtyProcessMaster(self.pty_master()).pty_size()
}
fn resize_pty(&self, size: PtySize) -> io::Result<()> {
self.pty_master.resize_pty(size)
PtyProcessMaster(self.pty_master()).resize_pty(size)
}
fn clone_pty(&self) -> Box<dyn ProcessPty> {
self.pty_master.clone_pty()
PtyProcessMaster(self.pty_master()).clone_pty()
}
}
#[derive(Clone)]
pub struct PtyProcessMaster(Arc<Mutex<Box<dyn MasterPty + Send>>>);
struct PtyProcessMaster(Weak<Mutex<Box<dyn MasterPty + Send>>>);
impl ProcessPty for PtyProcessMaster {
fn pty_size(&self) -> Option<PtySize> {
self.0.lock().unwrap().get_size().ok().map(|size| PtySize {
rows: size.rows,
cols: size.cols,
pixel_width: size.pixel_width,
pixel_height: size.pixel_height,
})
}
fn resize_pty(&self, size: PtySize) -> io::Result<()> {
self.0
.lock()
.unwrap()
.resize(PortablePtySize {
if let Some(master) = Weak::upgrade(&self.0) {
master.lock().unwrap().get_size().ok().map(|size| PtySize {
rows: size.rows,
cols: size.cols,
pixel_width: size.pixel_width,
pixel_height: size.pixel_height,
})
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
} else {
None
}
}
fn resize_pty(&self, size: PtySize) -> io::Result<()> {
if let Some(master) = Weak::upgrade(&self.0) {
master
.lock()
.unwrap()
.resize(PortablePtySize {
rows: size.rows,
cols: size.cols,
pixel_width: size.pixel_width,
pixel_height: size.pixel_height,
})
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
} else {
Err(io::Error::new(
io::ErrorKind::BrokenPipe,
"Pty master has been dropped",
))
}
}
fn clone_pty(&self) -> Box<dyn ProcessPty> {

@ -1,10 +1,18 @@
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Stdio;
use distant_core::protocol::Environment;
use log::*;
use tokio::io;
use tokio::process::Command;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use super::{
wait, ExitStatus, FutureReturn, InputChannel, NoProcessPty, OutputChannel, Process, ProcessId,
ProcessKiller, WaitRx,
};
use crate::data::Environment;
use std::{ffi::OsStr, path::PathBuf, process::Stdio};
use tokio::{io, process::Command, sync::mpsc, task::JoinHandle};
mod tasks;
@ -34,6 +42,7 @@ impl SimpleProcess {
I: IntoIterator<Item = S2>,
S2: AsRef<OsStr>,
{
let id = rand::random();
let mut child = {
let mut command = Command::new(program);
@ -65,23 +74,44 @@ impl SimpleProcess {
tokio::spawn(async move {
tokio::select! {
_ = kill_rx.recv() => {
trace!("Pty process {id} received kill request");
let status = match child.kill().await {
Ok(_) => ExitStatus::killed(),
Err(x) => ExitStatus::from(x),
};
// TODO: Keep track of io error
let _ = wait_tx.send(status).await;
trace!(
"Simple process {id} has exited: success = {}, code = {}",
status.success,
status.code.map(|code| code.to_string())
.unwrap_or_else(|| "<terminated>".to_string()),
);
if let Err(x) = wait_tx.send(status).await {
error!("Simple process {id} exit status lost: {x}");
}
}
status = child.wait() => {
// TODO: Keep track of io error
let _ = wait_tx.send(status).await;
match &status {
Ok(status) => trace!(
"Simple process {id} has exited: success = {}, code = {}",
status.success(),
status.code()
.map(|code| code.to_string())
.unwrap_or_else(|| "<terminated>".to_string()),
),
Err(_) => trace!("Simple process {id} failed to wait"),
}
if let Err(x) = wait_tx.send(status).await {
error!("Simple process {id} exit status lost: {x}");
}
}
}
});
Ok(Self {
id: rand::random(),
id,
stdin: Some(Box::new(stdin_ch)),
stdout: Some(Box::new(stdout_ch)),
stderr: Some(Box::new(stderr_ch)),

@ -1,10 +1,10 @@
use crate::constants::{MAX_PIPE_CHUNK_SIZE, READ_PAUSE_MILLIS};
use std::io;
use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
sync::mpsc,
task::JoinHandle,
};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use crate::constants::{MAX_PIPE_CHUNK_SIZE, READ_PAUSE_DURATION};
pub fn spawn_read_task<R>(
reader: R,
@ -34,13 +34,13 @@ where
// Pause to allow buffer to fill up a little bit, avoiding
// spamming with a lot of smaller responses
tokio::time::sleep(tokio::time::Duration::from_millis(READ_PAUSE_MILLIS)).await;
tokio::time::sleep(READ_PAUSE_DURATION).await;
}
Ok(_) => return Ok(()),
Err(x) if x.kind() == io::ErrorKind::WouldBlock => {
// Pause to allow buffer to fill up a little bit, avoiding
// spamming with a lot of smaller responses
tokio::time::sleep(tokio::time::Duration::from_millis(READ_PAUSE_MILLIS)).await;
tokio::time::sleep(READ_PAUSE_DURATION).await;
}
Err(x) => return Err(x),
}

@ -1,4 +1,5 @@
use tokio::{io, sync::mpsc};
use tokio::io;
use tokio::sync::mpsc;
/// Exit status of a remote process
#[derive(Copy, Clone, Debug, PartialEq, Eq)]

@ -0,0 +1,36 @@
use std::io;
use crate::config::Config;
mod process;
pub use process::*;
mod search;
pub use search::*;
mod watcher;
pub use watcher::*;
/// Holds global state managed by the server
pub struct GlobalState {
    /// State that holds information about processes running on the server
    pub process: ProcessState,

    /// State that holds information about searches running on the server
    pub search: SearchState,

    /// Watcher used for filesystem events
    pub watcher: WatcherState,
}
impl GlobalState {
    /// Creates the server's global state from the provided configuration.
    ///
    /// Fails only if the filesystem watcher cannot be initialized.
    pub fn initialize(config: Config) -> io::Result<Self> {
        // The watcher is the only fallible piece, so build it first
        let watcher = WatcherBuilder::new()
            .with_config(config.watch)
            .initialize()?;

        Ok(Self {
            process: ProcessState::new(),
            search: SearchState::new(),
            watcher,
        })
    }
}

@ -1,22 +1,24 @@
use crate::data::{DistantResponseData, Environment, ProcessId, PtySize};
use distant_net::Reply;
use std::{collections::HashMap, io, ops::Deref, path::PathBuf};
use tokio::{
sync::{mpsc, oneshot},
task::JoinHandle,
};
use std::collections::HashMap;
use std::io;
use std::ops::Deref;
use std::path::PathBuf;
use distant_core::net::server::Reply;
use distant_core::protocol::{Environment, ProcessId, PtySize, Response};
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
mod instance;
pub use instance::*;
/// Holds information related to spawned processes on the server
/// Holds information related to spawned processes on the server.
pub struct ProcessState {
channel: ProcessChannel,
task: JoinHandle<()>,
}
impl Drop for ProcessState {
/// Aborts the task that handles process operations and management
/// Aborts the task that handles process operations and management.
fn drop(&mut self) {
self.abort();
}
@ -33,10 +35,6 @@ impl ProcessState {
}
}
pub fn clone_channel(&self) -> ProcessChannel {
self.channel.clone()
}
/// Aborts the process task
pub fn abort(&self) {
self.task.abort();
@ -57,7 +55,7 @@ pub struct ProcessChannel {
}
impl Default for ProcessChannel {
/// Creates a new channel that is closed by default
/// Creates a new channel that is closed by default.
fn default() -> Self {
let (tx, _) = mpsc::channel(1);
Self { tx }
@ -65,15 +63,14 @@ impl Default for ProcessChannel {
}
impl ProcessChannel {
/// Spawns a new process, returning the id associated with it
/// Spawns a new process, returning the id associated with it.
pub async fn spawn(
&self,
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
) -> io::Result<ProcessId> {
let (cb, rx) = oneshot::channel();
self.tx
@ -81,7 +78,6 @@ impl ProcessChannel {
cmd,
environment,
current_dir,
persist,
pty,
reply,
cb,
@ -92,7 +88,7 @@ impl ProcessChannel {
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Response to spawn dropped"))?
}
/// Resizes the pty of a running process
/// Resizes the pty of a running process.
pub async fn resize_pty(&self, id: ProcessId, size: PtySize) -> io::Result<()> {
let (cb, rx) = oneshot::channel();
self.tx
@ -103,7 +99,7 @@ impl ProcessChannel {
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Response to resize dropped"))?
}
/// Send stdin to a running process
/// Send stdin to a running process.
pub async fn send_stdin(&self, id: ProcessId, data: Vec<u8>) -> io::Result<()> {
let (cb, rx) = oneshot::channel();
self.tx
@ -114,7 +110,8 @@ impl ProcessChannel {
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Response to stdin dropped"))?
}
/// Kills a running process
/// Kills a running process, including persistent processes if `force` is true. Will fail if
/// unable to kill the process or `force` is false when the process is persistent.
pub async fn kill(&self, id: ProcessId) -> io::Result<()> {
let (cb, rx) = oneshot::channel();
self.tx
@ -126,15 +123,14 @@ impl ProcessChannel {
}
}
/// Internal message to pass to our task below to perform some action
/// Internal message to pass to our task below to perform some action.
enum InnerProcessMsg {
Spawn {
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
cb: oneshot::Sender<io::Result<ProcessId>>,
},
Resize {
@ -165,14 +161,12 @@ async fn process_task(tx: mpsc::Sender<InnerProcessMsg>, mut rx: mpsc::Receiver<
cmd,
environment,
current_dir,
persist,
pty,
reply,
cb,
} => {
let _ = cb.send(
match ProcessInstance::spawn(cmd, environment, current_dir, persist, pty, reply)
{
match ProcessInstance::spawn(cmd, environment, current_dir, pty, reply) {
Ok(mut process) => {
let id = process.id;
@ -195,7 +189,7 @@ async fn process_task(tx: mpsc::Sender<InnerProcessMsg>, mut rx: mpsc::Receiver<
Some(process) => process.pty.resize_pty(size),
None => Err(io::Error::new(
io::ErrorKind::Other,
format!("No process found with id {}", id),
format!("No process found with id {id}"),
)),
});
}
@ -205,12 +199,12 @@ async fn process_task(tx: mpsc::Sender<InnerProcessMsg>, mut rx: mpsc::Receiver<
Some(stdin) => stdin.send(&data).await,
None => Err(io::Error::new(
io::ErrorKind::Other,
format!("Process {} stdin is closed", id),
format!("Process {id} stdin is closed"),
)),
},
None => Err(io::Error::new(
io::ErrorKind::Other,
format!("No process found with id {}", id),
format!("No process found with id {id}"),
)),
});
}
@ -219,7 +213,7 @@ async fn process_task(tx: mpsc::Sender<InnerProcessMsg>, mut rx: mpsc::Receiver<
Some(process) => process.killer.kill().await,
None => Err(io::Error::new(
io::ErrorKind::Other,
format!("No process found with id {}", id),
format!("No process found with id {id}"),
)),
});
}

@ -1,19 +1,20 @@
use crate::{
api::local::process::{
InputChannel, OutputChannel, Process, ProcessKiller, ProcessPty, PtyProcess, SimpleProcess,
},
data::{DistantResponseData, Environment, ProcessId, PtySize},
};
use distant_net::Reply;
use std::future::Future;
use std::io;
use std::path::PathBuf;
use distant_core::net::server::Reply;
use distant_core::protocol::{Environment, ProcessId, PtySize, Response};
use log::*;
use std::{future::Future, io, path::PathBuf};
use tokio::task::JoinHandle;
use crate::api::process::{
InputChannel, OutputChannel, Process, ProcessKiller, ProcessPty, PtyProcess, SimpleProcess,
};
/// Holds information related to a spawned process on the server
pub struct ProcessInstance {
pub cmd: String,
pub args: Vec<String>,
pub persist: bool,
pub id: ProcessId,
pub stdin: Option<Box<dyn InputChannel>>,
@ -63,9 +64,8 @@ impl ProcessInstance {
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
persist: bool,
pty: Option<PtySize>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
) -> io::Result<Self> {
// Build out the command and args from our string
let mut cmd_and_args = if cfg!(windows) {
@ -85,6 +85,7 @@ impl ProcessInstance {
let args = cmd_and_args.split_off(1);
let cmd = cmd_and_args.into_iter().next().unwrap();
debug!("Spawning process: {cmd} {args:?}");
let mut child: Box<dyn Process> = match pty {
Some(size) => Box::new(PtyProcess::spawn(
cmd.clone(),
@ -135,7 +136,6 @@ impl ProcessInstance {
Ok(ProcessInstance {
cmd,
args,
persist,
id,
stdin,
killer,
@ -169,17 +169,12 @@ impl ProcessInstance {
async fn stdout_task(
id: ProcessId,
mut stdout: Box<dyn OutputChannel>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
) -> io::Result<()> {
loop {
match stdout.recv().await {
Ok(Some(data)) => {
if let Err(x) = reply
.send(DistantResponseData::ProcStdout { id, data })
.await
{
return Err(x);
}
reply.send(Response::ProcStdout { id, data })?;
}
Ok(None) => return Ok(()),
Err(x) => return Err(x),
@ -190,17 +185,12 @@ async fn stdout_task(
async fn stderr_task(
id: ProcessId,
mut stderr: Box<dyn OutputChannel>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
) -> io::Result<()> {
loop {
match stderr.recv().await {
Ok(Some(data)) => {
if let Err(x) = reply
.send(DistantResponseData::ProcStderr { id, data })
.await
{
return Err(x);
}
reply.send(Response::ProcStderr { id, data })?;
}
Ok(None) => return Ok(()),
Err(x) => return Err(x),
@ -211,20 +201,16 @@ async fn stderr_task(
async fn wait_task(
id: ProcessId,
mut child: Box<dyn Process>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
) -> io::Result<()> {
let status = child.wait().await;
match status {
Ok(status) => {
reply
.send(DistantResponseData::ProcDone {
id,
success: status.success,
code: status.code,
})
.await
}
Err(x) => reply.send(DistantResponseData::from(x)).await,
Ok(status) => reply.send(Response::ProcDone {
id,
success: status.success,
code: status.code,
}),
Err(x) => reply.send(Response::from(x)),
}
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,429 @@
use std::collections::HashMap;
use std::io;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use distant_core::net::common::ConnectionId;
use distant_core::protocol::{Change, ChangeDetails, ChangeDetailsAttribute, ChangeKind};
use log::*;
use notify::event::{AccessKind, AccessMode, MetadataKind, ModifyKind, RenameMode};
use notify::{
Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind,
Event as WatcherEvent, EventKind, PollWatcher, RecommendedWatcher, RecursiveMode, Watcher,
};
use notify_debouncer_full::{new_debouncer_opt, DebounceEventResult, Debouncer, FileIdMap};
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::config::WatchConfig;
use crate::constants::SERVER_WATCHER_CAPACITY;
mod path;
pub use path::*;
/// Builder for a watcher.
#[derive(Default)]
pub struct WatcherBuilder {
    /// Configuration applied when the watcher is initialized (native vs poll,
    /// poll interval, content comparison, and debounce settings)
    config: WatchConfig,
}
impl WatcherBuilder {
    /// Creates a new builder configured to use the native watcher using default configuration.
    pub fn new() -> Self {
        Self::default()
    }

    /// Swaps the configuration with the provided one.
    pub fn with_config(self, config: WatchConfig) -> Self {
        Self { config }
    }

    /// Will create a watcher and initialize watched paths to be empty
    ///
    /// Spawns a background task that owns the debounced watcher; the returned
    /// [`WatcherState`] holds the channel used to send watch/unwatch requests
    /// to that task as well as the task handle itself.
    pub fn initialize(self) -> io::Result<WatcherState> {
        // NOTE: Cannot be something small like 1 as this seems to cause a deadlock sometimes
        //       with a large volume of watch requests
        let (tx, rx) = mpsc::channel(SERVER_WATCHER_CAPACITY);

        // When no poll interval was configured, fall back to polling every 30 seconds
        let watcher_config = WatcherConfig::default()
            .with_compare_contents(self.config.compare_contents)
            .with_poll_interval(self.config.poll_interval.unwrap_or(Duration::from_secs(30)));

        // Forwards a single debounced event (or error) into our mpsc channel without
        // blocking; if the channel is full the event is dropped with a warning
        macro_rules! process_event {
            ($tx:ident, $evt:expr) => {
                match $tx.try_send(match $evt {
                    Ok(x) => InnerWatcherMsg::Event { ev: x },
                    Err(x) => InnerWatcherMsg::Error { err: x },
                }) {
                    Ok(_) => (),
                    Err(TrySendError::Full(_)) => {
                        warn!(
                            "Reached watcher capacity of {}! Dropping watcher event!",
                            SERVER_WATCHER_CAPACITY,
                        );
                    }
                    Err(TrySendError::Closed(_)) => {
                        warn!("Skipping watch event because watcher channel closed");
                    }
                }
            };
        }

        // Builds a debouncer around the given watcher type whose callback pushes all
        // debounced events/errors through `process_event!` into our channel
        macro_rules! new_debouncer {
            ($watcher:ident, $tx:ident) => {{
                new_debouncer_opt::<_, $watcher, FileIdMap>(
                    self.config.debounce_timeout,
                    self.config.debounce_tick_rate,
                    move |result: DebounceEventResult| match result {
                        Ok(events) => {
                            for x in events {
                                process_event!($tx, Ok(x));
                            }
                        }
                        Err(errors) => {
                            for x in errors {
                                process_event!($tx, Err(x));
                            }
                        }
                    },
                    FileIdMap::new(),
                    watcher_config,
                )
            }};
        }

        // Spawns the long-running `watcher_task` for the debouncer and bundles it
        // with the request channel into a `WatcherState`
        macro_rules! spawn_task {
            ($debouncer:expr) => {{
                WatcherState {
                    channel: WatcherChannel { tx },
                    task: tokio::spawn(watcher_task($debouncer, rx)),
                }
            }};
        }

        let tx = tx.clone();
        if self.config.native {
            let result = {
                let tx = tx.clone();
                new_debouncer!(RecommendedWatcher, tx)
            };

            match result {
                Ok(debouncer) => Ok(spawn_task!(debouncer)),
                Err(x) => {
                    match x.kind {
                        // notify-rs has a bug on Mac M1 with Docker and Linux, so we detect that error
                        // and fall back to the poll watcher if this occurs
                        //
                        // https://github.com/notify-rs/notify/issues/423
                        WatcherErrorKind::Io(x) if x.raw_os_error() == Some(38) => {
                            warn!("Recommended watcher is unsupported! Falling back to polling watcher!");
                            Ok(spawn_task!(new_debouncer!(PollWatcher, tx)
                                .map_err(|x| io::Error::new(io::ErrorKind::Other, x))?))
                        }
                        _ => Err(io::Error::new(io::ErrorKind::Other, x)),
                    }
                }
            }
        } else {
            // Non-native mode always uses the polling watcher
            Ok(spawn_task!(new_debouncer!(PollWatcher, tx)
                .map_err(|x| io::Error::new(io::ErrorKind::Other, x))?))
        }
    }
}
/// Holds information related to watched paths on the server
pub struct WatcherState {
    /// Channel used to send watch/unwatch requests to the background task
    channel: WatcherChannel,
    /// Handle to the spawned task that drives the underlying watcher
    task: JoinHandle<()>,
}
impl Drop for WatcherState {
    /// Aborts the task that handles watcher path operations and management
    fn drop(&mut self) {
        // Stop the background watcher task directly; equivalent to `WatcherState::abort`
        self.task.abort();
    }
}
impl WatcherState {
    /// Aborts the watcher task
    ///
    /// After this, requests sent through the associated [`WatcherChannel`]
    /// will fail since the receiving task is gone.
    pub fn abort(&self) {
        self.task.abort();
    }
}
impl Deref for WatcherState {
    type Target = WatcherChannel;

    /// Exposes the inner channel so watch/unwatch can be called directly on the state
    fn deref(&self) -> &Self::Target {
        &self.channel
    }
}
/// Cloneable handle used to send watch/unwatch requests to the watcher task
#[derive(Clone)]
pub struct WatcherChannel {
    /// Sender side of the channel consumed by `watcher_task`
    tx: mpsc::Sender<InnerWatcherMsg>,
}
impl Default for WatcherChannel {
    /// Creates a new channel that is closed by default
    fn default() -> Self {
        // Dropping the receiver immediately leaves the sender permanently closed,
        // so every request made through this channel will fail
        let (sender, _receiver) = mpsc::channel(1);
        Self { tx: sender }
    }
}
impl WatcherChannel {
    /// Watch a path for a specific connection denoted by the id within the registered path
    pub async fn watch(&self, registered_path: RegisteredPath) -> io::Result<()> {
        let (reply_tx, reply_rx) = oneshot::channel();
        let msg = InnerWatcherMsg::Watch {
            registered_path,
            cb: reply_tx,
        };
        if self.tx.send(msg).await.is_err() {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "Internal watcher task closed",
            ));
        }
        match reply_rx.await {
            Ok(result) => result,
            Err(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "Response to watch dropped",
            )),
        }
    }

    /// Unwatch a path for a specific connection denoted by the id
    pub async fn unwatch(&self, id: ConnectionId, path: impl AsRef<Path>) -> io::Result<()> {
        let (reply_tx, reply_rx) = oneshot::channel();

        // Prefer the canonical form of the path, but fall back to it verbatim
        // when canonicalization fails (e.g. the path no longer exists)
        let path = match tokio::fs::canonicalize(path.as_ref()).await {
            Ok(canonical) => canonical,
            Err(_) => path.as_ref().to_path_buf(),
        };

        let msg = InnerWatcherMsg::Unwatch {
            id,
            path,
            cb: reply_tx,
        };
        if self.tx.send(msg).await.is_err() {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "Internal watcher task closed",
            ));
        }
        match reply_rx.await {
            Ok(result) => result,
            Err(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "Response to unwatch dropped",
            )),
        }
    }
}
/// Internal message to pass to our task below to perform some action
enum InnerWatcherMsg {
    /// Register a path to be watched on behalf of some connection
    Watch {
        registered_path: RegisteredPath,
        /// Reports back success/failure of registering with the watcher
        cb: oneshot::Sender<io::Result<()>>,
    },
    /// Remove registrations matching the connection id and path
    Unwatch {
        id: ConnectionId,
        path: PathBuf,
        /// Reports back success/failure of the unwatch
        cb: oneshot::Sender<io::Result<()>>,
    },
    /// A debounced filesystem event produced by the watcher
    Event {
        ev: WatcherEvent,
    },
    /// An error reported by the watcher
    Error {
        err: WatcherError,
    },
}
async fn watcher_task<W>(
mut debouncer: Debouncer<W, FileIdMap>,
mut rx: mpsc::Receiver<InnerWatcherMsg>,
) where
W: Watcher,
{
// TODO: Optimize this in some way to be more performant than
// checking every path whenever an event comes in
let mut registered_paths: Vec<RegisteredPath> = Vec::new();
let mut path_cnt: HashMap<PathBuf, usize> = HashMap::new();
while let Some(msg) = rx.recv().await {
match msg {
InnerWatcherMsg::Watch {
registered_path,
cb,
} => {
// Check if we are tracking the path across any connection
if let Some(cnt) = path_cnt.get_mut(registered_path.path()) {
// Increment the count of times we are watching that path
*cnt += 1;
// Store the registered path in our collection without worry
// since we are already watching a path that impacts this one
registered_paths.push(registered_path);
// Send an okay because we always succeed in this case
let _ = cb.send(Ok(()));
} else {
let res = debouncer
.watcher()
.watch(
registered_path.path(),
if registered_path.is_recursive() {
RecursiveMode::Recursive
} else {
RecursiveMode::NonRecursive
},
)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x));
// If we succeeded, store our registered path and set the tracking cnt to 1
if res.is_ok() {
path_cnt.insert(registered_path.path().to_path_buf(), 1);
registered_paths.push(registered_path);
}
// Send the result of the watch, but don't worry if the channel was closed
let _ = cb.send(res);
}
}
InnerWatcherMsg::Unwatch { id, path, cb } => {
// Check if we are tracking the path across any connection
if let Some(cnt) = path_cnt.get(path.as_path()) {
// Cycle through and remove all paths that match the given id and path,
// capturing how many paths we removed
let removed_cnt = {
let old_len = registered_paths.len();
registered_paths
.retain(|p| p.id() != id || (p.path() != path && p.raw_path() != path));
let new_len = registered_paths.len();
old_len - new_len
};
// 1. If we are now at zero cnt for our path, we want to actually unwatch the
// path with our watcher
// 2. If we removed nothing from our path list, we want to return an error
// 3. Otherwise, we return okay because we succeeded
if *cnt <= removed_cnt {
let _ = cb.send(
debouncer
.watcher()
.unwatch(&path)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x)),
);
} else if removed_cnt == 0 {
// Send a failure as there was nothing to unwatch for this connection
let _ = cb.send(Err(io::Error::new(
io::ErrorKind::Other,
format!("{path:?} is not being watched"),
)));
} else {
// Send a success as we removed some paths
let _ = cb.send(Ok(()));
}
} else {
// Send a failure as there was nothing to unwatch
let _ = cb.send(Err(io::Error::new(
io::ErrorKind::Other,
format!("{path:?} is not being watched"),
)));
}
}
InnerWatcherMsg::Event { ev } => {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("System time before unix epoch")
.as_secs();
let kind = match ev.kind {
EventKind::Access(AccessKind::Read) => ChangeKind::Access,
EventKind::Modify(ModifyKind::Metadata(_)) => ChangeKind::Attribute,
EventKind::Access(AccessKind::Close(AccessMode::Write)) => {
ChangeKind::CloseWrite
}
EventKind::Access(AccessKind::Close(_)) => ChangeKind::CloseNoWrite,
EventKind::Create(_) => ChangeKind::Create,
EventKind::Remove(_) => ChangeKind::Delete,
EventKind::Modify(ModifyKind::Data(_)) => ChangeKind::Modify,
EventKind::Access(AccessKind::Open(_)) => ChangeKind::Open,
EventKind::Modify(ModifyKind::Name(_)) => ChangeKind::Rename,
_ => ChangeKind::Unknown,
};
for registered_path in registered_paths.iter() {
// For rename both, we assume the paths is a pair that represents before and
// after, so we want to grab the before and use it!
let (paths, renamed): (&[PathBuf], Option<PathBuf>) = match ev.kind {
EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => (
&ev.paths[0..1],
if ev.paths.len() > 1 {
ev.paths.last().cloned()
} else {
None
},
),
_ => (&ev.paths, None),
};
for path in paths {
let attribute = match ev.kind {
EventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => {
Some(ChangeDetailsAttribute::Ownership)
}
EventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => {
Some(ChangeDetailsAttribute::Permissions)
}
EventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
Some(ChangeDetailsAttribute::Timestamp)
}
_ => None,
};
// Calculate a timestamp for creation & modification paths
let details_timestamp = match ev.kind {
EventKind::Create(_) => tokio::fs::symlink_metadata(path.as_path())
.await
.ok()
.and_then(|m| m.created().ok())
.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
.map(|d| d.as_secs()),
EventKind::Modify(_) => tokio::fs::symlink_metadata(path.as_path())
.await
.ok()
.and_then(|m| m.modified().ok())
.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
.map(|d| d.as_secs()),
_ => None,
};
let change = Change {
timestamp,
kind,
path: path.to_path_buf(),
details: ChangeDetails {
attribute,
renamed: renamed.clone(),
timestamp: details_timestamp,
extra: ev.info().map(ToString::to_string),
},
};
match registered_path.filter_and_send(change) {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
),
}
}
}
}
InnerWatcherMsg::Error { err } => {
let msg = err.to_string();
error!("Watcher encountered an error {} for {:?}", msg, err.paths);
for registered_path in registered_paths.iter() {
match registered_path.filter_and_send_error(
&msg,
&err.paths,
!err.paths.is_empty(),
) {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
),
}
}
}
}
}
}

@ -1,14 +1,10 @@
use crate::{
data::{Change, ChangeKind, ChangeKindSet, DistantResponseData, Error},
ConnectionId,
};
use distant_net::Reply;
use std::{
fmt,
hash::{Hash, Hasher},
io,
path::{Path, PathBuf},
};
use std::hash::{Hash, Hasher};
use std::path::{Path, PathBuf};
use std::{fmt, io};
use distant_core::net::common::ConnectionId;
use distant_core::net::server::Reply;
use distant_core::protocol::{Change, ChangeKindSet, Error, Response};
/// Represents a path registered with a watcher that includes relevant state including
/// the ability to reply with
@ -32,7 +28,7 @@ pub struct RegisteredPath {
allowed: ChangeKindSet,
/// Used to send a reply through the connection watching this path
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
}
impl fmt::Debug for RegisteredPath {
@ -72,7 +68,7 @@ impl RegisteredPath {
recursive: bool,
only: impl Into<ChangeKindSet>,
except: impl Into<ChangeKindSet>,
reply: Box<dyn Reply<Data = DistantResponseData>>,
reply: Box<dyn Reply<Data = Response>>,
) -> io::Result<Self> {
let raw_path = path.into();
let path = tokio::fs::canonicalize(raw_path.as_path()).await?;
@ -123,39 +119,27 @@ impl RegisteredPath {
}
/// Sends a reply for a change tied to this registered path, filtering
/// out any paths that are not applicable
/// out any changes that are not applicable.
///
/// Returns true if message was sent, and false if not
pub async fn filter_and_send<T>(&self, kind: ChangeKind, paths: T) -> io::Result<bool>
where
T: IntoIterator,
T::Item: AsRef<Path>,
{
if !self.allowed().contains(&kind) {
/// Returns true if message was sent, and false if not.
pub fn filter_and_send(&self, change: Change) -> io::Result<bool> {
if !self.allowed().contains(&change.kind) {
return Ok(false);
}
let paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.applies_to_path(p.as_ref()))
.map(|p| p.as_ref().to_path_buf())
.collect();
if !paths.is_empty() {
self.reply
.send(DistantResponseData::Changed(Change { kind, paths }))
.await
.map(|_| true)
// Only send if this registered path applies to the changed path
if self.applies_to_path(&change.path) {
self.reply.send(Response::Changed(change)).map(|_| true)
} else {
Ok(false)
}
}
/// Sends an error message and includes paths if provided, skipping sending the message if
/// no paths match and `skip_if_no_paths` is true
/// no paths match and `skip_if_no_paths` is true.
///
/// Returns true if message was sent, and false if not
pub async fn filter_and_send_error<T>(
/// Returns true if message was sent, and false if not.
pub fn filter_and_send_error<T>(
&self,
msg: &str,
paths: T,
@ -174,11 +158,10 @@ impl RegisteredPath {
if !paths.is_empty() || !skip_if_no_paths {
self.reply
.send(if paths.is_empty() {
DistantResponseData::Error(Error::from(msg))
Response::Error(Error::from(msg))
} else {
DistantResponseData::Error(Error::from(format!("{} about {:?}", msg, paths)))
Response::Error(Error::from(format!("{msg} about {paths:?}")))
})
.await
.map(|_| true)
} else {
Ok(false)

@ -0,0 +1,28 @@
use std::time::Duration;
/// Top-level configuration for the local API implementation
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct Config {
    /// Settings that control file/directory watching behavior
    pub watch: WatchConfig,
}
/// Configuration specifically for watching files and directories.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchConfig {
    /// If true, use the platform's recommended (native) watcher; otherwise use
    /// the polling watcher.
    pub native: bool,
    /// Interval used by the polling watcher; a 30-second interval is applied
    /// when this is `None`.
    pub poll_interval: Option<Duration>,
    /// Forwarded to the watcher's `compare_contents` option.
    pub compare_contents: bool,
    /// Timeout handed to the event debouncer.
    pub debounce_timeout: Duration,
    /// Optional tick rate handed to the event debouncer.
    pub debounce_tick_rate: Option<Duration>,
}

impl Default for WatchConfig {
    /// Native watcher, half-second debounce, and no polling overrides.
    fn default() -> Self {
        WatchConfig {
            debounce_timeout: Duration::from_millis(500),
            debounce_tick_rate: None,
            poll_interval: None,
            compare_contents: false,
            native: true,
        }
    }
}

@ -0,0 +1,14 @@
use std::time::Duration;
/// Capacity associated with the server's file watcher to pass events outbound
///
/// Events beyond this capacity may be dropped by the watcher rather than queued.
pub const SERVER_WATCHER_CAPACITY: usize = 10000;

/// Represents the maximum size (in bytes) that data will be read from pipes
/// per individual `read` call
///
/// Current setting is 16k size
pub const MAX_PIPE_CHUNK_SIZE: usize = 16384;

/// Duration (currently 1ms) to sleep between reading stdout/stderr chunks
/// to avoid sending many small messages to clients
pub const READ_PAUSE_DURATION: Duration = Duration::from_millis(1);

@ -0,0 +1,20 @@
#![doc = include_str!("../README.md")]
// Attaches the README as this struct's docs only under `cfg(doctest)`, so the
// README's code samples are compiled/run as doctests without affecting normal builds
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
mod api;
mod config;
mod constants;
pub use api::Api;
pub use config::*;
use distant_core::DistantApiServerHandler;
/// Implementation of [`DistantApiServerHandler`] using [`Api`].
pub type Handler = DistantApiServerHandler<Api>;
/// Initializes a new [`Handler`].
///
/// Builds the [`Api`] from the given config and wraps it in the server handler;
/// fails if API initialization fails.
pub fn new_handler(config: Config) -> std::io::Result<Handler> {
    let api = Api::initialize(config)?;
    Ok(Handler::new(api))
}

@ -0,0 +1,65 @@
use assert_fs::prelude::*;
use distant_core::DistantChannelExt;
use rstest::*;
use test_log::test;
use crate::stress::fixtures::*;
// 64KB is maximum TCP packet size
const MAX_TCP_PACKET_BYTES: usize = 65535;
// 640KB should be big enough to cause problems
const LARGE_FILE_LEN: usize = MAX_TCP_PACKET_BYTES * 10;
#[rstest]
#[test(tokio::test)]
async fn should_handle_large_files(#[future] ctx: DistantClientCtx) {
    let ctx = ctx.await;
    let mut channel = ctx.client.clone_channel();
    let root = assert_fs::TempDir::new().unwrap();

    // Generate data (deterministic byte pattern, larger than a TCP packet)
    eprintln!("Creating random data of size: {LARGE_FILE_LEN}");
    let expected: Vec<u8> = (0..LARGE_FILE_LEN).map(|i| i as u8).collect();

    // Create our large file to read, write, and append
    let file = root.child("large_file.dat");
    eprintln!("Writing random file: {:?}", file.path());
    file.write_binary(&expected)
        .expect("Failed to write large file");

    // Perform the read
    eprintln!("Reading file using distant");
    let mut actual = channel
        .read_file(file.path())
        .await
        .expect("Failed to read large file");
    assert_eq!(actual, expected, "Data mismatch");

    // Perform the write after modifying one byte
    eprintln!("Writing file using distant");
    let last = LARGE_FILE_LEN - 1;
    actual[last] = actual[last].overflowing_add(1).0;
    channel
        .write_file(file.path(), actual.clone())
        .await
        .expect("Failed to write large file");
    let on_disk = tokio::fs::read(file.path())
        .await
        .expect("Failed to read large file");
    assert_eq!(actual, on_disk, "Data was not written correctly");

    // Perform append
    eprintln!("Appending to file using distant");
    channel
        .append_file(file.path(), vec![1, 2, 3])
        .await
        .expect("Failed to append to large file");
    let appended = tokio::fs::read(file.path())
        .await
        .expect("Failed to read large file");
    assert_eq!(appended[appended.len() - 3..], [1, 2, 3]);
}

@ -0,0 +1,2 @@
mod large_file;
mod watch;

@ -1,12 +1,15 @@
use crate::stress::fixtures::*;
use assert_fs::prelude::*;
use distant_core::{data::ChangeKindSet, DistantChannelExt};
use distant_core::protocol::{ChangeKind, ChangeKindSet};
use distant_core::DistantChannelExt;
use rstest::*;
use test_log::test;
use crate::stress::fixtures::*;
const MAX_FILES: usize = 500;
#[rstest]
#[tokio::test]
#[test(tokio::test)]
#[ignore]
async fn should_handle_large_volume_of_file_watching(#[future] ctx: DistantClientCtx) {
let ctx = ctx.await;
@ -26,7 +29,7 @@ async fn should_handle_large_volume_of_file_watching(#[future] ctx: DistantClien
.watch(
file.path(),
false,
ChangeKindSet::modify_set(),
ChangeKindSet::new([ChangeKind::Modify]),
ChangeKindSet::empty(),
)
.await

@ -0,0 +1,70 @@
use std::net::SocketAddr;
use std::time::Duration;
use distant_core::net::auth::{DummyAuthHandler, Verifier};
use distant_core::net::client::{Client, TcpConnector};
use distant_core::net::common::PortRange;
use distant_core::net::server::Server;
use distant_core::{DistantApiServerHandler, DistantClient};
use distant_local::Api;
use rstest::*;
use tokio::sync::mpsc;
/// Test fixture bundling a connected client with a handle that keeps the
/// in-process server task alive
pub struct DistantClientCtx {
    /// Client connected to the locally spawned server
    pub client: DistantClient,
    // Dropping this sender closes the channel the server task waits on,
    // letting the spawned server shut down when the ctx is dropped
    _done_tx: mpsc::Sender<()>,
}
impl DistantClientCtx {
    /// Spawns an in-process distant server on an ephemeral localhost TCP port,
    /// connects a client to it, and returns both bundled in a ctx. The server
    /// task runs until the returned ctx (and thus `_done_tx`) is dropped.
    pub async fn initialize() -> Self {
        let ip_addr = "127.0.0.1".parse().unwrap();
        let (done_tx, mut done_rx) = mpsc::channel::<()>(1);
        let (started_tx, mut started_rx) = mpsc::channel::<u16>(1);

        tokio::spawn(async move {
            if let Ok(api) = Api::initialize(Default::default()) {
                // "0" asks the OS for any free port; the actual port is read back below
                let port: PortRange = "0".parse().unwrap();
                let port = {
                    let handler = DistantApiServerHandler::new(api);
                    let server_ref = Server::new()
                        .handler(handler)
                        .verifier(Verifier::none())
                        .into_tcp_builder()
                        .start(ip_addr, port)
                        .await
                        .unwrap();
                    server_ref.port()
                };

                started_tx.send(port).await.unwrap();

                // Park here until the ctx is dropped (which closes done_tx)
                let _ = done_rx.recv().await;
            }
        });

        // Extract our server startup data if we succeeded
        let port = started_rx.recv().await.unwrap();

        // Now initialize our client
        let client: DistantClient = Client::build()
            .auth_handler(DummyAuthHandler)
            .connect_timeout(Duration::from_secs(1))
            .connector(TcpConnector::new(
                format!("{}:{}", ip_addr, port)
                    .parse::<SocketAddr>()
                    .unwrap(),
            ))
            .connect()
            .await
            .unwrap();

        DistantClientCtx {
            client,
            _done_tx: done_tx,
        }
    }
}
/// rstest fixture producing a fresh client/server context per test
#[fixture]
pub async fn ctx() -> DistantClientCtx {
    DistantClientCtx::initialize().await
}

@ -3,7 +3,7 @@ name = "distant-net"
description = "Network library for distant, providing implementations to support client/server architecture"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.17.5"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -12,26 +12,33 @@ readme = "README.md"
license = "MIT OR Apache-2.0"
[dependencies]
async-trait = "0.1.57"
bytes = "1.2.1"
chacha20poly1305 = "0.10.0"
async-trait = "0.1.68"
bytes = "1.4.0"
chacha20poly1305 = "0.10.1"
const-str = "0.5.6"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
futures = "0.3.21"
distant-auth = { version = "=0.20.0", path = "../distant-auth" }
dyn-clone = "1.0.11"
flate2 = "1.0.26"
hex = "0.4.3"
hkdf = "0.12.3"
log = "0.4.17"
paste = "1.0.8"
p256 = { version = "0.11.1", features = ["ecdh", "pem"] }
log = "0.4.18"
paste = "1.0.12"
p256 = { version = "0.13.2", features = ["ecdh", "pem"] }
rand = { version = "0.8.5", features = ["getrandom"] }
rmp-serde = "1.1.0"
sha2 = "0.10.2"
serde = { version = "1.0.142", features = ["derive"] }
serde_bytes = "0.11.7"
tokio = { version = "1.20.1", features = ["full"] }
tokio-util = { version = "0.7.3", features = ["codec"] }
# Optional dependencies based on features
schemars = { version = "0.8.10", optional = true }
rmp = "0.8.11"
rmp-serde = "1.1.1"
sha2 = "0.10.6"
semver = { version = "1.0.17", features = ["serde"] }
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
serde_json = "1.0.96"
strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.28.2", features = ["full"] }
[dev-dependencies]
tempfile = "3.3.0"
distant-auth = { version = "=0.20.0", path = "../distant-auth", features = ["tests"] }
env_logger = "0.10.0"
serde_json = "1.0.96"
tempfile = "3.5.0"
test-log = "0.2.11"

@ -1,18 +1,13 @@
# distant net
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.61.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-net.svg
[distant_crates_lnk]: https://crates.io/crates/distant-net
[distant_doc_img]: https://docs.rs/distant-net/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-net
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.61+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2022/05/19/Rust-1.61.0.html
Library that powers the [`distant`](https://github.com/chipsenkbeil/distant)
binary.
🚧 **(Alpha stage software) This library is in rapid development and may break or change frequently!** 🚧
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
@ -25,18 +20,9 @@ You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-net = "0.17"
distant-net = "0.20"
```
## Features
Currently, the library supports the following features:
- `schemars`: derives the `schemars::JsonSchema` interface on `Request`
and `Response` data types
By default, no features are enabled on the library.
## License
This project is licensed under either of

@ -1,122 +0,0 @@
use derive_more::Display;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
mod client;
pub use client::*;
mod handshake;
pub use handshake::*;
mod server;
pub use server::*;
/// Represents authentication messages that can be sent over the wire
///
/// NOTE: Must use serde's content attribute with the tag attribute. Just the tag attribute will
///       cause deserialization to fail
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type", content = "data")]
pub enum Auth {
    /// Represents a request to perform an authentication handshake,
    /// providing the public key and salt from one side in order to
    /// derive the shared key
    #[serde(rename = "auth_handshake")]
    Handshake {
        /// Bytes of the public key
        #[serde(with = "serde_bytes")]
        public_key: PublicKeyBytes,

        /// Randomly generated salt
        #[serde(with = "serde_bytes")]
        salt: Salt,
    },

    /// Represents the bytes of an encrypted message
    ///
    /// Underneath, will be one of either [`AuthRequest`] or [`AuthResponse`]
    #[serde(rename = "auth_msg")]
    Msg {
        /// Ciphertext produced with the key derived during [`Auth::Handshake`]
        #[serde(with = "serde_bytes")]
        encrypted_payload: Vec<u8>,
    },
}
/// Represents authentication messages that act as initiators such as providing
/// a challenge, verifying information, presenting information, or highlighting an error
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type")]
pub enum AuthRequest {
    /// Represents a challenge comprising a series of questions to be presented
    Challenge {
        /// Questions to pose to the other side
        questions: Vec<AuthQuestion>,
        /// Extra key-value data specific to a particular auth domain
        extra: HashMap<String, String>,
    },

    /// Represents an ask to verify some information
    Verify { kind: AuthVerifyKind, text: String },

    /// Represents some information to be presented
    Info { text: String },

    /// Represents some error that occurred
    Error { kind: AuthErrorKind, text: String },
}
/// Represents authentication messages that are responses to auth requests such
/// as answers to challenges or verifying information
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type")]
pub enum AuthResponse {
    /// Represents the answers to a previously-asked challenge
    Challenge {
        /// Answers to the questions from the originating [`AuthRequest::Challenge`]
        answers: Vec<String>,
    },

    /// Represents the answer to a previously-asked verify
    Verify {
        /// Whether the verification was accepted
        valid: bool,
    },
}
/// Represents the type of verification being requested
///
/// Marked non-exhaustive so additional kinds can be added without breaking callers
#[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[non_exhaustive]
pub enum AuthVerifyKind {
    /// An ask to verify the host such as with SSH
    #[display(fmt = "host")]
    Host,
}
/// Represents a single question in a challenge
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct AuthQuestion {
    /// The text of the question
    pub text: String,

    /// Any extra information specific to a particular auth domain
    /// such as including a username and instructions for SSH authentication
    pub extra: HashMap<String, String>,
}
impl AuthQuestion {
    /// Creates a new question without any extra data
    pub fn new(text: impl Into<String>) -> Self {
        let text = text.into();
        Self {
            text,
            extra: HashMap::default(),
        }
    }
}
/// Represents the type of error encountered during authentication
///
/// Serialized over the wire in snake_case form
#[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AuthErrorKind {
    /// When the answer(s) to a challenge do not pass authentication
    FailedChallenge,

    /// When verification during authentication fails
    /// (e.g. a host is not allowed or blocked)
    FailedVerification,

    /// When the error is unknown
    Unknown,
}

@ -1,817 +0,0 @@
use crate::{
utils, Auth, AuthErrorKind, AuthQuestion, AuthRequest, AuthResponse, AuthVerifyKind, Client,
Codec, Handshake, XChaCha20Poly1305Codec,
};
use bytes::BytesMut;
use log::*;
use std::{collections::HashMap, io};
/// Client that drives the authentication protocol over an underlying
/// [`Client`] transporting [`Auth`] messages in both directions
pub struct AuthClient {
    /// Underlying transport for auth messages
    inner: Client<Auth, Auth>,
    /// Encryption codec; `None` until a handshake has succeeded
    codec: Option<XChaCha20Poly1305Codec>,
    /// Whether a handshake is performed just-in-time before each request
    jit_handshake: bool,
}
impl From<Client<Auth, Auth>> for AuthClient {
fn from(client: Client<Auth, Auth>) -> Self {
Self {
inner: client,
codec: None,
jit_handshake: false,
}
}
}
impl AuthClient {
/// Sends a request to the server to establish an encrypted connection
pub async fn handshake(&mut self) -> io::Result<()> {
let handshake = Handshake::default();
let response = self
.inner
.send(Auth::Handshake {
public_key: handshake.pk_bytes(),
salt: *handshake.salt(),
})
.await?;
match response.payload {
Auth::Handshake { public_key, salt } => {
let key = handshake.handshake(public_key, salt)?;
self.codec.replace(XChaCha20Poly1305Codec::new(&key));
Ok(())
}
Auth::Msg { .. } => Err(io::Error::new(
io::ErrorKind::Other,
"Got unexpected encrypted message during handshake",
)),
}
}
/// Perform a handshake only if jit is enabled and no handshake has succeeded yet
async fn jit_handshake(&mut self) -> io::Result<()> {
if self.will_jit_handshake() && !self.is_ready() {
self.handshake().await
} else {
Ok(())
}
}
/// Returns true if client has successfully performed a handshake
/// and is ready to communicate with the server
pub fn is_ready(&self) -> bool {
self.codec.is_some()
}
/// Returns true if this client will perform a handshake just-in-time (JIT) prior to making a
/// request in the scenario where the client has not already performed a handshake
#[inline]
pub fn will_jit_handshake(&self) -> bool {
self.jit_handshake
}
/// Sets the jit flag on this client with `true` indicating that this client will perform a
/// handshake just-in-time (JIT) prior to making a request in the scenario where the client has
/// not already performed a handshake
#[inline]
pub fn set_jit_handshake(&mut self, flag: bool) {
self.jit_handshake = flag;
}
/// Provides a challenge to the server and returns the answers to the questions
/// asked by the client
pub async fn challenge(
&mut self,
questions: Vec<AuthQuestion>,
extra: HashMap<String, String>,
) -> io::Result<Vec<String>> {
trace!(
"AuthClient::challenge(questions = {:?}, extra = {:?})",
questions,
extra
);
// Perform JIT handshake if enabled
self.jit_handshake().await?;
let payload = AuthRequest::Challenge { questions, extra };
let encrypted_payload = self.serialize_and_encrypt(&payload)?;
let response = self.inner.send(Auth::Msg { encrypted_payload }).await?;
match response.payload {
Auth::Msg { encrypted_payload } => {
match self.decrypt_and_deserialize(&encrypted_payload)? {
AuthResponse::Challenge { answers } => Ok(answers),
AuthResponse::Verify { .. } => Err(io::Error::new(
io::ErrorKind::Other,
"Got unexpected verify response during challenge",
)),
}
}
Auth::Handshake { .. } => Err(io::Error::new(
io::ErrorKind::Other,
"Got unexpected handshake during challenge",
)),
}
}
/// Provides a verification request to the server and returns whether or not
/// the server approved
///
/// Performs a just-in-time handshake first when the JIT flag is enabled. Fails if
/// the server replies with anything other than an encrypted verify response.
pub async fn verify(&mut self, kind: AuthVerifyKind, text: String) -> io::Result<bool> {
    trace!("AuthClient::verify(kind = {:?}, text = {:?})", kind, text);

    // Perform JIT handshake if enabled
    self.jit_handshake().await?;

    // Encrypt the verify request and send it, awaiting the server's reply
    let encrypted_payload = self.serialize_and_encrypt(&AuthRequest::Verify { kind, text })?;
    let response = self.inner.send(Auth::Msg { encrypted_payload }).await?;

    // The only legal reply at this point is an encrypted message
    let Auth::Msg { encrypted_payload } = response.payload else {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            "Got unexpected handshake during verify",
        ));
    };

    match self.decrypt_and_deserialize(&encrypted_payload)? {
        AuthResponse::Verify { valid } => Ok(valid),
        AuthResponse::Challenge { .. } => Err(io::Error::new(
            io::ErrorKind::Other,
            "Got unexpected challenge response during verify",
        )),
    }
}
/// Provides information to the server to use as it pleases with no response expected
///
/// Fire-and-forget: the message is encrypted and sent, but no reply is awaited.
pub async fn info(&mut self, text: String) -> io::Result<()> {
    trace!("AuthClient::info(text = {:?})", text);

    // Perform JIT handshake if enabled
    self.jit_handshake().await?;

    // Encrypt and fire without waiting on any reply
    let encrypted_payload = self.serialize_and_encrypt(&AuthRequest::Info { text })?;
    self.inner.fire(Auth::Msg { encrypted_payload }).await
}
/// Provides an error to the server to use as it pleases with no response expected
pub async fn error(&mut self, kind: AuthErrorKind, text: String) -> io::Result<()> {
trace!("AuthClient::error(kind = {:?}, text = {:?})", kind, text);
// Perform JIT handshake if enabled
self.jit_handshake().await?;
let payload = AuthRequest::Error { kind, text };
let encrypted_payload = self.serialize_and_encrypt(&payload)?;
self.inner.fire(Auth::Msg { encrypted_payload }).await
}
/// Serializes `payload` and encrypts it with the codec established during the
/// handshake, returning the resulting ciphertext bytes.
///
/// Errors if no handshake has been performed yet (no codec available).
fn serialize_and_encrypt(&mut self, payload: &AuthRequest) -> io::Result<Vec<u8>> {
    let codec = self.codec.as_mut().ok_or_else(|| {
        io::Error::new(
            io::ErrorKind::Other,
            "Handshake must be performed first (client encrypt message)",
        )
    })?;

    // Serialize first, then encode (encrypt) into a fresh buffer
    let serialized = utils::serialize_to_vec(payload)?;
    let mut encrypted = BytesMut::new();
    codec.encode(&serialized, &mut encrypted)?;
    Ok(encrypted.to_vec())
}
/// Decrypts `payload` using the codec established during the handshake and
/// deserializes the plaintext into an [`AuthResponse`].
///
/// Errors if no handshake has been performed yet, or if the buffer does not
/// contain one complete frame.
fn decrypt_and_deserialize(&mut self, payload: &[u8]) -> io::Result<AuthResponse> {
    let codec = self.codec.as_mut().ok_or_else(|| {
        io::Error::new(
            io::ErrorKind::Other,
            "Handshake must be performed first (client decrypt message)",
        )
    })?;

    // Decode a single frame; `None` means the message was truncated
    let mut buf = BytesMut::from(payload);
    let decrypted = codec
        .decode(&mut buf)?
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Incomplete message received"))?;
    utils::deserialize_from_slice::<AuthResponse>(&decrypted)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{Client, FramedTransport, Request, Response, TypedAsyncRead, TypedAsyncWrite};
use serde::{de::DeserializeOwned, Serialize};
const TIMEOUT_MILLIS: u64 = 100;
#[tokio::test]
async fn handshake_should_fail_if_get_unexpected_response_from_server() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move { client.handshake().await });

    // Get the request, but send a bad response (an encrypted message
    // instead of the expected handshake reply)
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Handshake { .. } => server
            .write(Response::new(
                request.id,
                Auth::Msg {
                    encrypted_payload: Vec::new(),
                },
            ))
            .await
            .unwrap(),
        _ => panic!("Server received unexpected payload"),
    }

    // The client should surface the protocol violation as an error
    let result = task.await.unwrap();
    assert!(result.is_err(), "Handshake succeeded unexpectedly")
}
#[tokio::test]
async fn challenge_should_fail_if_handshake_not_finished() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move { client.challenge(Vec::new(), HashMap::new()).await });

    // Wait for a request, failing if we get one as the failure
    // should have prevented sending anything; instead we expect
    // the transport to close when the client task exits
    tokio::select! {
        x = TypedAsyncRead::<Request<Auth>>::read(&mut server) => {
            match x {
                Ok(Some(x)) => panic!("Unexpectedly resolved: {:?}", x),
                Ok(None) => {},
                Err(x) => panic!("Unexpectedly failed on server side: {}", x),
            }
        },
        _ = wait_ms(TIMEOUT_MILLIS) => {
            panic!("Should have gotten server closure as part of client exit");
        }
    }

    // Verify that we got an error with the method
    let result = task.await.unwrap();
    assert!(result.is_err(), "Challenge succeeded unexpectedly")
}
#[tokio::test]
async fn challenge_should_fail_if_receive_wrong_response() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client.handshake().await.unwrap();
        client
            .challenge(
                vec![
                    AuthQuestion::new("question1".to_string()),
                    AuthQuestion {
                        text: "question2".to_string(),
                        extra: vec![("key2".to_string(), "value2".to_string())]
                            .into_iter()
                            .collect(),
                    },
                ],
                vec![("key".to_string(), "value".to_string())]
                    .into_iter()
                    .collect(),
            )
            .await
    });

    // Wait for a handshake request and set up our encryption codec
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    let mut codec = match request.payload {
        Auth::Handshake { public_key, salt } => {
            let handshake = Handshake::default();
            let key = handshake.handshake(public_key, salt).unwrap();
            server
                .write(Response::new(
                    request.id,
                    Auth::Handshake {
                        public_key: handshake.pk_bytes(),
                        salt: *handshake.salt(),
                    },
                ))
                .await
                .unwrap();
            XChaCha20Poly1305Codec::new(&key)
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Wait for a challenge request and send back the wrong response type
    // (a verify response instead of challenge answers)
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Msg { encrypted_payload } => {
            match decrypt_and_deserialize(&mut codec, &encrypted_payload).unwrap() {
                AuthRequest::Challenge { .. } => {
                    server
                        .write(Response::new(
                            request.id,
                            Auth::Msg {
                                encrypted_payload: serialize_and_encrypt(
                                    &mut codec,
                                    &AuthResponse::Verify { valid: true },
                                )
                                .unwrap(),
                            },
                        ))
                        .await
                        .unwrap();
                }
                _ => panic!("Server received wrong request type"),
            }
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Verify that we got an error with the method
    let result = task.await.unwrap();
    assert!(result.is_err(), "Challenge succeeded unexpectedly")
}
#[tokio::test]
async fn challenge_should_return_answers_received_from_server() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client.handshake().await.unwrap();
        client
            .challenge(
                vec![
                    AuthQuestion::new("question1".to_string()),
                    AuthQuestion {
                        text: "question2".to_string(),
                        extra: vec![("key2".to_string(), "value2".to_string())]
                            .into_iter()
                            .collect(),
                    },
                ],
                vec![("key".to_string(), "value".to_string())]
                    .into_iter()
                    .collect(),
            )
            .await
    });

    // Wait for a handshake request and set up our encryption codec
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    let mut codec = match request.payload {
        Auth::Handshake { public_key, salt } => {
            let handshake = Handshake::default();
            let key = handshake.handshake(public_key, salt).unwrap();
            server
                .write(Response::new(
                    request.id,
                    Auth::Handshake {
                        public_key: handshake.pk_bytes(),
                        salt: *handshake.salt(),
                    },
                ))
                .await
                .unwrap();
            XChaCha20Poly1305Codec::new(&key)
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Wait for a challenge request, validate its contents, and send back
    // the expected answers
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Msg { encrypted_payload } => {
            match decrypt_and_deserialize(&mut codec, &encrypted_payload).unwrap() {
                AuthRequest::Challenge { questions, extra } => {
                    assert_eq!(
                        questions,
                        vec![
                            AuthQuestion::new("question1".to_string()),
                            AuthQuestion {
                                text: "question2".to_string(),
                                extra: vec![("key2".to_string(), "value2".to_string())]
                                    .into_iter()
                                    .collect(),
                            },
                        ],
                    );
                    assert_eq!(
                        extra,
                        vec![("key".to_string(), "value".to_string())]
                            .into_iter()
                            .collect(),
                    );
                    server
                        .write(Response::new(
                            request.id,
                            Auth::Msg {
                                encrypted_payload: serialize_and_encrypt(
                                    &mut codec,
                                    &AuthResponse::Challenge {
                                        answers: vec![
                                            "answer1".to_string(),
                                            "answer2".to_string(),
                                        ],
                                    },
                                )
                                .unwrap(),
                            },
                        ))
                        .await
                        .unwrap();
                }
                _ => panic!("Server received wrong request type"),
            }
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Verify that we got the right results
    let answers = task.await.unwrap().unwrap();
    assert_eq!(answers, vec!["answer1".to_string(), "answer2".to_string()]);
}
#[tokio::test]
async fn verify_should_fail_if_handshake_not_finished() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client
            .verify(AuthVerifyKind::Host, "some text".to_string())
            .await
    });

    // Wait for a request, failing if we get one as the failure
    // should have prevented sending anything; instead we expect
    // the transport to close when the client task exits
    tokio::select! {
        x = TypedAsyncRead::<Request<Auth>>::read(&mut server) => {
            match x {
                Ok(Some(x)) => panic!("Unexpectedly resolved: {:?}", x),
                Ok(None) => {},
                Err(x) => panic!("Unexpectedly failed on server side: {}", x),
            }
        },
        _ = wait_ms(TIMEOUT_MILLIS) => {
            panic!("Should have gotten server closure as part of client exit");
        }
    }

    // Verify that we got an error with the method
    let result = task.await.unwrap();
    assert!(result.is_err(), "Verify succeeded unexpectedly")
}
#[tokio::test]
async fn verify_should_fail_if_receive_wrong_response() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client.handshake().await.unwrap();
        client
            .verify(AuthVerifyKind::Host, "some text".to_string())
            .await
    });

    // Wait for a handshake request and set up our encryption codec
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    let mut codec = match request.payload {
        Auth::Handshake { public_key, salt } => {
            let handshake = Handshake::default();
            let key = handshake.handshake(public_key, salt).unwrap();
            server
                .write(Response::new(
                    request.id,
                    Auth::Handshake {
                        public_key: handshake.pk_bytes(),
                        salt: *handshake.salt(),
                    },
                ))
                .await
                .unwrap();
            XChaCha20Poly1305Codec::new(&key)
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Wait for a verify request and send back the wrong response type
    // (a challenge response instead of a verify result)
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Msg { encrypted_payload } => {
            match decrypt_and_deserialize(&mut codec, &encrypted_payload).unwrap() {
                AuthRequest::Verify { .. } => {
                    server
                        .write(Response::new(
                            request.id,
                            Auth::Msg {
                                encrypted_payload: serialize_and_encrypt(
                                    &mut codec,
                                    &AuthResponse::Challenge {
                                        answers: Vec::new(),
                                    },
                                )
                                .unwrap(),
                            },
                        ))
                        .await
                        .unwrap();
                }
                _ => panic!("Server received wrong request type"),
            }
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Verify that we got an error with the method
    let result = task.await.unwrap();
    assert!(result.is_err(), "Verify succeeded unexpectedly")
}
#[tokio::test]
async fn verify_should_return_valid_bool_received_from_server() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client.handshake().await.unwrap();
        client
            .verify(AuthVerifyKind::Host, "some text".to_string())
            .await
    });

    // Wait for a handshake request and set up our encryption codec
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    let mut codec = match request.payload {
        Auth::Handshake { public_key, salt } => {
            let handshake = Handshake::default();
            let key = handshake.handshake(public_key, salt).unwrap();
            server
                .write(Response::new(
                    request.id,
                    Auth::Handshake {
                        public_key: handshake.pk_bytes(),
                        salt: *handshake.salt(),
                    },
                ))
                .await
                .unwrap();
            XChaCha20Poly1305Codec::new(&key)
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Wait for a verify request, validate its contents, and send back
    // a valid=true response
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Msg { encrypted_payload } => {
            match decrypt_and_deserialize(&mut codec, &encrypted_payload).unwrap() {
                AuthRequest::Verify { kind, text } => {
                    assert_eq!(kind, AuthVerifyKind::Host);
                    assert_eq!(text, "some text");
                    server
                        .write(Response::new(
                            request.id,
                            Auth::Msg {
                                encrypted_payload: serialize_and_encrypt(
                                    &mut codec,
                                    &AuthResponse::Verify { valid: true },
                                )
                                .unwrap(),
                            },
                        ))
                        .await
                        .unwrap();
                }
                _ => panic!("Server received wrong request type"),
            }
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Verify that we got the right results
    let valid = task.await.unwrap().unwrap();
    assert!(valid, "Got verify response, but valid was set incorrectly");
}
#[tokio::test]
async fn info_should_fail_if_handshake_not_finished() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move { client.info("some text".to_string()).await });

    // Wait for a request, failing if we get one as the failure
    // should have prevented sending anything; instead we expect
    // the transport to close when the client task exits
    tokio::select! {
        x = TypedAsyncRead::<Request<Auth>>::read(&mut server) => {
            match x {
                Ok(Some(x)) => panic!("Unexpectedly resolved: {:?}", x),
                Ok(None) => {},
                Err(x) => panic!("Unexpectedly failed on server side: {}", x),
            }
        },
        _ = wait_ms(TIMEOUT_MILLIS) => {
            panic!("Should have gotten server closure as part of client exit");
        }
    }

    // Verify that we got an error with the method
    let result = task.await.unwrap();
    assert!(result.is_err(), "Info succeeded unexpectedly")
}
#[tokio::test]
async fn info_should_send_the_server_a_request_but_not_wait_for_a_response() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client.handshake().await.unwrap();
        client.info("some text".to_string()).await
    });

    // Wait for a handshake request and set up our encryption codec
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    let mut codec = match request.payload {
        Auth::Handshake { public_key, salt } => {
            let handshake = Handshake::default();
            let key = handshake.handshake(public_key, salt).unwrap();
            server
                .write(Response::new(
                    request.id,
                    Auth::Handshake {
                        public_key: handshake.pk_bytes(),
                        salt: *handshake.salt(),
                    },
                ))
                .await
                .unwrap();
            XChaCha20Poly1305Codec::new(&key)
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Wait for the info request; no response is sent back since
    // info is fire-and-forget
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Msg { encrypted_payload } => {
            match decrypt_and_deserialize(&mut codec, &encrypted_payload).unwrap() {
                AuthRequest::Info { text } => {
                    assert_eq!(text, "some text");
                }
                _ => panic!("Server received wrong request type"),
            }
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Verify that we got the right results
    task.await.unwrap().unwrap();
}
#[tokio::test]
async fn error_should_fail_if_handshake_not_finished() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client
            .error(AuthErrorKind::FailedChallenge, "some text".to_string())
            .await
    });

    // Wait for a request, failing if we get one as the failure
    // should have prevented sending anything; instead we expect
    // the transport to close when the client task exits
    tokio::select! {
        x = TypedAsyncRead::<Request<Auth>>::read(&mut server) => {
            match x {
                Ok(Some(x)) => panic!("Unexpectedly resolved: {:?}", x),
                Ok(None) => {},
                Err(x) => panic!("Unexpectedly failed on server side: {}", x),
            }
        },
        _ = wait_ms(TIMEOUT_MILLIS) => {
            panic!("Should have gotten server closure as part of client exit");
        }
    }

    // Verify that we got an error with the method
    let result = task.await.unwrap();
    assert!(result.is_err(), "Error succeeded unexpectedly")
}
#[tokio::test]
async fn error_should_send_the_server_a_request_but_not_wait_for_a_response() {
    let (t, mut server) = FramedTransport::make_test_pair();
    let mut client = AuthClient::from(Client::from_framed_transport(t).unwrap());

    // We start a separate task for the client to avoid blocking since
    // we also need to receive the client's request and respond
    let task = tokio::spawn(async move {
        client.handshake().await.unwrap();
        client
            .error(AuthErrorKind::FailedChallenge, "some text".to_string())
            .await
    });

    // Wait for a handshake request and set up our encryption codec
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    let mut codec = match request.payload {
        Auth::Handshake { public_key, salt } => {
            let handshake = Handshake::default();
            let key = handshake.handshake(public_key, salt).unwrap();
            server
                .write(Response::new(
                    request.id,
                    Auth::Handshake {
                        public_key: handshake.pk_bytes(),
                        salt: *handshake.salt(),
                    },
                ))
                .await
                .unwrap();
            XChaCha20Poly1305Codec::new(&key)
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Wait for the error request; no response is sent back since
    // error is fire-and-forget
    let request: Request<Auth> = server.read().await.unwrap().unwrap();
    match request.payload {
        Auth::Msg { encrypted_payload } => {
            match decrypt_and_deserialize(&mut codec, &encrypted_payload).unwrap() {
                AuthRequest::Error { kind, text } => {
                    assert_eq!(kind, AuthErrorKind::FailedChallenge);
                    assert_eq!(text, "some text");
                }
                _ => panic!("Server received wrong request type"),
            }
        }
        _ => panic!("Server received unexpected payload"),
    };

    // Verify that we got the right results
    task.await.unwrap().unwrap();
}
/// Suspends the current task for `ms` milliseconds.
async fn wait_ms(ms: u64) {
    tokio::time::sleep(std::time::Duration::from_millis(ms)).await;
}
/// Test helper: serializes `payload` and encrypts it with `codec`,
/// returning the raw ciphertext bytes.
fn serialize_and_encrypt<T: Serialize>(
    codec: &mut XChaCha20Poly1305Codec,
    payload: &T,
) -> io::Result<Vec<u8>> {
    let serialized = utils::serialize_to_vec(payload)?;
    let mut encrypted = BytesMut::new();
    codec.encode(&serialized, &mut encrypted)?;
    Ok(encrypted.to_vec())
}
/// Test helper: decrypts one complete frame from `payload` with `codec`
/// and deserializes the plaintext into `T`.
fn decrypt_and_deserialize<T: DeserializeOwned>(
    codec: &mut XChaCha20Poly1305Codec,
    payload: &[u8],
) -> io::Result<T> {
    // `None` from decode means only a partial frame was present
    let mut buf = BytesMut::from(payload);
    let decrypted = codec
        .decode(&mut buf)?
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Incomplete message received"))?;
    utils::deserialize_from_slice::<T>(&decrypted)
}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save