Initial v5 support

v5-api
Dominik Nakamura 3 years ago
parent 7f0e6e7fe9
commit e9d0afda5e
No known key found for this signature in database
GPG Key ID: E4C6A749B2491910

@ -10,6 +10,7 @@ homepage = "https://github.com/dnaka91/obws"
repository = "https://github.com/dnaka91/obws"
categories = ["api-bindings", "web-programming"]
keywords = ["async", "obs", "obs-websocket", "remote-control", "tokio"]
resolver = "2"
[package.metadata.docs.rs]
features = ["events", "tls"]
@ -18,14 +19,12 @@ features = ["events", "tls"]
async-stream = { version = "0.3.2", optional = true }
base64 = "0.13.0"
bitflags = "1.2.1"
chrono = { version = "0.4.19", default-features = false, features = ["std"] }
either = { version = "1.6.1", features = ["serde"] }
futures-util = { version = "0.3.15", features = ["sink"] }
log = "0.4.14"
rgb = { version = "0.8.27", default-features = false }
semver = { version = "1.0.3", features = ["serde"] }
serde = { version = "1.0.126", features = ["derive"] }
serde_json = "1.0.64"
serde_repr = "0.1.7"
serde_with = "1.9.4"
sha2 = "0.9.5"
thiserror = "1.0.26"
@ -48,11 +47,3 @@ tls = ["tokio-tungstenite/rustls-tls"]
[[example]]
name = "events"
required-features = ["events"]
[[test]]
name = "media_control"
required-features = ["events"]
[[test]]
name = "recording"
required-features = ["events"]

@ -25,7 +25,7 @@ Add `obws` to your project with `cargo add obws` (needs [cargo-edit]) or add it
```toml
[dependencies]
obws = "0.8.0"
obws = { git = "https://github.com/dnaka91/obws.git", branch = "v5-api" }
```
In addition, you will need to use the lastest [tokio](https://tokio.rs) runtime to use this library
@ -48,15 +48,12 @@ use obws::Client;
#[tokio::main]
async fn main() -> Result<()> {
/// Connect to the OBS instance through obs-websocket.
let client = Client::connect("localhost", 4444).await?;
let client = Client::connect("localhost", 4444, Some("password")).await?;
/// Get and print out version information of OBS and obs-websocket.
let version = client.general().get_version().await?;
println!("{:#?}", version);
/// Optionally log-in (if enabled in obs-websocket) to allow other APIs and receive events.
client.login(Some("password")).await?;
/// Get a list of available scenes and print them out.
let scene_list = client.scenes().get_scene_list().await?;
println!("{:#?}", scene_list);

@ -11,9 +11,7 @@ async fn main() -> Result<()> {
env::set_var("RUST_LOG", "obws=debug");
pretty_env_logger::init();
let client = Client::connect("localhost", 4444).await?;
client.login(env::var("OBS_PASSWORD").ok()).await?;
let client = Client::connect("localhost", 4444, env::var("OBS_PASSWORD").ok()).await?;
let events = client.events()?;
pin_mut!(events);

@ -10,14 +10,15 @@ async fn main() -> Result<()> {
env::set_var("RUST_LOG", "obws=debug");
pretty_env_logger::init();
let client = Client::connect("localhost", 4444).await?;
client.login(env::var("OBS_PASSWORD").ok()).await?;
let client = Client::connect("localhost", 4444, env::var("OBS_PASSWORD").ok()).await?;
let scene_list = client.scenes().get_scene_list().await?;
for scene in scene_list.scenes.iter().cycle() {
client.scenes().set_current_scene(&scene.name).await?;
client
.scenes()
.set_current_program_scene(&scene.scene_name)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await;
}

@ -1,7 +1,7 @@
use std::env;
use anyhow::Result;
use obws::{requests::SourceScreenshot, Client};
use obws::{requests::GetSourceScreenshot, Client};
use tokio::fs;
#[tokio::main]
@ -11,22 +11,21 @@ async fn main() -> Result<()> {
env::set_var("RUST_LOG", "obws=debug");
pretty_env_logger::init();
let client = Client::connect("localhost", 4444).await?;
client.login(env::var("OBS_PASSWORD").ok()).await?;
let client = Client::connect("localhost", 4444, env::var("OBS_PASSWORD").ok()).await?;
let screenshot = client
.sources()
.take_source_screenshot(SourceScreenshot {
source_name: Some("Start"),
embed_picture_format: Some("png"),
..Default::default()
.get_source_screenshot(GetSourceScreenshot {
source_name: "OBWS-TEST-Scene",
image_width: None,
image_height: None,
image_compression_quality: None,
image_format: "png",
})
.await?;
let image = screenshot.img.unwrap();
let pos = image.find("base64,").unwrap();
let image = base64::decode(&image[pos + 7..])?;
let pos = screenshot.find("base64,").unwrap();
let image = base64::decode(&screenshot[pos + 7..])?;
fs::write("screenshot.png", &image).await?;

@ -10,14 +10,12 @@ async fn main() -> Result<()> {
env::set_var("RUST_LOG", "obws=debug");
pretty_env_logger::init();
let client = Client::connect("localhost", 4444).await?;
let client = Client::connect("localhost", 4444, env::var("OBS_PASSWORD").ok()).await?;
let version = client.general().get_version().await?;
println!("{:#?}", version);
client.login(env::var("OBS_PASSWORD").ok()).await?;
let scene_list = client.scenes().get_scene_list().await?;
let scene_list = client.scenes().get_scene_list().await?.scenes;
println!("{:#?}", scene_list);
Ok(())

@ -0,0 +1,56 @@
use super::Client;
use crate::{
requests::{RequestType, SetProfileParameter},
responses::{ProfileParameter, Profiles, SceneCollections},
Result,
};
/// API functions related to OBS configuration.
pub struct Config<'a> {
pub(super) client: &'a Client,
}
impl<'a> Config<'a> {
pub async fn get_scene_collection_list(&self) -> Result<SceneCollections> {
self.client
.send_message(RequestType::GetSceneCollectionList)
.await
}
pub async fn set_current_scene_collection(&self, scene_collection_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetCurrentSceneCollection {
scene_collection_name,
})
.await
}
pub async fn get_profile_list(&self) -> Result<Profiles> {
self.client.send_message(RequestType::GetProfileList).await
}
pub async fn set_current_profile(&self, profile_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetCurrentProfile { profile_name })
.await
}
pub async fn get_profile_parameter(
&self,
parameter_category: &str,
parameter_name: &str,
) -> Result<ProfileParameter> {
self.client
.send_message(RequestType::GetProfileParameter {
parameter_category,
parameter_name,
})
.await
}
pub async fn set_profile_parameter(&self, parameter: SetProfileParameter<'_>) -> Result<()> {
self.client
.send_message(RequestType::SetProfileParameter(parameter))
.await
}
}

@ -1,9 +1,9 @@
use serde::Serialize;
use super::Client;
use crate::{
requests::{KeyModifier, Projector, ProjectorInternal, QtGeometry, RequestType},
responses, Error, Result,
requests::{KeyModifiers, RequestType},
responses,
responses::{Hotkeys, StudioModeEnabled},
Result,
};
/// General functions of the API.
@ -12,115 +12,54 @@ pub struct General<'a> {
}
impl<'a> General<'a> {
/// Returns the latest version of the plugin and the API.
pub async fn get_version(&self) -> Result<responses::Version> {
self.client.send_message(RequestType::GetVersion).await
}
/// Tells the client if authentication is required. If so, returns authentication parameters
/// `challenge` and `salt`.
pub async fn get_auth_required(&self) -> Result<responses::AuthRequired> {
self.client.send_message(RequestType::GetAuthRequired).await
}
/// Attempt to authenticate the client to the server.
///
/// - `auth`: Response to the auth challenge.
pub async fn authenticate(&self, auth: &str) -> Result<()> {
self.client
.send_message(RequestType::Authenticate { auth })
.await
}
/// Set the filename formatting string.
///
/// - `filename_formatting`: Filename formatting string to set.
pub async fn set_filename_formatting(&self, filename_formatting: &str) -> Result<()> {
pub async fn broadcast_custom_event(&self, event_data: serde_json::Value) -> Result<()> {
self.client
.send_message(RequestType::SetFilenameFormatting {
filename_formatting,
})
.send_message(RequestType::BroadcastCustomEvent { event_data })
.await
}
/// Get the filename formatting string.
pub async fn get_filename_formatting(&self) -> Result<String> {
pub async fn get_hotkey_list(&self) -> Result<Vec<String>> {
self.client
.send_message::<responses::FilenameFormatting>(RequestType::GetFilenameFormatting)
.send_message::<Hotkeys>(RequestType::GetHotkeyList)
.await
.map(|ff| ff.filename_formatting)
.map(|h| h.hotkeys)
}
/// Get OBS stats (almost the same info as provided in OBS' stats window).
pub async fn get_stats(&self) -> Result<responses::ObsStats> {
pub async fn trigger_hotkey_by_name(&self, hotkey_name: &str) -> Result<()> {
self.client
.send_message::<responses::Stats>(RequestType::GetStats)
.send_message(RequestType::TriggerHotkeyByName { hotkey_name })
.await
.map(|s| s.stats)
}
/// Broadcast custom message to all connected WebSocket clients.
///
/// - `realm`: Identifier to be choosen by the client.
/// - `data`: User-defined data.
pub async fn broadcast_custom_message<T>(&self, realm: &str, data: &T) -> Result<()>
where
T: Serialize,
{
pub async fn trigger_hotkey_by_key_sequence(
&self,
key_id: &str,
key_modifiers: KeyModifiers,
) -> Result<()> {
self.client
.send_message(RequestType::BroadcastCustomMessage {
realm,
data: &serde_json::to_value(data).map_err(Error::SerializeCustomData)?,
.send_message(RequestType::TriggerHotkeyByKeySequence {
key_id,
key_modifiers,
})
.await
}
/// Get basic OBS video information.
pub async fn get_video_info(&self) -> Result<responses::VideoInfo> {
self.client.send_message(RequestType::GetVideoInfo).await
}
/// Open a projector window or create a projector on a monitor. Requires OBS v24.0.4 or newer.
pub async fn open_projector(&self, projector: Projector<'_>) -> Result<()> {
self.client
.send_message(RequestType::OpenProjector(ProjectorInternal {
ty: projector.ty,
monitor: projector.monitor,
geometry: projector.geometry.map(QtGeometry::serialize).as_deref(),
name: projector.name,
}))
.await
}
/// Executes hotkey routine, identified by hotkey unique name.
///
/// - `hotkey_name`: Unique name of the hotkey, as defined when registering the hotkey (e.g.
/// "ReplayBuffer.Save").
pub async fn trigger_hotkey_by_name(&self, hotkey_name: &str) -> Result<()> {
pub async fn get_studio_mode_enabled(&self) -> Result<bool> {
self.client
.send_message(RequestType::TriggerHotkeyByName { hotkey_name })
.send_message::<StudioModeEnabled>(RequestType::GetStudioModeEnabled)
.await
.map(|sme| sme.studio_mode_enabled)
}
/// Executes hotkey routine, identified by bound combination of keys. A single key combination
/// might trigger multiple hotkey routines depending on user settings.
///
/// - `key_id`: Main key identifier (e.g. `OBS_KEY_A` for key "A"). Available identifiers
/// [here](https://github.com/obsproject/obs-studio/blob/master/libobs/obs-hotkeys.h)
/// - `key_modifiers`: Optional key modifiers object. False entries can be ommitted.
pub async fn trigger_hotkey_by_sequence(
&self,
key_id: &str,
key_modifiers: &[KeyModifier],
) -> Result<()> {
pub async fn set_studio_mode_enabled(&self, studio_mode_enabled: bool) -> Result<()> {
self.client
.send_message(RequestType::TriggerHotkeyBySequence {
key_id,
key_modifiers,
.send_message(RequestType::SetStudioModeEnabled {
studio_mode_enabled,
})
.await
}
// TODO: Add `ExecuteBatch` request
// TODO: Add `Sleep` request (only useful together with `ExecuteBatch`)
}

@ -0,0 +1,101 @@
use super::Client;
use crate::{
requests::{CreateInput, RequestType, SetInputSettings, Volume},
responses, Result,
};
/// API functions related to inputs.
pub struct Inputs<'a> {
pub(super) client: &'a Client,
}
impl<'a> Inputs<'a> {
pub async fn get_input_list(&self, input_kind: Option<&str>) -> Result<Vec<responses::Input>> {
self.client
.send_message::<responses::Inputs>(RequestType::GetInputList { input_kind })
.await
.map(|i| i.inputs)
}
pub async fn get_input_kind_list(&self, unversioned: bool) -> Result<Vec<String>> {
self.client
.send_message::<responses::InputKinds>(RequestType::GetInputKindList { unversioned })
.await
.map(|ik| ik.input_kinds)
}
pub async fn get_input_default_settings(&self, input_kind: &str) -> Result<serde_json::Value> {
self.client
.send_message::<responses::DefaultInputSettings>(RequestType::GetInputDefaultSettings {
input_kind,
})
.await
.map(|dis| dis.default_input_settings)
}
pub async fn get_input_settings(&self, input_name: &str) -> Result<responses::InputSettings> {
self.client
.send_message(RequestType::GetInputSettings { input_name })
.await
}
pub async fn set_input_settings(&self, settings: SetInputSettings<'_>) -> Result<()> {
self.client
.send_message(RequestType::SetInputSettings(settings))
.await
}
pub async fn get_input_mute(&self, input_name: &str) -> Result<bool> {
self.client
.send_message::<responses::InputMuted>(RequestType::GetInputMute { input_name })
.await
.map(|im| im.input_muted)
}
pub async fn set_input_mute(&self, input_name: &str, input_muted: bool) -> Result<()> {
self.client
.send_message(RequestType::SetInputMute {
input_name,
input_muted,
})
.await
}
pub async fn toggle_input_mute(&self, input_name: &str) -> Result<bool> {
self.client
.send_message::<responses::InputMuted>(RequestType::ToggleInputMute { input_name })
.await
.map(|im| im.input_muted)
}
pub async fn get_input_volume(&self, input_name: &str) -> Result<responses::InputVolume> {
self.client
.send_message(RequestType::GetInputVolume { input_name })
.await
}
pub async fn set_input_volume(&self, input_name: &str, input_volume: Volume) -> Result<()> {
self.client
.send_message(RequestType::SetInputVolume {
input_name,
input_volume,
})
.await
}
pub async fn set_input_name(&self, input_name: &str, new_input_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetInputName {
input_name,
new_input_name,
})
.await
}
pub async fn create_input(&self, input: CreateInput<'_>) -> Result<String> {
self.client
.send_message::<responses::SceneItemId>(RequestType::CreateInput(input))
.await
.map(|sii| sii.scene_item_id)
}
}

@ -1,134 +0,0 @@
use chrono::Duration;
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to media control.
pub struct MediaControl<'a> {
pub(super) client: &'a Client,
}
impl<'a> MediaControl<'a> {
/// Pause or play a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8).
///
/// - `source_name`: Source name.
/// - `play_pause`: Whether to pause or play the source. `false` for play, `true` for pause.
pub async fn play_pause_media(
&self,
source_name: &str,
play_pause: Option<bool>,
) -> Result<()> {
self.client
.send_message(RequestType::PlayPauseMedia {
source_name,
play_pause,
})
.await
}
/// Restart a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8).
///
/// - `source_name`: Source name.
pub async fn restart_media(&self, source_name: &str) -> Result<()> {
self.client
.send_message(RequestType::RestartMedia { source_name })
.await
}
/// Stop a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8).
///
/// - `source_name`: Source name.
pub async fn stop_media(&self, source_name: &str) -> Result<()> {
self.client
.send_message(RequestType::StopMedia { source_name })
.await
}
/// Skip to the next media item in the playlist. Supports only vlc media source (as of OBS
/// v25.0.8).
///
/// - `source_name`: Source name.
pub async fn next_media(&self, source_name: &str) -> Result<()> {
self.client
.send_message(RequestType::NextMedia { source_name })
.await
}
/// Go to the previous media item in the playlist. Supports only vlc media source (as of OBS
/// v25.0.8).
///
/// - `source_name`: Source name.
pub async fn previous_media(&self, source_name: &str) -> Result<()> {
self.client
.send_message(RequestType::PreviousMedia { source_name })
.await
}
/// Get the length of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS
/// v25.0.8).
///
/// Note: For some reason, for the first 5 or so seconds that the media is playing, the total
/// duration can be off by upwards of 50ms.
///
/// - `source_name`: Source name.
pub async fn get_media_duration(&self, source_name: &str) -> Result<Duration> {
self.client
.send_message::<responses::MediaDuration>(RequestType::GetMediaDuration { source_name })
.await
.map(|md| md.media_duration)
}
/// Get the current timestamp of media in milliseconds. Supports ffmpeg and vlc media sources
/// (as of OBS v25.0.8).
///
/// - `source_name`: Source name.
pub async fn get_media_time(&self, source_name: &str) -> Result<Duration> {
self.client
.send_message::<responses::MediaTime>(RequestType::GetMediaTime { source_name })
.await
.map(|mt| mt.timestamp)
}
/// Set the timestamp of a media source. Supports ffmpeg and vlc media sources (as of OBS
/// v25.0.8).
///
/// - `source_name`: Source name.
/// - `timestamp`: Milliseconds to set the timestamp to.
pub async fn set_media_time(&self, source_name: &str, timestamp: Duration) -> Result<()> {
self.client
.send_message(RequestType::SetMediaTime {
source_name,
timestamp,
})
.await
}
/// Scrub media using a supplied offset. Supports ffmpeg and vlc media sources (as of OBS
/// v25.0.8).
///
/// Note: Due to processing/network delays, this request is not perfect. The processing rate of
/// this request has also not been tested.
///
/// - `source_name`: Source name.
/// - `time_offset`: Millisecond offset (positive or negative) to offset the current media
/// position.
pub async fn scrub_media(&self, source_name: &str, time_offset: Duration) -> Result<()> {
self.client
.send_message(RequestType::ScrubMedia {
source_name,
time_offset,
})
.await
}
/// Get the current playing state of a media source. Supports ffmpeg and vlc media sources (as
/// of OBS v25.0.8).
///
/// - `source_name`: Source name.
pub async fn get_media_state(&self, source_name: &str) -> Result<responses::MediaState> {
self.client
.send_message::<responses::GetMediaState>(RequestType::GetMediaState { source_name })
.await
.map(|msr| msr.media_state)
}
}

@ -11,11 +11,10 @@ use std::{
},
};
#[cfg(feature = "events")]
use futures_util::stream::Stream;
use futures_util::{
sink::SinkExt,
stream::{SplitSink, StreamExt},
stream::{SplitSink, Stream, StreamExt},
Sink,
};
use log::{debug, error, trace};
use semver::{Comparator, Op, Prerelease};
@ -30,33 +29,21 @@ use tokio::{
use tokio_tungstenite::{tungstenite::Message, MaybeTlsStream, WebSocketStream};
pub use self::{
general::General, media_control::MediaControl, outputs::Outputs, profiles::Profiles,
recording::Recording, replay_buffer::ReplayBuffer, scene_collections::SceneCollections,
scene_items::SceneItems, scenes::Scenes, sources::Sources, streaming::Streaming,
studio_mode::StudioMode, transitions::Transitions, virtual_cam::VirtualCam,
config::Config, general::General, inputs::Inputs, scenes::Scenes, sources::Sources,
};
#[cfg(feature = "events")]
use crate::events::{Event, EventType};
use crate::events::Event;
use crate::{
requests::{Request, RequestType},
responses::{AuthRequired, Response},
requests::{ClientRequest, RequestType},
responses::{ServerMessage, Status},
Error, Result,
};
mod config;
mod general;
mod media_control;
mod outputs;
mod profiles;
mod recording;
mod replay_buffer;
mod scene_collections;
mod scene_items;
mod inputs;
mod scenes;
mod sources;
mod streaming;
mod studio_mode;
mod transitions;
mod virtual_cam;
#[derive(Debug, thiserror::Error)]
enum InnerError {
@ -64,9 +51,10 @@ enum InnerError {
IntoText(#[source] tokio_tungstenite::tungstenite::Error),
#[error("failed deserializing message")]
DeserializeMessage(#[source] serde_json::Error),
#[error("failed deserializing event")]
#[cfg_attr(not(feature = "events"), allow(dead_code))]
DeserializeEvent(#[source] serde_json::Error),
#[error("the request ID `{0}` is not an integer")]
InvalidRequestId(#[source] std::num::ParseIntError, String),
#[error("received unexpected server message: {0:?}")]
UnexpectedMessage(ServerMessage),
}
/// The client is the main entry point to access the obs-websocket API. It allows to call various
@ -81,7 +69,7 @@ pub struct Client {
/// A list of currently waiting requests to get a response back. The key is the string version
/// of a request ID and the value is a oneshot sender that allows to send the response back to
/// the other end that waits for the response.
receivers: Arc<Mutex<HashMap<u64, oneshot::Sender<serde_json::Value>>>>,
receivers: Arc<Mutex<ReceiverList>>,
/// Broadcast sender that distributes received events to all current listeners. Events are
/// dropped if nobody listens.
#[cfg(feature = "events")]
@ -95,19 +83,24 @@ pub struct Client {
/// Shorthand for the writer side of a websocket stream that has been split into reader and writer.
type MessageWriter = SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>;
/// Shorthand for the list of ongoing requests that wait for a response.
type ReceiverList = HashMap<u64, oneshot::Sender<(Status, serde_json::Value)>>;
/// Default broadcast capacity used when not overwritten by the user.
#[cfg(feature = "events")]
const DEFAULT_CAPACITY: usize = 100;
/// Configuration for connecting to a obs-websocket instance.
pub struct ConnectConfig<H>
pub struct ConnectConfig<H, P>
where
H: AsRef<str>,
P: AsRef<str>,
{
/// The hostname, usually `localhost` unless the OBS instance is on a remote machine.
pub host: H,
/// Port to connect to.
pub port: u16,
pub password: Option<P>,
/// Whether to use TLS when connecting. Only useful when OBS runs on a remote machine.
#[cfg(feature = "tls")]
pub tls: bool,
@ -127,16 +120,18 @@ const OBS_STUDIO_VERSION: Comparator = Comparator {
pre: Prerelease::EMPTY,
};
const OBS_WEBSOCKET_VERSION: Comparator = Comparator {
op: Op::Tilde,
major: 4,
minor: Some(9),
patch: Some(1),
op: Op::Caret,
major: 5,
minor: None,
patch: None,
pre: Prerelease::EMPTY,
};
const RPC_VERSION: u32 = 1;
impl<H> ConnectConfig<H>
impl<H, P> ConnectConfig<H, P>
where
H: AsRef<str>,
P: AsRef<str>,
{
#[cfg(feature = "tls")]
fn tls(&self) -> bool {
@ -151,10 +146,15 @@ where
impl Client {
/// Connect to a obs-websocket instance on the given host and port.
pub async fn connect(host: impl AsRef<str>, port: u16) -> Result<Self> {
pub async fn connect(
host: impl AsRef<str>,
port: u16,
password: Option<impl AsRef<str>>,
) -> Result<Self> {
Self::connect_with_config(ConnectConfig {
host,
port,
password,
#[cfg(feature = "tls")]
tls: false,
broadcast_capacity: None,
@ -163,7 +163,11 @@ impl Client {
}
/// Connect to a obs-websocket instance with the given configuration.
pub async fn connect_with_config<H: AsRef<str>>(config: ConnectConfig<H>) -> Result<Self> {
pub async fn connect_with_config<H, P>(config: ConnectConfig<H, P>) -> Result<Self>
where
H: AsRef<str>,
P: AsRef<str>,
{
let (socket, _) = tokio_tungstenite::connect_async(format!(
"{}://{}:{}",
if config.tls() { "wss" } else { "ws" },
@ -173,7 +177,7 @@ impl Client {
.await
.map_err(Error::Connect)?;
let (write, mut read) = socket.split();
let (mut write, mut read) = socket.split();
let receivers = Arc::new(Mutex::new(HashMap::<_, oneshot::Sender<_>>::new()));
let receivers2 = Arc::clone(&receivers);
#[cfg(feature = "events")]
@ -184,6 +188,13 @@ impl Client {
#[cfg(feature = "events")]
let events_tx = Arc::clone(&event_sender);
handshake(
&mut write,
&mut read,
config.password.as_ref().map(AsRef::as_ref),
)
.await?;
let handle = tokio::spawn(async move {
while let Some(Ok(msg)) = read.next().await {
trace!("{}", msg);
@ -191,31 +202,34 @@ impl Client {
let text = msg.into_text().map_err(InnerError::IntoText)?;
let text = if text == "Server stopping" {
debug!("Websocket server is stopping");
r#"{"update-type": "ServerStopping"}"#.to_string()
r#"{"messageType":"Event","eventType":"ServerStopping"}"#.to_string()
} else {
text
};
let json = serde_json::from_str::<serde_json::Value>(&text)
let message = serde_json::from_str::<ServerMessage>(&text)
.map_err(InnerError::DeserializeMessage)?;
if let Some(message_id) = json
.as_object()
.and_then(|obj| obj.get("message-id"))
.and_then(|id| id.as_str())
.and_then(|id| id.parse().ok())
{
debug!("got message with id {}", message_id);
if let Some(tx) = receivers2.lock().await.remove(&message_id) {
tx.send(json).ok();
match message {
ServerMessage::RequestResponse {
request_id,
request_status,
response_data,
} => {
let request_id = request_id
.parse()
.map_err(|e| InnerError::InvalidRequestId(e, request_id))?;
debug!("got message with id {}", request_id);
if let Some(tx) = receivers2.lock().await.remove(&request_id) {
tx.send((request_status, response_data)).ok();
}
}
} else {
#[cfg(feature = "events")]
{
let event = serde_json::from_value(json)
.map_err(InnerError::DeserializeEvent)?;
ServerMessage::Event(event) => {
events_tx.send(event).ok();
}
_ => return Err(InnerError::UnexpectedMessage(message)),
}
Ok(())
@ -228,14 +242,7 @@ impl Client {
}
#[cfg(feature = "events")]
{
let event = Event {
stream_timecode: None,
rec_timecode: None,
ty: EventType::ServerStopped,
};
events_tx.send(event).ok();
}
events_tx.send(Event::ServerStopped).ok();
// clear all outstanding receivers to stop them from waiting forever on responses
// they'll never receive.
@ -262,20 +269,27 @@ impl Client {
async fn verify_versions(&self) -> Result<()> {
let version = self.general().get_version().await?;
if !OBS_STUDIO_VERSION.matches(&version.obs_studio_version) {
if !OBS_STUDIO_VERSION.matches(&version.obs_version) {
return Err(Error::ObsStudioVersion(
version.obs_studio_version,
version.obs_version,
OBS_STUDIO_VERSION,
));
}
if !OBS_WEBSOCKET_VERSION.matches(&version.obs_websocket_version) {
if !OBS_WEBSOCKET_VERSION.matches(&version.obs_web_socket_version) {
return Err(Error::ObsWebsocketVersion(
version.obs_websocket_version,
version.obs_web_socket_version,
OBS_WEBSOCKET_VERSION,
));
}
if RPC_VERSION != version.rpc_version {
return Err(Error::RpcVersion {
requested: RPC_VERSION,
negotiated: version.rpc_version,
});
}
Ok(())
}
@ -284,8 +298,8 @@ impl Client {
T: DeserializeOwned,
{
let id = self.id_counter.fetch_add(1, Ordering::SeqCst);
let req = Request {
message_id: &id.to_string(),
let req = ClientRequest::Request {
request_id: &id.to_string(),
ty: req,
};
let json = serde_json::to_string(&req).map_err(Error::SerializeMessage)?;
@ -307,15 +321,15 @@ impl Client {
return Err(e);
}
let mut resp = rx.await.map_err(Error::ReceiveMessage)?;
if let Some(error) = extract_error(&mut resp) {
return Err(Error::Api(error));
let (status, resp) = rx.await.map_err(Error::ReceiveMessage)?;
if !status.result {
return Err(Error::Api {
code: status.code,
message: status.comment,
});
}
serde_json::from_value::<Response<T>>(resp)
.map(|r| r.details)
.map_err(Error::DeserializeResponse)
serde_json::from_value(resp).map_err(Error::DeserializeResponse)
}
/// Disconnect from obs-websocket and shut down all machinery.
@ -336,46 +350,8 @@ impl Client {
}
}
/// Login to the OBS websocket if an authentication is required.
pub async fn login(&self, password: Option<impl AsRef<str>>) -> Result<()> {
let auth_required = self.general().get_auth_required().await?;
if let AuthRequired {
auth_required: true,
challenge: Some(challenge),
salt: Some(salt),
} = auth_required
{
match password {
Some(password) => {
let auth = Self::create_auth_response(&challenge, &salt, password.as_ref());
self.general().authenticate(&auth).await?;
}
None => return Err(Error::NoPassword),
}
}
Ok(())
}
fn create_auth_response(challenge: &str, salt: &str, password: &str) -> String {
use sha2::{Digest, Sha256};
let mut hasher = Sha256::new();
hasher.update(password.as_bytes());
hasher.update(salt.as_bytes());
let mut auth = String::with_capacity(Sha256::output_size() * 4 / 3 + 4);
base64::encode_config_buf(hasher.finalize_reset(), base64::STANDARD, &mut auth);
hasher.update(auth.as_bytes());
hasher.update(challenge.as_bytes());
auth.clear();
base64::encode_config_buf(hasher.finalize(), base64::STANDARD, &mut auth);
auth
pub async fn reidentify(&self) -> Result<()> {
todo!("The `Reidentify` command is not yet implemented")
}
/// Get a stream of events. Each call to this function creates a new listener, therefore it's
@ -404,94 +380,143 @@ impl Client {
}
}
/// Access API functions related to OBS configuration.
pub fn config(&self) -> Config<'_> {
Config { client: self }
}
/// Access general API functions.
pub fn general(&self) -> General<'_> {
General { client: self }
}
/// Access API functions related to media control.
pub fn media_control(&self) -> MediaControl<'_> {
MediaControl { client: self }
/// Access API functions related to inputs.
pub fn inputs(&self) -> Inputs<'_> {
Inputs { client: self }
}
/// Access API functions related to scenes.
pub fn scenes(&self) -> Scenes<'_> {
Scenes { client: self }
}
/// Access API functions related to sources.
pub fn sources(&self) -> Sources<'_> {
Sources { client: self }
}
}
/// Access API functions related to outputs.
pub fn outputs(&self) -> Outputs<'_> {
Outputs { client: self }
impl Drop for Client {
fn drop(&mut self) {
// We simply drop the future as the background task has been aborted but we have no way here
// to wait for it to fully shut down (except spinning up a new tokio runtime).
drop(self.disconnect());
}
}
/// Access API functions related to profiles.
pub fn profiles(&self) -> Profiles<'_> {
Profiles { client: self }
}
/// Errors that can occur while performaning the initial handshake with obs-websocket.
#[derive(Debug, thiserror::Error)]
pub enum HandshakeError {
/// The connection to obs-websocket was interrupted while trying to read a message.
#[error("connection to obs-websocket was closed")]
ConnectionClosed,
/// Receiving a message did not succeed.
#[error("failed reading websocket message")]
Receive(#[source] tokio_tungstenite::tungstenite::Error),
/// The WebSocket message was not convertible to text.
#[error("websocket message not convertible to text")]
IntoText(#[source] tokio_tungstenite::tungstenite::Error),
/// A message from obs-websocket could not be deserialized.
#[error("failed deserializing message")]
DeserializeMessage(#[source] serde_json::Error),
/// A message could not be serialized for sending.
#[error("failed serializing message")]
SerializeMessage(#[source] serde_json::Error),
/// Sending a message to obs-websocket failed.
#[error("failed to send message to obs-websocket")]
Send(#[source] tokio_tungstenite::tungstenite::Error),
/// Didn't receive the initial `Hello` message from obs-websocket after connecting.
#[error("didn't receive a `Hello` message after connecting")]
NoHello,
/// Didn't receive a `Identified` message from obs-websocket after authentication.
#[error("didn't receive a `Identified` message")]
NoIdentified,
}
/// Access API functions related to recording.
pub fn recording(&self) -> Recording<'_> {
Recording { client: self }
}
async fn handshake(
write: &mut (impl Sink<Message, Error = tokio_tungstenite::tungstenite::Error> + Unpin),
read: &mut (impl Stream<Item = tokio_tungstenite::tungstenite::Result<Message>> + Unpin),
password: Option<&str>,
) -> Result<(), HandshakeError> {
async fn read_message(
read: &mut (impl Stream<Item = tokio_tungstenite::tungstenite::Result<Message>> + Unpin),
) -> Result<ServerMessage, HandshakeError> {
let message = read
.next()
.await
.ok_or(HandshakeError::ConnectionClosed)?
.map_err(HandshakeError::Receive)?
.into_text()
.map_err(HandshakeError::IntoText)?;
/// Access API functions related to the replay buffer.
pub fn replay_buffer(&self) -> ReplayBuffer<'_> {
ReplayBuffer { client: self }
serde_json::from_str::<ServerMessage>(&message).map_err(HandshakeError::DeserializeMessage)
}
/// Access API functions related to scene collections.
pub fn scene_collections(&self) -> SceneCollections<'_> {
SceneCollections { client: self }
}
match read_message(read).await? {
ServerMessage::Hello {
obs_web_socket_version: _,
rpc_version,
authentication,
} => {
let authentication = authentication.zip(password).map(|(auth, password)| {
create_auth_response(&auth.challenge, &auth.salt, password)
});
let req = serde_json::to_string(&ClientRequest::Identify {
rpc_version,
authentication,
ignore_invalid_messages: false,
ignore_non_fatal_request_checks: false,
event_subscriptions: None,
})
.map_err(HandshakeError::SerializeMessage)?;
/// Access API functions related to scene items.
pub fn scene_items(&self) -> SceneItems<'_> {
SceneItems { client: self }
write
.send(Message::Text(req))
.await
.map_err(HandshakeError::Send)?;
}
_ => return Err(HandshakeError::NoHello),
}
/// Access API functions related to scenes.
pub fn scenes(&self) -> Scenes<'_> {
Scenes { client: self }
match read_message(read).await? {
ServerMessage::Identified {
negotiated_rpc_version,
} => {
debug!("identified with RPC version {}", negotiated_rpc_version);
}
_ => return Err(HandshakeError::NoIdentified),
}
/// Access API functions related to streaming.
pub fn streaming(&self) -> Streaming<'_> {
Streaming { client: self }
}
Ok(())
}
/// Access API functions related to the studio mode.
pub fn studio_mode(&self) -> StudioMode<'_> {
StudioMode { client: self }
}
fn create_auth_response(challenge: &str, salt: &str, password: &str) -> String {
use sha2::{Digest, Sha256};
/// Access API functions related to transitions.
pub fn transitions(&self) -> Transitions<'_> {
Transitions { client: self }
}
let mut hasher = Sha256::new();
hasher.update(password.as_bytes());
hasher.update(salt.as_bytes());
/// Access API functions related to the virtual cam.
pub fn virtual_cam(&self) -> VirtualCam<'_> {
VirtualCam { client: self }
}
}
let mut auth = String::with_capacity(Sha256::output_size() * 4 / 3 + 4);
/// Pull the `error` field out of a JSON object, taking ownership of the message.
///
/// Returns [`None`] when `value` is not an object, has no `error` key, or the
/// `error` value is not a string. The extracted field is replaced with `Null`
/// in place (via `take`).
fn extract_error(value: &mut serde_json::Value) -> Option<String> {
    let fields = value.as_object_mut()?;
    let error = fields.get_mut("error")?;
    match error.take() {
        serde_json::Value::String(message) => Some(message),
        _ => None,
    }
}
base64::encode_config_buf(hasher.finalize_reset(), base64::STANDARD, &mut auth);
impl Drop for Client {
fn drop(&mut self) {
// We simply drop the future as the background task has been aborted but we have no way here
// to wait for it to fully shut down (except spinning up a new tokio runtime).
drop(self.disconnect());
}
hasher.update(auth.as_bytes());
hasher.update(challenge.as_bytes());
auth.clear();
base64::encode_config_buf(hasher.finalize(), base64::STANDARD, &mut auth);
auth
}

@ -1,52 +0,0 @@
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to outputs.
pub struct Outputs<'a> {
    // Borrowed handle used to send all requests over the websocket.
    pub(super) client: &'a Client,
}

impl<'a> Outputs<'a> {
    /// List existing outputs.
    pub async fn list_outputs(&self) -> Result<Vec<responses::Output>> {
        let response = self
            .client
            .send_message::<responses::Outputs>(RequestType::ListOutputs)
            .await?;
        Ok(response.outputs)
    }

    /// Get information about a single output.
    ///
    /// - `output_name`: Output name.
    pub async fn get_output_info(&self, output_name: &str) -> Result<responses::Output> {
        let response = self
            .client
            .send_message::<responses::OutputInfo>(RequestType::GetOutputInfo { output_name })
            .await?;
        Ok(response.output_info)
    }

    /// Start an output.
    ///
    /// Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which
    /// add outputs to OBS may not function properly when they are controlled in this way.
    ///
    /// - `output_name`: Output name.
    pub async fn start_output(&self, output_name: &str) -> Result<()> {
        let request = RequestType::StartOutput { output_name };
        self.client.send_message(request).await
    }

    /// Stop an output.
    ///
    /// Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which
    /// add outputs to OBS may not function properly when they are controlled in this way.
    ///
    /// - `output_name`: Output name.
    /// - `force`: Force stop (default: false).
    pub async fn stop_output(&self, output_name: &str, force: Option<bool>) -> Result<()> {
        let request = RequestType::StopOutput { output_name, force };
        self.client.send_message(request).await
    }
}

@ -1,34 +0,0 @@
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to profiles.
pub struct Profiles<'a> {
    // Borrowed handle used to send all requests over the websocket.
    pub(super) client: &'a Client,
}

impl<'a> Profiles<'a> {
    /// Set the currently active profile.
    ///
    /// - `profile_name`: Name of the desired profile.
    pub async fn set_current_profile(&self, profile_name: &str) -> Result<()> {
        let request = RequestType::SetCurrentProfile { profile_name };
        self.client.send_message(request).await
    }

    /// Get the name of the current profile.
    pub async fn get_current_profile(&self) -> Result<String> {
        let response = self
            .client
            .send_message::<responses::CurrentProfile>(RequestType::GetCurrentProfile)
            .await?;
        Ok(response.profile_name)
    }

    /// Get a list of available profiles.
    pub async fn list_profiles(&self) -> Result<Vec<responses::Profile>> {
        let response = self
            .client
            .send_message::<responses::Profiles>(RequestType::ListProfiles)
            .await?;
        Ok(response.profiles)
    }
}

@ -1,65 +0,0 @@
use std::path::{Path, PathBuf};
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to recording.
pub struct Recording<'a> {
    // Borrowed handle used to send all requests over the websocket.
    pub(super) client: &'a Client,
}

impl<'a> Recording<'a> {
    /// Get current recording status.
    pub async fn get_recording_status(&self) -> Result<responses::RecordingStatus> {
        self.client
            .send_message(RequestType::GetRecordingStatus)
            .await
    }

    /// Toggle recording on or off (depending on the current recording state).
    pub async fn start_stop_recording(&self) -> Result<()> {
        self.client
            .send_message(RequestType::StartStopRecording)
            .await
    }

    /// Start recording. Will return an `error` if recording is already active.
    pub async fn start_recording(&self) -> Result<()> {
        let request = RequestType::StartRecording;
        self.client.send_message(request).await
    }

    /// Stop recording. Will return an `error` if recording is not active.
    pub async fn stop_recording(&self) -> Result<()> {
        let request = RequestType::StopRecording;
        self.client.send_message(request).await
    }

    /// Pause the current recording. Returns an `error` if recording is not active or already
    /// paused.
    pub async fn pause_recording(&self) -> Result<()> {
        let request = RequestType::PauseRecording;
        self.client.send_message(request).await
    }

    /// Resume/unpause the current recording (if paused). Returns an error if recording is not
    /// active or not paused.
    pub async fn resume_recording(&self) -> Result<()> {
        let request = RequestType::ResumeRecording;
        self.client.send_message(request).await
    }

    /// Change the recording folder.
    ///
    /// Please note: if this is called while a recording is in progress, the change won't be
    /// applied immediately and will be effective on the next recording.
    ///
    /// - `rec_folder`: Path of the recording folder.
    pub async fn set_recording_folder(&self, rec_folder: &Path) -> Result<()> {
        let request = RequestType::SetRecordingFolder { rec_folder };
        self.client.send_message(request).await
    }

    /// Get the path of the current recording folder.
    pub async fn get_recording_folder(&self) -> Result<PathBuf> {
        let response = self
            .client
            .send_message::<responses::RecordingFolder>(RequestType::GetRecordingFolder)
            .await?;
        Ok(response.rec_folder)
    }
}

@ -1,50 +0,0 @@
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to the replay buffer.
pub struct ReplayBuffer<'a> {
    // Borrowed handle used to send all requests over the websocket.
    pub(super) client: &'a Client,
}

impl<'a> ReplayBuffer<'a> {
    /// Get the status of the OBS replay buffer.
    pub async fn get_replay_buffer_status(&self) -> Result<bool> {
        let response = self
            .client
            .send_message::<responses::ReplayBufferStatus>(RequestType::GetReplayBufferStatus)
            .await?;
        Ok(response.is_replay_buffer_active)
    }

    /// Toggle the Replay Buffer on/off (depending on the current state of the replay buffer).
    pub async fn start_stop_replay_buffer(&self) -> Result<()> {
        let request = RequestType::StartStopReplayBuffer;
        self.client.send_message(request).await
    }

    /// Start recording into the Replay Buffer. Will return an `error` if the Replay Buffer is
    /// already active or if the "Save Replay Buffer" hotkey is not set in OBS' settings. Setting
    /// this hotkey is mandatory, even when triggering saves only through obs-websocket.
    pub async fn start_replay_buffer(&self) -> Result<()> {
        let request = RequestType::StartReplayBuffer;
        self.client.send_message(request).await
    }

    /// Stop recording into the Replay Buffer. Will return an `error` if the Replay Buffer is not
    /// active.
    pub async fn stop_replay_buffer(&self) -> Result<()> {
        let request = RequestType::StopReplayBuffer;
        self.client.send_message(request).await
    }

    /// Flush and save the contents of the Replay Buffer to disk. This is basically the same as
    /// triggering the "Save Replay Buffer" hotkey. Will return an `error` if the Replay Buffer is
    /// not active.
    pub async fn save_replay_buffer(&self) -> Result<()> {
        let request = RequestType::SaveReplayBuffer;
        self.client.send_message(request).await
    }
}

@ -1,36 +0,0 @@
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to scene collections.
pub struct SceneCollections<'a> {
    // Borrowed handle used to send all requests over the websocket.
    pub(super) client: &'a Client,
}

impl<'a> SceneCollections<'a> {
    /// Change the active scene collection.
    ///
    /// - `sc_name`: Name of the desired scene collection.
    pub async fn set_current_scene_collection(&self, sc_name: &str) -> Result<()> {
        let request = RequestType::SetCurrentSceneCollection { sc_name };
        self.client.send_message(request).await
    }

    /// Get the name of the current scene collection.
    pub async fn get_current_scene_collection(&self) -> Result<String> {
        let response = self
            .client
            .send_message::<responses::CurrentSceneCollection>(
                RequestType::GetCurrentSceneCollection,
            )
            .await?;
        Ok(response.sc_name)
    }

    /// List available scene collections.
    pub async fn list_scene_collections(&self) -> Result<Vec<responses::SceneCollection>> {
        let response = self
            .client
            .send_message::<responses::SceneCollections>(RequestType::ListSceneCollections)
            .await?;
        Ok(response.scene_collections)
    }
}

@ -1,112 +0,0 @@
use either::Either;
use super::Client;
use crate::{
requests::{
AddSceneItem, DuplicateSceneItem, RequestType, SceneItemProperties, SceneItemRender,
SceneItemSpecification,
},
responses, Result,
};
/// API functions related to scene items.
pub struct SceneItems<'a> {
pub(super) client: &'a Client,
}
impl<'a> SceneItems<'a> {
/// Get a list of all scene items in a scene.
///
/// - `scene_name`: Name of the scene to get the list of scene items from. Defaults to the
/// current scene if not specified.
pub async fn get_scene_item_list(
&self,
scene_name: Option<&str>,
) -> Result<responses::SceneItemList> {
self.client
.send_message(RequestType::GetSceneItemList { scene_name })
.await
}
/// Gets the scene specific properties of the specified source item. Coordinates are relative to
/// the item's parent (the scene or group it belongs to).
///
/// - `scene_name`: Name of the scene the scene item belongs to. Defaults to the current scene.
/// - `item`: Scene Item name (if this field is a string) or specification (if it is an object).
pub async fn get_scene_item_properties(
&self,
scene_name: Option<&str>,
item: Either<&str, SceneItemSpecification<'_>>,
) -> Result<responses::SceneItemProperties> {
self.client
.send_message(RequestType::GetSceneItemProperties { scene_name, item })
.await
}
/// Sets the scene specific properties of a source. Unspecified properties will remain
/// unchanged. Coordinates are relative to the item's parent (the scene or group it belongs to).
pub async fn set_scene_item_properties(
&self,
properties: SceneItemProperties<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::SetSceneItemProperties(properties))
.await
}
/// Reset a scene item.
///
/// - `scene_name`: Name of the scene the scene item belongs to. Defaults to the current scene.
/// - `item`: Scene Item name (if this field is a string) or specification (if it is an object).
pub async fn reset_scene_item(
&self,
scene_name: Option<&str>,
item: Either<&str, SceneItemSpecification<'_>>,
) -> Result<()> {
self.client
.send_message(RequestType::ResetSceneItem { scene_name, item })
.await
}
/// Show or hide a specified source item in a specified scene.
pub async fn set_scene_item_render(
&self,
scene_item_render: SceneItemRender<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::SetSceneItemRender(scene_item_render))
.await
}
/// Deletes a scene item.
///
/// - `scene`: Name of the scene the scene item belongs to. Defaults to the current scene.
/// - `item`: Scene item to delete.
pub async fn delete_scene_item(
&self,
scene: Option<&str>,
item: SceneItemSpecification<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::DeleteSceneItem { scene, item })
.await
}
/// Creates a scene item in a scene. In other words, this is how you add a source into a scene.
pub async fn add_scene_item(&self, scene_item: AddSceneItem<'_>) -> Result<i64> {
self.client
.send_message::<responses::SceneItemId>(RequestType::AddSceneItem(scene_item))
.await
.map(|sii| sii.item_id)
}
/// Duplicates a scene item.
pub async fn duplicate_scene_item(
&self,
scene_item: DuplicateSceneItem<'_>,
) -> Result<responses::DuplicateSceneItem> {
self.client
.send_message(RequestType::DuplicateSceneItem(scene_item))
.await
}
}

@ -1,8 +1,5 @@
use super::Client;
use crate::{
requests::{RequestType, SceneItem, SceneTransitionOverride},
responses, Result,
};
use crate::{requests::RequestType, responses, Result};
/// API functions related to scenes.
pub struct Scenes<'a> {
@ -10,77 +7,54 @@ pub struct Scenes<'a> {
}
impl<'a> Scenes<'a> {
/// Switch to the specified scene.
///
/// - `scene_name`: Name of the scene to switch to.
pub async fn set_current_scene(&self, scene_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetCurrentScene { scene_name })
.await
pub async fn get_scene_list(&self) -> Result<responses::Scenes> {
self.client.send_message(RequestType::GetSceneList).await
}
/// Get the current scene's name and source items.
pub async fn get_current_scene(&self) -> Result<responses::CurrentScene> {
self.client.send_message(RequestType::GetCurrentScene).await
pub async fn get_current_program_scene(&self) -> Result<String> {
self.client
.send_message::<responses::CurrentProgramScene>(RequestType::GetCurrentProgramScene)
.await
.map(|cps| cps.current_program_scene_name)
}
/// Get a list of scenes in the currently active profile.
pub async fn get_scene_list(&self) -> Result<responses::SceneList> {
self.client.send_message(RequestType::GetSceneList).await
pub async fn set_current_program_scene(&self, scene_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetCurrentProgramScene { scene_name })
.await
}
/// Create a new scene scene.
///
/// - `scene_name`: Name of the scene to create.
pub async fn create_scene(&self, scene_name: &str) -> Result<()> {
pub async fn get_current_preview_scene(&self) -> Result<Option<String>> {
self.client
.send_message(RequestType::CreateScene { scene_name })
.send_message::<responses::CurrentPreviewScene>(RequestType::GetCurrentPreviewScene)
.await
.map(|cps| cps.current_preview_scene_name)
}
/// Changes the order of scene items in the requested scene.
///
/// - `scene`: Name of the scene to reorder (defaults to current).
/// - `items`: Ordered list of objects with name and/or id specified. Id preferred due to
/// uniqueness per scene
pub async fn reorder_scene_items(
&self,
scene: Option<&str>,
items: &[SceneItem<'_>],
) -> Result<()> {
pub async fn set_current_preview_scene(&self, scene_name: &str) -> Result<()> {
self.client
.send_message(RequestType::ReorderSceneItems { scene, items })
.send_message(RequestType::SetCurrentPreviewScene { scene_name })
.await
}
/// Set a scene to use a specific transition override.
pub async fn set_scene_transition_override(
&self,
scene_transition: SceneTransitionOverride<'_>,
) -> Result<()> {
pub async fn set_scene_name(&self, scene_name: &str, new_scene_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetSceneTransitionOverride(scene_transition))
.send_message(RequestType::SetSceneName {
scene_name,
new_scene_name,
})
.await
}
/// Remove any transition override on a scene.
///
/// - `scene_name`: Name of the scene to remove the override from.
pub async fn remove_scene_transition_override(&self, scene_name: &str) -> Result<()> {
pub async fn create_scene(&self, scene_name: &str) -> Result<()> {
self.client
.send_message(RequestType::RemoveSceneTransitionOverride { scene_name })
.send_message(RequestType::CreateScene { scene_name })
.await
}
/// Get the current scene transition override.
///
/// - `scene_name`: Name of the scene to get the override for.
pub async fn get_scene_transition_override(
&self,
scene_name: &str,
) -> Result<responses::SceneTransitionOverride> {
pub async fn remove_scene(&self, scene_name: &str) -> Result<()> {
self.client
.send_message(RequestType::GetSceneTransitionOverride { scene_name })
.send_message(RequestType::RemoveScene { scene_name })
.await
}
}

@ -1,14 +1,6 @@
use chrono::Duration;
use serde::de::DeserializeOwned;
use super::Client;
use crate::{
common::MonitorType,
requests::{
AddFilter, CreateSource, MoveFilter, ReorderFilter, RequestType, SourceFilterSettings,
SourceFilterVisibility, SourceScreenshot, SourceSettings, TextFreetype2Properties,
TextGdiPlusProperties, Volume,
},
requests::{GetSourceScreenshot, RequestType, SaveSourceScreenshot},
responses, Result,
};
@ -18,386 +10,22 @@ pub struct Sources<'a> {
}
impl<'a> Sources<'a> {
/// List the media state of all media sources (vlc and media source).
pub async fn get_media_sources_list(&self) -> Result<Vec<responses::MediaSource>> {
self.client
.send_message::<responses::MediaSourcesList>(RequestType::GetMediaSourcesList)
.await
.map(|ms| ms.media_sources)
}
/// Create a source and add it as a scene item to a scene.
pub async fn create_source(&self, source: CreateSource<'_>) -> Result<i64> {
self.client
.send_message::<responses::SourceItemId>(RequestType::CreateSource(source))
.await
.map(|sii| sii.item_id)
}
/// List all sources available in the running OBS instance.
pub async fn get_sources_list(&self) -> Result<Vec<responses::SourceListItem>> {
self.client
.send_message::<responses::SourcesList>(RequestType::GetSourcesList)
.await
.map(|sl| sl.sources)
}
/// Get a list of all available sources types.
pub async fn get_sources_types_list(&self) -> Result<Vec<responses::SourceTypeItem>> {
self.client
.send_message::<responses::SourceTypesList>(RequestType::GetSourceTypesList)
.await
.map(|stl| stl.types)
}
/// Get the volume of the specified source. Default response uses mul format, NOT SLIDER
/// PERCENTAGE.
///
/// - `source`: Source name.
/// - `use_decibel`: Output volume in decibels of attenuation instead of amplitude/mul.
pub async fn get_volume(
&self,
source: &str,
use_decibel: Option<bool>,
) -> Result<responses::Volume> {
self.client
.send_message(RequestType::GetVolume {
source,
use_decibel,
})
.await
}
/// Set the volume of the specified source. Default request format uses mul, NOT SLIDER
/// PERCENTAGE.
pub async fn set_volume(&self, volume: Volume<'_>) -> Result<()> {
self.client
.send_message(RequestType::SetVolume(volume))
.await
}
/// Get the mute status of a specified source.
///
/// - `source`: Source name.
pub async fn get_mute(&self, source: &str) -> Result<responses::Mute> {
self.client
.send_message(RequestType::GetMute { source })
.await
}
/// Sets the mute status of a specified source.
///
/// - `source`: Source name.
/// - `mute`: Desired mute status.
pub async fn set_mute(&self, source: &str, mute: bool) -> Result<()> {
self.client
.send_message(RequestType::SetMute { source, mute })
.await
}
/// Inverts the mute status of a specified source.
///
/// - `source`: Source name.
pub async fn toggle_mute(&self, source: &str) -> Result<()> {
self.client
.send_message(RequestType::ToggleMute { source })
.await
}
/// Get the source's active status of a specified source (if it is showing in the final mix).
///
/// - `source_name`: Source name.
pub async fn get_source_active(&self, source_name: &str) -> Result<bool> {
self.client
.send_message::<responses::SourceActive>(RequestType::GetSourceActive { source_name })
.await
.map(|sa| sa.source_active)
}
/// Get the audio's active status of a specified source.
///
/// - `source_name`: Source name.
pub async fn get_audio_active(&self, source_name: &str) -> Result<bool> {
self.client
.send_message::<responses::AudioActive>(RequestType::GetAudioActive { source_name })
.await
.map(|aa| aa.audio_active)
}
/// Rename an existing source.
///
/// Note: If the new name already exists as a source, obs-websocket will return an error.
///
/// - `source_name`: Source name.
/// - `new_name`: New source name.
pub async fn set_source_name(&self, source_name: &str, new_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetSourceName {
source_name,
new_name,
})
.await
}
/// Set the audio sync offset of a specified source.
///
/// - `source`: Source name.
/// - `offset`: The desired audio sync offset (in nanoseconds).
pub async fn set_sync_offset(&self, source: &str, offset: Duration) -> Result<()> {
self.client
.send_message(RequestType::SetSyncOffset { source, offset })
.await
}
/// Get the audio sync offset of a specified source.
///
/// - `source`: Source name.
pub async fn get_sync_offset(&self, source: &str) -> Result<responses::SyncOffset> {
self.client
.send_message(RequestType::GetSyncOffset { source })
.await
}
/// Get settings of the specified source.
///
/// - `source_name`: Source name.
/// - `source_type`: Type of the specified source. Useful for type-checking if you expect a
/// specific settings schema.
pub async fn get_source_settings<T>(
&self,
source_name: &str,
source_type: Option<&str>,
) -> Result<responses::SourceSettings<T>>
where
T: DeserializeOwned,
{
self.client
.send_message(RequestType::GetSourceSettings {
source_name,
source_type,
})
.await
}
/// Set settings of the specified source.
pub async fn set_source_settings<T>(
&self,
source_settings: SourceSettings<'_>,
) -> Result<responses::SourceSettings<T>>
where
T: DeserializeOwned,
{
self.client
.send_message(RequestType::SetSourceSettings(source_settings))
.await
}
/// Get the current properties of a Text GDI Plus source.
///
/// - `source`: Source name.
pub async fn get_text_gdi_plus_properties(
&self,
source: &str,
) -> Result<responses::TextGdiPlusProperties> {
self.client
.send_message(RequestType::GetTextGdiPlusProperties { source })
.await
}
/// Set the current properties of a Text GDI Plus source.
pub async fn set_text_gdi_plus_properties(
&self,
properties: TextGdiPlusProperties<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::SetTextGdiPlusProperties(Box::new(properties)))
.await
}
/// Get the current properties of a Text Freetype 2 source.
///
/// - `source`: Source name.
pub async fn get_text_freetype2_properties(
&self,
source: &str,
) -> Result<responses::TextFreetype2Properties> {
self.client
.send_message(RequestType::GetTextFreetype2Properties { source })
.await
}
/// Set the current properties of a Text Freetype 2 source.
pub async fn set_text_freetype2_properties(
&self,
properties: TextFreetype2Properties<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::SetTextFreetype2Properties(properties))
.await
}
/// Get configured special sources like Desktop Audio and Mic/Aux sources.
pub async fn get_special_sources(&self) -> Result<responses::SpecialSources> {
self.client
.send_message(RequestType::GetSpecialSources)
.await
}
/// List filters applied to a source.
///
/// - `source_name`: Source name.
pub async fn get_source_filters(
&self,
source_name: &str,
) -> Result<Vec<responses::SourceFilter>> {
self.client
.send_message::<responses::SourceFilters>(RequestType::GetSourceFilters { source_name })
.await
.map(|sf| sf.filters)
}
/// Get a specific filter that is applied to a source.
///
/// - `source_name`: Source name.
/// - `filter_name`: Source filter name.
pub async fn get_source_filter_info<T>(
&self,
source_name: &str,
filter_name: &str,
) -> Result<responses::SourceFilterInfo<T>>
where
T: DeserializeOwned,
{
self.client
.send_message(RequestType::GetSourceFilterInfo {
source_name,
filter_name,
})
.await
}
/// Add a new filter to a source. Available source types along with their settings properties
/// are available from [`get_sources_types_list`](Self::get_sources_types_list).
pub async fn add_filter_to_source(&self, add_filter: AddFilter<'_>) -> Result<()> {
self.client
.send_message(RequestType::AddFilterToSource(add_filter))
.await
}
/// Remove a filter from a source.
///
/// - `source_name`: Name of the source from which the specified filter is removed.
/// - `filter_name`: Name of the filter to remove.
pub async fn remove_filter_from_source(
&self,
source_name: &str,
filter_name: &str,
) -> Result<()> {
self.client
.send_message(RequestType::RemoveFilterFromSource {
source_name,
filter_name,
})
.await
}
/// Move a filter in the chain (absolute index positioning).
pub async fn reorder_source_filter(&self, reorder_filter: ReorderFilter<'_>) -> Result<()> {
self.client
.send_message(RequestType::ReorderSourceFilter(reorder_filter))
.await
}
/// Move a filter in the chain (relative positioning).
pub async fn move_source_filter(&self, move_filter: MoveFilter<'_>) -> Result<()> {
self.client
.send_message(RequestType::MoveSourceFilter(move_filter))
.await
}
/// Update settings of a filter.
pub async fn set_source_filter_settings(
&self,
settings: SourceFilterSettings<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::SetSourceFilterSettings(settings))
.await
}
/// Change the visibility/enabled state of a filter.
pub async fn set_source_filter_visibility(
&self,
visibility: SourceFilterVisibility<'_>,
) -> Result<()> {
self.client
.send_message(RequestType::SetSourceFilterVisibility(visibility))
.await
}
/// Get the audio monitoring type of the specified source.
///
/// - `source_name`: Source name.
pub async fn get_audio_monitor_type(&self, source_name: &str) -> Result<MonitorType> {
self.client
.send_message::<responses::AudioMonitorType>(RequestType::GetAudioMonitorType {
source_name,
})
.await
.map(|amt| amt.monitor_type)
}
/// Set the audio monitoring type of the specified source.
///
/// - `source_name`: Source name.
/// - `monitor_type`: The monitor type to use. Options: `none`, `monitorOnly`,
/// `monitorAndOutput`.
pub async fn set_audio_monitor_type(
&self,
source_name: &str,
monitor_type: MonitorType,
) -> Result<()> {
self.client
.send_message(RequestType::SetAudioMonitorType {
source_name,
monitor_type,
})
.await
}
/// Get the default settings for a given source type.
///
/// - `source_kind`: Source kind. Also called "source id" in libobs terminology.
pub async fn get_source_default_settings(
&self,
source_kind: &str,
) -> Result<responses::SourceDefaultSettings> {
pub async fn get_source_active(&self, source_name: &str) -> Result<responses::SourceActive> {
self.client
.send_message(RequestType::GetSourceDefaultSettings { source_kind })
.send_message(RequestType::GetSourceActive { source_name })
.await
}
/// At least [`embed_picture_format`](SourceScreenshot::embed_picture_format) or
/// [`save_to_file_path`](SourceScreenshot::save_to_file_path) must be specified.
///
/// Clients can specify [`width`](SourceScreenshot::width) and
/// [`height`](SourceScreenshot::height) parameters to receive scaled pictures. Aspect ratio is
/// preserved if only one of these two parameters is specified.
pub async fn take_source_screenshot(
&self,
source_screenshot: SourceScreenshot<'_>,
) -> Result<responses::SourceScreenshot> {
pub async fn get_source_screenshot(&self, settings: GetSourceScreenshot<'_>) -> Result<String> {
self.client
.send_message(RequestType::TakeSourceScreenshot(source_screenshot))
.send_message::<responses::ImageData>(RequestType::GetSourceScreenshot(settings))
.await
.map(|id| id.image_data)
}
/// Refreshes the specified browser source.
///
/// - `source_name`: Source name.
pub async fn refresh_browser_source(&self, source_name: &str) -> Result<()> {
pub async fn save_source_screenshot(&self, settings: SaveSourceScreenshot<'_>) -> Result<()> {
self.client
.send_message(RequestType::RefreshBrowserSource { source_name })
.send_message(RequestType::SaveSourceScreenshot(settings))
.await
}
}

@ -1,73 +0,0 @@
use super::Client;
use crate::{
requests::{RequestType, SetStreamSettings, Stream},
responses, Result,
};
/// API functions related to streaming.
pub struct Streaming<'a> {
pub(super) client: &'a Client,
}
impl<'a> Streaming<'a> {
/// Get current streaming and recording status.
pub async fn get_streaming_status(&self) -> Result<responses::StreamingStatus> {
self.client
.send_message(RequestType::GetStreamingStatus)
.await
}
/// Toggle streaming on or off (depending on the current stream state).
pub async fn start_stop_streaming(&self) -> Result<()> {
self.client
.send_message(RequestType::StartStopStreaming)
.await
}
/// Start streaming. Will return an `error` if streaming is already active.
///
/// - `stream`: Special stream configuration. Note: these won't be saved to OBS' configuration.
pub async fn start_streaming(&self, stream: Option<Stream<'_>>) -> Result<()> {
self.client
.send_message(RequestType::StartStreaming { stream })
.await
}
/// Stop streaming. Will return an `error` if streaming is not active.
pub async fn stop_streaming(&self) -> Result<()> {
self.client.send_message(RequestType::StopStreaming).await
}
/// Sets one or more attributes of the current streaming server settings. Any options not passed
/// will remain unchanged. Returns the updated settings in response. If 'type' is different than
/// the current streaming service type, all settings are required. Returns the full settings of
/// the stream (the same as GetStreamSettings).
pub async fn set_stream_settings(&self, settings: SetStreamSettings<'_>) -> Result<()> {
self.client
.send_message(RequestType::SetStreamSettings(settings))
.await
}
/// Get the current streaming server settings.
pub async fn get_stream_settings(&self) -> Result<responses::GetStreamSettings> {
self.client
.send_message(RequestType::GetStreamSettings)
.await
}
/// Save the current streaming server settings to disk.
pub async fn save_stream_settings(&self) -> Result<()> {
self.client
.send_message(RequestType::SaveStreamSettings)
.await
}
/// Send the provided text as embedded CEA-608 caption data.
///
/// - `text`: Captions text.
pub async fn send_captions(&self, text: &str) -> Result<()> {
self.client
.send_message(RequestType::SendCaptions { text })
.await
}
}

@ -1,70 +0,0 @@
use super::Client;
use crate::{
requests::{RequestType, Transition},
responses, Result,
};
/// API functions related to the studio mode.
pub struct StudioMode<'a> {
    // Borrowed handle used to send all requests over the websocket.
    pub(super) client: &'a Client,
}

impl<'a> StudioMode<'a> {
    /// Indicates if Studio Mode is currently enabled.
    pub async fn get_studio_mode_status(&self) -> Result<bool> {
        let response = self
            .client
            .send_message::<responses::StudioModeStatus>(RequestType::GetStudioModeStatus)
            .await?;
        Ok(response.studio_mode)
    }

    /// Get the name of the currently previewed scene and its list of sources. Will return an
    /// `error` if Studio Mode is not enabled.
    pub async fn get_preview_scene(&self) -> Result<responses::PreviewScene> {
        self.client.send_message(RequestType::GetPreviewScene).await
    }

    /// Set the active preview scene. Will return an `error` if Studio Mode is not enabled.
    ///
    /// - `scene_name`: The name of the scene to preview.
    pub async fn set_preview_scene(&self, scene_name: &str) -> Result<()> {
        let request = RequestType::SetPreviewScene { scene_name };
        self.client.send_message(request).await
    }

    /// Transitions the currently previewed scene to the main output. Will return an `error` if
    /// Studio Mode is not enabled.
    ///
    /// - `with_transition`: Change the active transition before switching scenes. Defaults to the
    ///   active transition.
    pub async fn transition_to_program(
        &self,
        with_transition: Option<Transition<'_>>,
    ) -> Result<()> {
        let request = RequestType::TransitionToProgram { with_transition };
        self.client.send_message(request).await
    }

    /// Enables Studio Mode.
    pub async fn enable_studio_mode(&self) -> Result<()> {
        let request = RequestType::EnableStudioMode;
        self.client.send_message(request).await
    }

    /// Disables Studio Mode.
    pub async fn disable_studio_mode(&self) -> Result<()> {
        let request = RequestType::DisableStudioMode;
        self.client.send_message(request).await
    }

    /// Toggles Studio Mode (depending on the current state of studio mode).
    pub async fn toggle_studio_mode(&self) -> Result<()> {
        let request = RequestType::ToggleStudioMode;
        self.client.send_message(request).await
    }
}

@ -1,120 +0,0 @@
use chrono::Duration;
use serde::Serialize;
use super::Client;
use crate::{requests::RequestType, responses, Error, Result};
/// API functions related to transitions.
pub struct Transitions<'a> {
pub(super) client: &'a Client,
}
impl<'a> Transitions<'a> {
/// List of all transitions available in the frontend's dropdown menu.
pub async fn get_transition_list(&self) -> Result<responses::TransitionList> {
self.client
.send_message(RequestType::GetTransitionList)
.await
}
/// Get the name of the currently selected transition in the frontend's dropdown menu.
pub async fn get_current_transition(&self) -> Result<responses::CurrentTransition> {
self.client
.send_message(RequestType::GetCurrentTransition)
.await
}
/// Set the active transition.
///
/// - `transition_name`: The name of the transition.
pub async fn set_current_transition(&self, transition_name: &str) -> Result<()> {
self.client
.send_message(RequestType::SetCurrentTransition { transition_name })
.await
}
/// Set the duration of the currently selected transition if supported.
///
/// - `duration`: Desired duration of the transition (in milliseconds).
pub async fn set_transition_duration(&self, duration: Duration) -> Result<()> {
self.client
.send_message(RequestType::SetTransitionDuration { duration })
.await
}
/// Get the duration of the currently selected transition if supported.
pub async fn get_transition_duration(&self) -> Result<Duration> {
self.client
.send_message::<responses::TransitionDuration>(RequestType::GetTransitionDuration)
.await
.map(|td| td.transition_duration)
}
/// Get the position of the current transition.
pub async fn get_transition_position(&self) -> Result<f64> {
self.client
.send_message::<responses::TransitionPosition>(RequestType::GetTransitionPosition)
.await
.map(|tp| tp.position)
}
/// Get the current settings of a transition.
///
/// - `transition_name`: Transition name.
pub async fn get_transition_settings(
&self,
transition_name: &str,
) -> Result<serde_json::Value> {
self.client
.send_message::<responses::TransitionSettings>(RequestType::GetTransitionSettings {
transition_name,
})
.await
.map(|ts| ts.transition_settings)
}
/// Change the current settings of a transition.
///
/// - `transition_name`: Transition name.
/// - `transition_settings`: Transition settings (they can be partial)
pub async fn set_transition_settings<T>(
&self,
transition_name: &str,
transition_settings: &T,
) -> Result<serde_json::Value>
where
T: Serialize,
{
self.client
.send_message::<responses::TransitionSettings>(RequestType::SetTransitionSettings {
transition_name,
transition_settings: &serde_json::to_value(transition_settings)
.map_err(Error::SerializeCustomData)?,
})
.await
.map(|ts| ts.transition_settings)
}
/// Release the T-Bar (like a user releasing their mouse button after moving it). *YOU MUST CALL
/// THIS if you called [`set_t_bar_position`](Self::set_t_bar_position) with the `release`
/// parameter set to `false`.*
pub async fn release_t_bar(&self) -> Result<()> {
self.client.send_message(RequestType::ReleaseTBar).await
}
/// If your code needs to perform multiple successive T-Bar moves (e.g. : in an animation, or in
/// response to a user moving a T-Bar control in your User Interface), set `release` to false
/// and call [`release_t_bar`](Self::release_t_bar) later once the animation/interaction is
/// over.
///
/// - `position`: T-Bar position. This value must be between 0.0 and 1.0.
/// - `release`: Whether or not the T-Bar gets released automatically after setting its new
/// position (like a user releasing their mouse button after moving the T-Bar). Call
/// [`release_t_bar`](Self::release_t_bar) manually if you set `release` to false. Defaults to
/// true.
pub async fn set_t_bar_position(&self, position: f64, release: Option<bool>) -> Result<()> {
self.client
.send_message(RequestType::SetTBarPosition { position, release })
.await
}
}

@ -1,33 +0,0 @@
use super::Client;
use crate::{requests::RequestType, responses, Result};
/// API functions related to the virtual cam.
pub struct VirtualCam<'a> {
    pub(super) client: &'a Client,
}

impl<'a> VirtualCam<'a> {
    /// Get current virtual cam status.
    pub async fn get_virtual_cam_status(&self) -> Result<responses::VirtualCamStatus> {
        let request = RequestType::GetVirtualCamStatus;
        self.client.send_message(request).await
    }

    /// Toggle virtual cam on or off (depending on the current virtual cam state).
    pub async fn start_stop_virtual_cam(&self) -> Result<()> {
        let request = RequestType::StartStopVirtualCam;
        self.client.send_message(request).await
    }

    /// Start virtual cam. Will return an `error` if virtual cam is already active.
    pub async fn start_virtual_cam(&self) -> Result<()> {
        let request = RequestType::StartVirtualCam;
        self.client.send_message(request).await
    }

    /// Stop virtual cam. Will return an error if virtual cam is not active.
    pub async fn stop_virtual_cam(&self) -> Result<()> {
        let request = RequestType::StopVirtualCam;
        self.client.send_message(request).await
    }
}

@ -1,309 +0,0 @@
//! Common data structures shared between [`requests`](crate::requests),
//! [`responses`](crate::responses) and [`events`](crate::events).
use std::convert::TryFrom;
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use crate::Error;
/// Response value for [`get_current_scene`](crate::client::Scenes::get_current_scene) as part of
/// [`CurrentScene`](crate::responses::CurrentScene),
/// [`get_scene_list`](crate::client::Scenes::get_scene_list) as part of
/// [`Scene`](crate::responses::Scene),
/// [`get_preview_scene`](crate::client::StudioMode::get_preview_scene) as part of
/// [`PreviewScene`](crate::responses::PreviewScene),
/// [`EventType::SwitchScenes`](crate::events::EventType::SwitchScenes),
/// [`EventType::PreviewSceneChanged`](crate::events::EventType::PreviewSceneChanged),
/// and **itself**.
#[allow(missing_docs)] // Docs missing in the obs-websocket spec.
#[derive(Clone, Debug, Deserialize)]
pub struct SceneItem {
    /// Height of the scene item — undocumented in the obs-websocket spec (presumably pixels;
    /// TODO confirm).
    pub cy: f64,
    /// Width of the scene item — undocumented in the obs-websocket spec (presumably pixels;
    /// TODO confirm).
    pub cx: f64,
    /// The point on the source that the item is manipulated from. The sum of 1=Left or 2=Right, and
    /// 4=Top or 8=Bottom, or omit to center on that axis.
    #[serde(deserialize_with = "crate::de::bitflags_u8")]
    pub alignment: Alignment,
    /// The name of this Scene Item.
    pub name: String,
    /// Scene item ID.
    pub id: i64,
    /// Whether or not this Scene Item is set to "visible".
    pub render: bool,
    /// Whether or not this Scene Item is muted.
    pub muted: bool,
    /// Whether or not this Scene Item is locked and can't be moved around
    pub locked: bool,
    /// Width of the underlying source before scaling — undocumented in the obs-websocket spec.
    pub source_cx: f64,
    /// Height of the underlying source before scaling — undocumented in the obs-websocket spec.
    pub source_cy: f64,
    /// Source type.
    #[serde(rename = "type")]
    pub ty: String,
    /// Audio volume of the item — undocumented in the obs-websocket spec.
    pub volume: f64,
    /// Horizontal position of the item — undocumented in the obs-websocket spec.
    pub x: f64,
    /// Vertical position of the item — undocumented in the obs-websocket spec.
    pub y: f64,
    /// Name of the item's parent (if this item belongs to a group).
    #[serde(rename = "parentGroupName")]
    pub parent_group_name: Option<String>,
    /// List of children (if this item is a group).
    #[serde(rename = "groupChildren", default)]
    pub group_children: Vec<SceneItem>,
}
/// Response value for
/// [`get_scene_item_properties`](crate::client::SceneItems::get_scene_item_properties) as part of
/// [`SceneItemProperties`](crate::responses::SceneItemProperties),
/// [`EventType::SceneItemTransformChanged`](crate::events::EventType::SceneItemTransformChanged)
/// and **itself**.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SceneItemTransform {
    /// Position of the scene item.
    pub position: Position,
    /// The clockwise rotation of the scene item in degrees around the point of alignment.
    pub rotation: f64,
    /// Scaling factor of the scene item.
    pub scale: Scale,
    /// Pixel cropping of the scene item before scaling.
    pub crop: Crop,
    /// If the scene item is visible.
    pub visible: bool,
    /// If the scene item is locked in position.
    pub locked: bool,
    /// Bounding box of the source item.
    pub bounds: Bounds,
    /// Base width (without scaling) of the source.
    pub source_width: u64,
    /// Base height (without scaling) of the source.
    pub source_height: u64,
    /// Scene item width (base source width multiplied by the horizontal scaling factor).
    pub width: f64,
    /// Scene item height (base source height multiplied by the vertical scaling factor).
    pub height: f64,
    /// Name of the item's parent (if this item belongs to a group).
    pub parent_group_name: Option<String>,
    /// List of children (if this item is a group).
    #[serde(default)]
    pub group_children: Vec<SceneItemTransform>,
}
/// Response value for
/// [`get_scene_item_properties`](crate::client::SceneItems::get_scene_item_properties) as part of
/// [`SceneItemProperties`](crate::responses::SceneItemProperties).
#[derive(Clone, Debug, Deserialize)]
pub struct Position {
    /// The x position of the source from the left (presumably in pixels — the spec does not
    /// state the unit).
    pub x: f64,
    /// The y position of the source from the top (presumably in pixels — the spec does not
    /// state the unit).
    pub y: f64,
    /// The point on the source that the item is manipulated from. The sum of 1=Left or 2=Right, and
    /// 4=Top or 8=Bottom, or omit to center on that axis.
    #[serde(deserialize_with = "crate::de::bitflags_u8")]
    pub alignment: Alignment,
}
/// Response value for
/// [`get_scene_item_properties`](crate::client::SceneItems::get_scene_item_properties) as part of
/// [`SceneItemProperties`](crate::responses::SceneItemProperties) and [`SceneItemTransform`].
#[derive(Clone, Debug, Deserialize)]
pub struct Scale {
    /// The x-scale factor of the source (1.0 being the original size — TODO confirm).
    pub x: f64,
    /// The y-scale factor of the source (1.0 being the original size — TODO confirm).
    pub y: f64,
    /// The scale filter of the source, applied when resizing.
    pub filter: ScaleFilter,
}
/// Different scaling filters that can be applied to a scene item as part of [`Scale`].
///
/// The serialized names mirror the `OBS_SCALE_*` constants of the OBS API.
#[derive(Clone, Copy, Debug, Deserialize)]
pub enum ScaleFilter {
    /// Disable any scaling filters.
    #[serde(rename = "OBS_SCALE_DISABLE")]
    Disable,
    /// Nearest neighbor scaling.
    #[serde(rename = "OBS_SCALE_POINT")]
    Point,
    /// Sharpened scaling, 16 samples.
    #[serde(rename = "OBS_SCALE_BICUBIC")]
    Bicubic,
    /// Fast but blurry scaling.
    #[serde(rename = "OBS_SCALE_BILINEAR")]
    Bilinear,
    /// Sharpened scaling, 36 samples.
    #[serde(rename = "OBS_SCALE_LANCZOS")]
    Lanczos,
    /// Weighted sum, 4/6/9 samples.
    #[serde(rename = "OBS_SCALE_AREA")]
    Area,
}
/// Response value for
/// [`get_scene_item_properties`](crate::client::SceneItems::get_scene_item_properties) as part of
/// [`SceneItemProperties`](crate::responses::SceneItemProperties) and [`SceneItemTransform`].
///
/// All values describe cropping applied **before** any scaling takes place.
#[derive(Clone, Debug, Deserialize)]
pub struct Crop {
    /// The number of pixels cropped off the top of the source before scaling.
    pub top: u32,
    /// The number of pixels cropped off the right of the source before scaling.
    pub right: u32,
    /// The number of pixels cropped off the bottom of the source before scaling.
    pub bottom: u32,
    /// The number of pixels cropped off the left of the source before scaling.
    pub left: u32,
}
/// Response value for
/// [`get_scene_item_properties`](crate::client::SceneItems::get_scene_item_properties) as part of
/// [`SceneItemProperties`](crate::responses::SceneItemProperties) and [`SceneItemTransform`].
#[derive(Clone, Debug, Deserialize)]
pub struct Bounds {
    /// Type of bounding box. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER",
    /// "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT",
    /// "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
    #[serde(rename = "type")]
    pub ty: BoundsType,
    /// Alignment of the bounding box.
    #[serde(deserialize_with = "crate::de::bitflags_u8")]
    pub alignment: Alignment,
    /// Width of the bounding box.
    ///
    /// NOTE: the API names this field `x`, but it holds the width.
    pub x: f64,
    /// Height of the bounding box.
    ///
    /// NOTE: the API names this field `y`, but it holds the height.
    pub y: f64,
}
/// Monitoring type for audio outputs.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum MonitorType {
    /// No monitoring.
    None,
    /// Only monitor but don't output any sounds.
    MonitorOnly,
    /// Monitor the audio and output it at the same time.
    MonitorAndOutput,
}
/// Horizontal text alignment used for GDI+ text properties.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Align {
    /// Align to the left.
    Left,
    /// Center the text in the middle (horizontally).
    Center,
    /// Align to the right.
    Right,
}
/// Vertical text alignment used for GDI+ text properties.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Valign {
    /// Align to the top.
    Top,
    /// Center the text in the middle (vertically).
    Center,
    /// Align to the bottom.
    Bottom,
}
/// The type of streaming for service configurations.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StreamType {
    /// Customized RTMP streaming (user-supplied server and key).
    RtmpCustom,
    /// Common RTMP configuration (a predefined streaming service).
    RtmpCommon,
}
bitflags! {
    /// Different flags for font display that can be combined together.
    pub struct FontFlags: u8 {
        /// Make the text appear thicker.
        const BOLD = 1;
        /// Make the text appear cursive.
        const ITALIC = 2;
        /// Underline the text with a straight line.
        ///
        /// NOTE: libobs defines `OBS_FONT_UNDERLINE` as `1 << 2` (= 4). The previous value of
        /// `5` overlapped with [`FontFlags::BOLD`] (5 = 0b101), which made combinations like
        /// `BOLD | UNDERLINE` ambiguous when converting from/to the raw `u8`.
        const UNDERLINE = 4;
        /// Strikeout the text.
        const STRIKEOUT = 8;
    }
}

impl TryFrom<u8> for FontFlags {
    type Error = Error;

    /// Converts a raw bit pattern into [`FontFlags`], failing with [`Error::UnknownFlags`] if
    /// any bit outside the defined flags is set.
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Self::from_bits(value).ok_or(Error::UnknownFlags(value))
    }
}

impl From<FontFlags> for u8 {
    /// Returns the raw bit representation of the flags.
    fn from(value: FontFlags) -> Self {
        value.bits
    }
}
bitflags! {
    /// Alignment for different items on the scene that is described in two axes. The default is
    /// center for both axes.
    ///
    /// For example, only using `LEFT` would arrange the target to the left horizontally and
    /// centered vertically. To align to the top right, the alignments can be combined to
    /// `RIGHT | TOP`. Combining both values for a single axis is invalid, like `LEFT | RIGHT`.
    pub struct Alignment: u8 {
        /// Align to the left side.
        const LEFT = 1;
        /// Align to the right side.
        const RIGHT = 2;
        /// Align to the top.
        const TOP = 4;
        /// Align to the bottom.
        const BOTTOM = 8;
    }
}

impl TryFrom<u8> for Alignment {
    type Error = Error;

    /// Converts a raw bit pattern into an [`Alignment`], failing with [`Error::UnknownFlags`]
    /// if any bit outside the defined flags is set.
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Self::from_bits(value).ok_or(Error::UnknownFlags(value))
    }
}

impl From<Alignment> for u8 {
    /// Returns the raw bit representation of the alignment.
    fn from(value: Alignment) -> Self {
        value.bits
    }
}
/// Different kinds of bounds that can be applied to different items on the scene as part of the
/// [`Bounds`] type.
///
/// The serialized names mirror the `OBS_BOUNDS_*` constants of the OBS API.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum BoundsType {
    /// Stretch to bounds.
    #[serde(rename = "OBS_BOUNDS_STRETCH")]
    Stretch,
    /// Scale to inner bounds.
    #[serde(rename = "OBS_BOUNDS_SCALE_INNER")]
    ScaleInner,
    /// Scale to outer bounds.
    #[serde(rename = "OBS_BOUNDS_SCALE_OUTER")]
    ScaleOuter,
    /// Scale to width of bounds.
    #[serde(rename = "OBS_BOUNDS_SCALE_TO_WIDTH")]
    ScaleToWidth,
    /// Scale to height of bounds.
    #[serde(rename = "OBS_BOUNDS_SCALE_TO_HEIGHT")]
    ScaleToHeight,
    /// Maximum size only.
    #[serde(rename = "OBS_BOUNDS_MAX_ONLY")]
    MaxOnly,
    /// No bounds.
    #[serde(rename = "OBS_BOUNDS_NONE")]
    None,
}

@ -1,637 +0,0 @@
//! Custom deserializers that are used in both the [`events`](crate::events) and
//! [`responses`](crate::responses) modules.
use std::{
convert::TryFrom,
fmt::{self, Display},
marker::PhantomData,
};
use chrono::Duration;
use serde::de::{self, Deserializer, Visitor};
/// Internal parsing/conversion errors raised by the custom deserializers in this module. They
/// are always surfaced to serde through `de::Error::custom`.
#[derive(Debug, thiserror::Error)]
enum Error {
    /// The hours part is missing in a `HH:MM:SS.mmm` formatted duration.
    #[error("hours missing")]
    HoursMissing,
    /// The minutes part is missing in a `HH:MM:SS.mmm` formatted duration.
    #[error("minutes missing")]
    MinutesMissing,
    /// The seconds part is missing in a `HH:MM:SS.mmm` formatted duration.
    #[error("seconds missing")]
    SecondsMissing,
    /// The milliseconds part is missing in a `HH:MM:SS.mmm` formatted duration.
    #[error("milliseconds missing")]
    MillisecondsMissing,
    /// A numeric component of a duration could not be parsed as an integer.
    #[error("invalid integer")]
    InvalidInteger(#[from] std::num::ParseIntError),
    /// A `u64` value exceeded the `i64` range expected for durations.
    #[error("value {1} is too large for an i64: {0}")]
    ValueTooLargeI64(#[source] std::num::TryFromIntError, u64),
    /// An integer exceeded the `u8` range expected for bitflags.
    #[error("value doesn't fit into an u8 integer: {0}")]
    ValueDoesntFitU8(#[source] std::num::TryFromIntError),
    /// Converting a raw `u8` into the target bitflags type failed.
    #[error("conversion from u8 failed: {0}")]
    ConversionFailed(String),
}
pub fn duration_opt<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_option(DurationOptVisitor)
}
struct DurationOptVisitor;
impl<'de> Visitor<'de> for DurationOptVisitor {
type Value = Option<Duration>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("an optional duration formatted as 'HH:MM:SS.mmm'")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
let duration = || -> Result<Duration, Error> {
let mut hms = v.splitn(3, ':');
let hours = hms.next().ok_or(Error::HoursMissing)?.parse()?;
let minutes = hms.next().ok_or(Error::MinutesMissing)?.parse()?;
let seconds = hms.next().ok_or(Error::SecondsMissing)?;
let mut sm = seconds.splitn(2, '.');
let seconds = sm.next().ok_or(Error::SecondsMissing)?.parse()?;
let millis = sm.next().ok_or(Error::MillisecondsMissing)?.parse()?;
Ok(Duration::hours(hours)
+ Duration::minutes(minutes)
+ Duration::seconds(seconds)
+ Duration::milliseconds(millis))
};
duration().map(Some).map_err(de::Error::custom)
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(None)
}
fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_str(Self)
}
}
pub fn duration_millis_opt<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_option(DurationMillisOptVisitor)
}
struct DurationMillisOptVisitor;
impl<'de> Visitor<'de> for DurationMillisOptVisitor {
type Value = Option<Duration>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a duration in milliseconds where -1 means a fixed duration")
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(if v < 0 {
None
} else {
Some(Duration::milliseconds(v))
})
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
match i64::try_from(v) {
Ok(value) => self.visit_i64(value),
Err(e) => Err(de::Error::custom(Error::ValueTooLargeI64(e, v))),
}
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(None)
}
fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_i64(Self)
}
}
pub fn duration_millis<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_i64(DurationMillisVisitor)
}
struct DurationMillisVisitor;
impl<'de> Visitor<'de> for DurationMillisVisitor {
type Value = Duration;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a duration in milliseconds")
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Duration::milliseconds(v))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
match i64::try_from(v) {
Ok(value) => self.visit_i64(value),
Err(e) => Err(de::Error::custom(Error::ValueTooLargeI64(e, v))),
}
}
}
pub fn duration_nanos<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_i64(DurationNanosVisitor)
}
struct DurationNanosVisitor;
impl<'de> Visitor<'de> for DurationNanosVisitor {
type Value = Duration;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a duration in nanoseconds")
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Duration::nanoseconds(v))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
match i64::try_from(v) {
Ok(value) => self.visit_i64(value),
Err(e) => Err(de::Error::custom(Error::ValueTooLargeI64(e, v))),
}
}
}
pub fn bitflags_u8<'de, D, T, TE>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: TryFrom<u8, Error = TE>,
TE: Display,
{
deserializer.deserialize_u8(BitflagsU8Visitor { flags: PhantomData })
}
struct BitflagsU8Visitor<T, TE> {
flags: PhantomData<(T, TE)>,
}
impl<'de, T, TE> Visitor<'de> for BitflagsU8Visitor<T, TE>
where
T: TryFrom<u8, Error = TE>,
TE: Display,
{
type Value = T;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("bitflags encoded as u8 integer")
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
u8::try_from(v)
.map_err(|e| de::Error::custom(Error::ValueDoesntFitU8(e)))
.and_then(|v| self.visit_u8(v))
}
fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E>
where
E: de::Error,
{
T::try_from(v).map_err(|e| de::Error::custom(Error::ConversionFailed(e.to_string())))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
u8::try_from(v)
.map_err(|e| de::Error::custom(Error::ValueDoesntFitU8(e)))
.and_then(|v| self.visit_u8(v))
}
}
#[cfg(test)]
mod tests {
    use anyhow::Context;
    use bitflags::bitflags;
    use serde::Deserialize;
    use serde_test::{assert_de_tokens, assert_de_tokens_error, Token};

    use super::*;

    // Covers the `HH:MM:SS.mmm` string form, a missing value and an explicit `Some` wrapper.
    #[test]
    fn deser_duration_opt() {
        #[derive(Debug, PartialEq, Eq, Deserialize)]
        struct SimpleDuration {
            #[serde(deserialize_with = "duration_opt")]
            value: Option<Duration>,
        }

        // Plain string value.
        assert_de_tokens(
            &SimpleDuration {
                value: Some(
                    Duration::hours(2)
                        + Duration::minutes(15)
                        + Duration::seconds(4)
                        + Duration::milliseconds(310),
                ),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::Str("02:15:04.310"),
                Token::StructEnd,
            ],
        );
        // Missing value deserializes to `None`.
        assert_de_tokens(
            &SimpleDuration { value: None },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::None,
                Token::StructEnd,
            ],
        );
        // Value explicitly wrapped in `Some`.
        assert_de_tokens(
            &SimpleDuration {
                value: Some(
                    Duration::hours(2)
                        + Duration::minutes(15)
                        + Duration::seconds(4)
                        + Duration::milliseconds(310),
                ),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::Some,
                Token::Str("02:15:04.310"),
                Token::StructEnd,
            ],
        );
    }

    // Covers positive values, the negative "fixed duration" sentinel, u64 overflow and the
    // optional wrappers.
    #[test]
    fn deser_duration_millis_opt() {
        #[derive(Debug, PartialEq, Eq, Deserialize)]
        struct SimpleDuration {
            #[serde(deserialize_with = "duration_millis_opt")]
            value: Option<Duration>,
        }

        // Positive signed value.
        assert_de_tokens(
            &SimpleDuration {
                value: Some(Duration::milliseconds(150)),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::I64(150),
                Token::StructEnd,
            ],
        );
        // Negative values map to `None`.
        assert_de_tokens(
            &SimpleDuration { value: None },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::I64(-1),
                Token::StructEnd,
            ],
        );
        // Unsigned values are accepted as well.
        assert_de_tokens(
            &SimpleDuration {
                value: Some(Duration::milliseconds(150)),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(150),
                Token::StructEnd,
            ],
        );
        // Unsigned values beyond the i64 range fail with a descriptive error.
        assert_de_tokens_error::<SimpleDuration>(
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(u64::MAX),
                Token::StructEnd,
            ],
            "value 18446744073709551615 is too large for an i64: \
             out of range integral type conversion attempted",
        );
        // Missing value deserializes to `None`.
        assert_de_tokens(
            &SimpleDuration { value: None },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::None,
                Token::StructEnd,
            ],
        );
        // Value explicitly wrapped in `Some`.
        assert_de_tokens(
            &SimpleDuration {
                value: Some(Duration::milliseconds(150)),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::Some,
                Token::I64(150),
                Token::StructEnd,
            ],
        );
    }

    // Covers signed and unsigned input plus u64 overflow for the mandatory variant.
    #[test]
    fn deser_duration_millis() {
        #[derive(Debug, PartialEq, Eq, Deserialize)]
        struct SimpleDuration {
            #[serde(deserialize_with = "duration_millis")]
            value: Duration,
        }

        assert_de_tokens(
            &SimpleDuration {
                value: Duration::milliseconds(150),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::I64(150),
                Token::StructEnd,
            ],
        );
        assert_de_tokens(
            &SimpleDuration {
                value: Duration::milliseconds(150),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(150),
                Token::StructEnd,
            ],
        );
        assert_de_tokens_error::<SimpleDuration>(
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(u64::MAX),
                Token::StructEnd,
            ],
            "value 18446744073709551615 is too large for an i64: \
             out of range integral type conversion attempted",
        );
    }

    // Same shape as the milliseconds test, but for the nanosecond deserializer.
    #[test]
    fn deser_duration_nanos() {
        #[derive(Debug, PartialEq, Eq, Deserialize)]
        struct SimpleDuration {
            #[serde(deserialize_with = "duration_nanos")]
            value: Duration,
        }

        assert_de_tokens(
            &SimpleDuration {
                value: Duration::nanoseconds(150),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::I64(150),
                Token::StructEnd,
            ],
        );
        assert_de_tokens(
            &SimpleDuration {
                value: Duration::nanoseconds(150),
            },
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(150),
                Token::StructEnd,
            ],
        );
        assert_de_tokens_error::<SimpleDuration>(
            &[
                Token::Struct {
                    name: "SimpleDuration",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(u64::MAX),
                Token::StructEnd,
            ],
            "value 18446744073709551615 is too large for an i64: \
             out of range integral type conversion attempted",
        );
    }

    // Covers i64/u8/u64 inputs, narrowing failures and unknown-flag rejection for the generic
    // bitflags deserializer, using a small local flags type.
    #[test]
    fn deser_bitflags_u8() {
        bitflags! {
            struct Flags: u8 {
                const ONE = 1;
                const TWO = 2;
            }
        }

        impl TryFrom<u8> for Flags {
            type Error = anyhow::Error;

            fn try_from(value: u8) -> Result<Self, Self::Error> {
                Self::from_bits(value).context("unknown flags found")
            }
        }

        #[derive(Debug, PartialEq, Eq, Deserialize)]
        struct SimpleFlags {
            #[serde(deserialize_with = "bitflags_u8")]
            value: Flags,
        }

        // Signed input that fits into a u8.
        assert_de_tokens(
            &SimpleFlags {
                value: Flags::ONE | Flags::TWO,
            },
            &[
                Token::Struct {
                    name: "SimpleFlags",
                    len: 1,
                },
                Token::Str("value"),
                Token::I64(3),
                Token::StructEnd,
            ],
        );
        // Signed input that overflows a u8.
        assert_de_tokens_error::<SimpleFlags>(
            &[
                Token::Struct {
                    name: "SimpleFlags",
                    len: 1,
                },
                Token::Str("value"),
                Token::I64(i64::MAX),
                Token::StructEnd,
            ],
            "value doesn't fit into an u8 integer: out of range integral type conversion attempted",
        );
        // Direct u8 input.
        assert_de_tokens(
            &SimpleFlags {
                value: Flags::ONE | Flags::TWO,
            },
            &[
                Token::Struct {
                    name: "SimpleFlags",
                    len: 1,
                },
                Token::Str("value"),
                Token::U8(3),
                Token::StructEnd,
            ],
        );
        // In-range u8 with bits that don't map to any known flag.
        assert_de_tokens_error::<SimpleFlags>(
            &[
                Token::Struct {
                    name: "SimpleFlags",
                    len: 1,
                },
                Token::Str("value"),
                Token::U8(100),
                Token::StructEnd,
            ],
            "conversion from u8 failed: unknown flags found",
        );
        // Unsigned input that fits into a u8.
        assert_de_tokens(
            &SimpleFlags {
                value: Flags::ONE | Flags::TWO,
            },
            &[
                Token::Struct {
                    name: "SimpleFlags",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(3),
                Token::StructEnd,
            ],
        );
        // Unsigned input that overflows a u8.
        assert_de_tokens_error::<SimpleFlags>(
            &[
                Token::Struct {
                    name: "SimpleFlags",
                    len: 1,
                },
                Token::Str("value"),
                Token::U64(u64::MAX),
                Token::StructEnd,
            ],
            "value doesn't fit into an u8 integer: out of range integral type conversion attempted",
        );
    }
}

@ -1,589 +1,223 @@
//! All events that can be received from the API.
use chrono::Duration;
use serde::Deserialize;
use crate::common::{SceneItem, SceneItemTransform};
use std::{collections::BTreeMap, path::PathBuf};
/// Events are sent when a recognized action occurs within OBS.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Event {
#[serde(default, deserialize_with = "crate::de::duration_opt")]
/// Time elapsed between now and stream start (only present if OBS Studio is streaming).
pub stream_timecode: Option<Duration>,
/// Time elapsed between now and recording start (only present if OBS Studio is recording).
#[serde(default, deserialize_with = "crate::de::duration_opt")]
pub rec_timecode: Option<Duration>,
/// The type of event.
#[serde(flatten)]
pub ty: EventType,
}
use serde::Deserialize;
/// All possible event types that can occur while the user interacts with OBS.
#[derive(Clone, Debug, Deserialize)]
#[serde(tag = "update-type")]
pub enum EventType {
#[serde(tag = "eventType", content = "eventData")]
pub enum Event {
// --------------------------------
// Scenes
// Config
// --------------------------------
/// Indicates a scene change.
#[serde(rename_all = "kebab-case")]
SwitchScenes {
/// The new scene.
scene_name: String,
/// List of scene items in the new scene.
sources: Vec<SceneItem>,
},
/// The scene list has been modified. Scenes have been added, removed, or renamed.
///
/// Note: This event is not fired when the scenes are reordered.
ScenesChanged {
/// Scenes list.
scenes: Vec<Scene>,
},
/// Triggered when switching to another scene collection or when renaming the current scene
/// collection.
#[serde(rename_all = "camelCase")]
SceneCollectionChanged {
/// Name of the new current scene collection.
scene_collection: String,
CurrentSceneCollectionChanged {
scene_collection_name: String,
},
/// Triggered when a scene collection is created, added, renamed, or removed.
#[serde(rename_all = "camelCase")]
SceneCollectionListChanged {
/// Scene collections list.
scene_collections: Vec<SceneCollection>,
scene_collections: Vec<String>,
},
// --------------------------------
// Transitions
// --------------------------------
/// The active transition has been changed.
#[serde(rename_all = "kebab-case")]
SwitchTransition {
/// The name of the new active transition.
transition_name: String,
#[serde(rename_all = "camelCase")]
CurrentProfileChanged {
profile_name: String,
},
/// The list of available transitions has been modified. Transitions have been added, removed,
/// or renamed.
TransitionListChanged {
/// Transitions list.
transitions: Vec<Transition>,
},
/// The active transition duration has been changed.
#[serde(rename_all = "kebab-case")]
TransitionDurationChanged {
/// New transition duration.
#[serde(deserialize_with = "crate::de::duration_millis")]
new_duration: Duration,
},
/// A transition (other than "cut") has begun.
#[serde(rename_all = "kebab-case")]
TransitionBegin {
/// Transition name.
name: String,
/// Transition type.
#[serde(rename = "type")]
ty: String,
/// Transition duration (in milliseconds). Will be -1 for any transition with a fixed
/// duration, such as a Stinger, due to limitations of the OBS API.
#[serde(deserialize_with = "crate::de::duration_millis_opt")]
duration: Option<Duration>,
/// Source scene of the transition.
from_scene: Option<String>,
/// Destination scene of the transition.
to_scene: String,
},
/// A transition (other than "cut") has ended.
///
/// Note: The `from-scene` field is not available in TransitionEnd.
#[serde(rename_all = "kebab-case")]
TransitionEnd {
/// Transition name.
name: String,
/// Transition type.
#[serde(rename = "type")]
ty: String,
/// Transition duration (in milliseconds).
#[serde(deserialize_with = "crate::de::duration_millis")]
duration: Duration,
/// Destination scene of the transition.
to_scene: String,
},
/// A stinger transition has finished playing its video.
#[serde(rename_all = "kebab-case")]
TransitionVideoEnd {
/// Transition name.
name: String,
/// Transition type.
#[serde(rename = "type")]
ty: String,
/// Transition duration (in milliseconds).
#[serde(deserialize_with = "crate::de::duration_millis")]
duration: Duration,
/// Source scene of the transition.
from_scene: Option<String>,
/// Destination scene of the transition.
to_scene: String,
#[serde(rename_all = "camelCase")]
ProfileListChanged {
profiles: Vec<String>,
},
// --------------------------------
// Profiles
// Filters
// --------------------------------
/// Triggered when switching to another profile or when renaming the current profile.
ProfileChanged {
/// Name of the new current profile.
profile: String,
},
/// Triggered when a profile is created, added, renamed, or removed.
ProfileListChanged {
/// Profiles list.
profiles: Vec<Profile>,
},
// --------------------------------
// Streaming
// General
// --------------------------------
/// A request to start streaming has been issued.
#[serde(rename_all = "kebab-case")]
StreamStarting {
/// Always false (retrocompatibility).
#[serde(default)]
preview_only: bool,
},
/// Streaming started successfully.
StreamStarted,
/// A request to stop streaming has been issued.
#[serde(rename_all = "kebab-case")]
StreamStopping {
/// Always false (retrocompatibility).
#[serde(default)]
preview_only: bool,
},
/// Streaming stopped successfully.
StreamStopped,
/// Emitted every 2 seconds when stream is active.
#[serde(rename_all = "kebab-case")]
StreamStatus {
/// Current streaming state.
streaming: bool,
/// Current recording state.
recording: bool,
/// Replay Buffer status.
replay_buffer_active: bool,
/// Amount of data per second (in bytes) transmitted by the stream encoder.
bytes_per_sec: u64,
/// Amount of data per second (in kilobits) transmitted by the stream encoder.
kbits_per_sec: u64,
/// Percentage of dropped frames.
strain: f64,
/// Total time (in seconds) since the stream started.
total_stream_time: u64,
/// Total number of frames transmitted since the stream started.
num_total_frames: u64,
/// Number of frames dropped by the encoder since the stream started.
num_dropped_frames: u64,
/// Current framerate.
fps: f64,
/// Number of frames rendered.
render_total_frames: u64,
/// Number of frames missed due to rendering lag.
render_missed_frames: u64,
/// Number of frames outputted.
output_total_frames: u64,
/// Number of frames skipped due to encoding lag.
output_skipped_frames: u64,
/// Average frame time (in milliseconds).
average_frame_time: f64,
/// Current CPU usage (percentage).
cpu_usage: f64,
/// Current RAM usage (in megabytes).
memory_usage: f64,
/// Free recording disk space (in megabytes).
free_disk_space: f64,
/// Always false (retrocompatibility).
#[serde(default)]
preview_only: bool,
ExitStarted,
#[serde(rename_all = "camelCase")]
StudioModeStateChanged {
studio_mode_enabled: bool,
},
// --------------------------------
// Recording
// Inputs
// --------------------------------
/// A request to start recording has been issued.
///
/// Note: `recordingFilename` is not provided in this event because this information is not
/// available at the time this event is emitted.
RecordingStarting,
/// Recording started successfully.
#[serde(rename_all = "camelCase")]
RecordingStarted {
/// Absolute path to the file of the current recording.
recording_filename: String,
InputCreated {
input_name: String,
input_kind: String,
unversioned_input_kind: String,
input_settings: serde_json::Value,
default_input_settings: serde_json::Value,
},
/// A request to stop recording has been issued.
#[serde(rename_all = "camelCase")]
RecordingStopping {
/// Absolute path to the file of the current recording.
recording_filename: String,
InputRemoved {
input_name: String,
},
/// Recording stopped successfully.
#[serde(rename_all = "camelCase")]
RecordingStopped {
/// Absolute path to the file of the current recording.
recording_filename: String,
InputNameChanged {
old_input_name: String,
input_name: String,
},
/// Current recording paused.
RecordingPaused,
/// Current recording resumed.
RecordingResumed,
// --------------------------------
// Virtual Cam
// --------------------------------
/// Virtual cam started successfully.
VirtualCamStarted,
/// Virtual cam stopped successfully.
VirtualCamStopped,
// --------------------------------
// Replay Buffer
// --------------------------------
/// A request to start the replay buffer has been issued.
ReplayStarting,
/// Replay Buffer started successfully.
ReplayStarted,
/// A request to stop the replay buffer has been issued.
ReplayStopping,
/// Replay Buffer stopped successfully.
ReplayStopped,
// --------------------------------
// Other
// --------------------------------
/// OBS is exiting.
Exiting,
// --------------------------------
// General
// --------------------------------
/// A custom broadcast message, sent by the server, requested by one of the websocket clients.
BroadcastCustomMessage {
/// Identifier provided by the sender.
realm: String,
/// User-defined data.
data: serde_json::Map<String, serde_json::Value>,
#[serde(rename_all = "camelCase")]
InputActiveStateChanged {
input_name: String,
video_active: bool,
},
#[serde(rename_all = "camelCase")]
InputShowStateChanged {
input_name: String,
video_showing: bool,
},
#[serde(rename_all = "camelCase")]
InputMuteStateChanged {
input_name: String,
input_muted: bool,
},
#[serde(rename_all = "camelCase")]
InputVolumeChanged {
input_name: String,
input_volume_mul: f64,
input_volume_db: f64,
},
#[serde(rename_all = "camelCase")]
InputAudioSyncOffsetChanged {
input_name: String,
input_audio_sync_offset: i64,
},
#[serde(rename_all = "camelCase")]
InputAudioTracksChanged {
input_name: String,
input_audio_tracks: BTreeMap<String, bool>,
},
// --------------------------------
// Sources
// Media Inputs
// --------------------------------
/// A source has been created. A source can be an input, a scene or a transition.
#[serde(rename_all = "camelCase")]
SourceCreated {
/// Source name.
source_name: String,
/// Source type. Can be "input", "scene", "transition" or "filter".
source_type: SourceType,
/// Source kind.
source_kind: String,
/// Source settings.
source_settings: serde_json::Value,
},
/// A source has been destroyed/removed. A source can be an input, a scene or a transition.
#[serde(rename_all = "camelCase")]
SourceDestroyed {
/// Source name.
source_name: String,
/// Source type. Can be "input", "scene", "transition" or "filter".
source_type: SourceType,
/// Source kind.
source_kind: String,
},
/// The volume of a source has changed.
#[serde(rename_all = "camelCase")]
SourceVolumeChanged {
/// Source name.
source_name: String,
/// Source volume.
volume: f32,
/// Source volume in Decibel
volume_db: f32,
},
/// A source has been muted or unmuted.
#[serde(rename_all = "camelCase")]
SourceMuteStateChanged {
/// Source name.
source_name: String,
/// Mute status of the source.
muted: bool,
},
/// A source has removed audio.
#[serde(rename_all = "camelCase")]
SourceAudioDeactivated {
/// Source name.
source_name: String,
},
/// A source has added audio.
#[serde(rename_all = "camelCase")]
SourceAudioActivated {
/// Source name.
source_name: String,
},
/// The audio sync offset of a source has changed.
#[serde(rename_all = "camelCase")]
SourceAudioSyncOffsetChanged {
/// Source name.
source_name: String,
/// Audio sync offset of the source (in nanoseconds).
#[serde(deserialize_with = "crate::de::duration_nanos")]
sync_offset: Duration,
},
/// Audio mixer routing changed on a source.
#[serde(rename_all = "camelCase")]
SourceAudioMixersChanged {
/// Source name.
source_name: String,
/// Routing status of the source for each audio mixer (array of 6 values).
mixers: [AudioMixer; 6],
/// Raw mixer flags (little-endian, one bit per mixer) as an hexadecimal value.
hex_mixers_value: String,
},
/// A source has been renamed.
#[serde(rename_all = "camelCase")]
SourceRenamed {
/// Previous source name.
previous_name: String,
/// New source name.
new_name: String,
/// Type of source (input, scene, filter, transition).
source_type: SourceType,
},
/// A filter was added to a source.
#[serde(rename_all = "camelCase")]
SourceFilterAdded {
/// Source name.
source_name: String,
/// Filter name.
filter_name: String,
/// Filter type.
filter_type: String,
/// Filter settings.
filter_settings: serde_json::Value,
},
/// A filter was removed from a source.
#[serde(rename_all = "camelCase")]
SourceFilterRemoved {
/// Source name.
source_name: String,
/// Filter name.
filter_name: String,
/// Filter type.
filter_type: String,
},
/// The visibility/enabled state of a filter changed.
#[serde(rename_all = "camelCase")]
SourceFilterVisibilityChanged {
/// Source name.
source_name: String,
/// Filter name.
filter_name: String,
/// New filter state.
filter_enabled: bool,
},
/// Filters in a source have been reordered.
#[serde(rename_all = "camelCase")]
SourceFiltersReordered {
/// Source name.
source_name: String,
/// Ordered Filters list.
filters: Vec<SourceFilter>,
#[serde(rename_all = "camelCase")]
MediaInputPlaybackStarted {
input_name: String,
},
#[serde(rename_all = "camelCase")]
MediaInputPlaybackEnded {
input_name: String,
},
#[serde(rename_all = "camelCase")]
MediaInputActionTriggered {
input_name: String,
media_action: MediaAction,
},
// --------------------------------
// Media
// Outputs
// --------------------------------
/// Media is playing.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaPlaying {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Media playback paused.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaPaused {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Media playback restarted.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaRestarted {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Media playback stopped.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaStopped {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Next media started.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaNext {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Previous media started.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaPrevious {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Media playback started.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaStarted {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
},
/// Media playback ended.
///
/// Note: This event is only emitted when something actively controls the media/VLC source. In
/// other words, the source will never emit this on its own naturally.
#[serde(rename_all = "camelCase")]
MediaEnded {
/// Source name.
source_name: String,
/// The ID type of the source (Eg. `vlc_source` or `ffmpeg_source`).
source_kind: String,
#[serde(rename_all = "camelCase")]
StreamStateChanged {
output_active: bool,
output_state: OutputState,
},
#[serde(rename_all = "camelCase")]
RecordStateChanged {
output_active: bool,
output_state: OutputState,
},
#[serde(rename_all = "camelCase")]
ReplayBufferStateChanged {
output_active: bool,
output_state: OutputState,
},
#[serde(rename_all = "camelCase")]
VirtualcamStateChanged {
output_active: bool,
output_state: OutputState,
},
#[serde(rename_all = "camelCase")]
ReplayBufferSaved {
saved_replay_path: PathBuf,
},
// --------------------------------
// Scene Items
// --------------------------------
/// Scene items within a scene have been reordered.
#[serde(rename_all = "kebab-case")]
SourceOrderChanged {
/// Name of the scene where items have been reordered.
#[serde(rename_all = "camelCase")]
SceneItemCreated {
scene_name: String,
input_name: String,
scene_item_id: u64,
scene_item_index: u32,
},
#[serde(rename_all = "camelCase")]
SceneItemRemoved {
scene_name: String,
/// Ordered list of scene items.
scene_items: Vec<SourceOrderSceneItem>,
input_name: String,
scene_item_id: u64,
scene_item_index: u32,
},
/// A scene item has been added to a scene.
#[serde(rename_all = "kebab-case")]
SceneItemAdded {
/// Name of the scene.
#[serde(rename_all = "camelCase")]
SceneItemReindexed {
scene_name: String,
/// Name of the item added to the scene.
item_name: String,
/// Scene item ID.
item_id: i64,
scene_items: Vec<BasicSceneItem>,
},
/// A scene item has been removed from a scene.
#[serde(rename_all = "kebab-case")]
SceneItemRemoved {
/// Name of the scene.
#[serde(rename_all = "camelCase")]
SceneItemEnableStateChanged {
scene_name: String,
/// Name of the item removed from the scene.
item_name: String,
/// Scene item ID.
item_id: i64,
},
/// A scene item's visibility has been toggled.
#[serde(rename_all = "kebab-case")]
SceneItemVisibilityChanged {
/// Name of the scene.
scene_item_id: u64,
scene_item_enabled: bool,
},
#[serde(rename_all = "camelCase")]
SceneItemLockStateChanged {
scene_name: String,
/// Name of the item in the scene.
item_name: String,
/// Scene item ID.
item_id: i64,
/// New visibility state of the item.
item_visible: bool,
},
/// A scene item's locked status has been toggled.
#[serde(rename_all = "kebab-case")]
SceneItemLockChanged {
/// Name of the scene.
scene_item_id: u64,
scene_item_locked: bool,
},
SceneItemTransformChanged,
// --------------------------------
// Scenes
// --------------------------------
#[serde(rename_all = "camelCase")]
SceneCreated {
scene_name: String,
/// Name of the item in the scene.
item_name: String,
/// Scene item ID.
item_id: i64,
/// New locked state of the item.
item_locked: bool,
},
/// A scene item's transform has been changed.
#[serde(rename_all = "kebab-case")]
SceneItemTransformChanged {
/// Name of the scene.
is_group: bool,
},
#[serde(rename_all = "camelCase")]
SceneRemoved {
scene_name: String,
/// Name of the item in the scene.
item_name: String,
/// Scene item ID.
item_id: i64,
/// Scene item transform properties.
transform: SceneItemTransform,
},
/// A scene item is selected.
#[serde(rename_all = "kebab-case")]
SceneItemSelected {
/// Name of the scene.
is_group: bool,
},
#[serde(rename_all = "camelCase")]
SceneNameChanged {
old_scene_name: String,
scene_name: String,
/// Name of the item in the scene.
item_name: String,
/// ID of the item in the scene.
item_id: i64,
},
/// A scene item is deselected.
#[serde(rename_all = "kebab-case")]
SceneItemDeselected {
/// Name of the scene.
},
#[serde(rename_all = "camelCase")]
CurrentSceneChanged {
scene_name: String,
},
#[serde(rename_all = "camelCase")]
CurrentPreviewSceneChanged {
scene_name: String,
/// Name of the item in the scene.
item_name: String,
/// ID of the item in the scene.
item_id: i64,
},
#[serde(rename_all = "camelCase")]
SceneListChanged {
scenes: Vec<Scene>,
},
// --------------------------------
// Studio Mode
// Transitions
// --------------------------------
/// The selected preview scene has changed (only available in Studio Mode).
#[serde(rename_all = "kebab-case")]
PreviewSceneChanged {
/// Name of the scene being previewed.
scene_name: String,
/// List of sources composing the scene.
sources: Vec<SceneItem>,
#[serde(rename_all = "camelCase")]
TransitionCreated {
transition_name: String,
transition_kind: String,
transition_fixed: bool,
},
/// Studio Mode has been enabled or disabled.
#[serde(rename_all = "kebab-case")]
StudioModeSwitched {
/// The new enabled state of Studio Mode.
new_state: bool,
#[serde(rename_all = "camelCase")]
TransitionRemoved {
transition_name: String,
},
#[serde(rename_all = "camelCase")]
TransitionNameChanged {
old_transition_name: String,
transition_name: String,
},
// --------------------------------
// Custom
// --------------------------------
/// WebSocket server is stopping.
ServerStopping,
/// WebSocket server has stopped.
@ -593,78 +227,53 @@ pub enum EventType {
Unknown,
}
/// Part of [`EventType::ScenesChanged`].
#[derive(Clone, Debug, Deserialize)]
pub struct Scene {
/// Name of the scene.
pub name: String,
/// Ordered list of the scene's source items.
pub sources: Vec<SceneItem>,
}
/// Part of [`EventType::SceneCollectionListChanged`].
#[derive(Clone, Debug, Deserialize)]
pub struct SceneCollection {
/// Name of the scene collection.
pub name: String,
}
/// Part of [`EventType::TransitionListChanged`].
#[derive(Clone, Debug, Deserialize)]
pub struct Transition {
/// Name of the transition.
pub name: String,
}
/// Part of [`EventType::ProfileListChanged`].
#[derive(Clone, Debug, Deserialize)]
pub struct Profile {
/// Profile name.
pub name: String,
/// Kind of action performed on a media input.
///
/// Part of [`EventType::MediaInputActionTriggered`].
#[derive(Clone, Copy, Debug, Deserialize)]
pub enum MediaAction {
/// Playback was paused.
#[serde(rename = "OBS_WEBSOCKET_MEDIA_INPUT_ACTION_PAUSE")]
Pause,
/// Playback was started (or resumed).
#[serde(rename = "OBS_WEBSOCKET_MEDIA_INPUT_ACTION_PLAY")]
Play,
/// Playback was restarted.
#[serde(rename = "OBS_WEBSOCKET_MEDIA_INPUT_ACTION_RESTART")]
Restart,
/// Playback was stopped.
#[serde(rename = "OBS_WEBSOCKET_MEDIA_INPUT_ACTION_STOP")]
Stop,
/// Skipped to the next media item.
#[serde(rename = "OBS_WEBSOCKET_MEDIA_INPUT_ACTION_NEXT")]
Next,
/// Went back to the previous media item.
#[serde(rename = "OBS_WEBSOCKET_MEDIA_INPUT_ACTION_PREVIOUS")]
Previous,
/// Fallback for any action value not known to this crate.
#[serde(other)]
Unknown,
}
/// Part of [`EventType::SourceAudioMixersChanged`].
#[derive(Clone, Debug, Deserialize)]
pub struct AudioMixer {
/// Mixer number.
pub id: i64,
/// Routing status.
pub enabled: bool,
/// State of an output (stream, recording, replay buffer or virtual cam).
///
/// Part of [`EventType::StreamStateChanged`], [`EventType::RecordStateChanged`],
/// [`EventType::ReplayBufferStateChanged`] and [`EventType::VirtualcamStateChanged`].
#[derive(Clone, Copy, Debug, Deserialize)]
pub enum OutputState {
/// The output is starting up.
#[serde(rename = "OBS_WEBSOCKET_OUTPUT_STARTING")]
Starting,
/// The output started successfully.
#[serde(rename = "OBS_WEBSOCKET_OUTPUT_STARTED")]
Started,
/// The output is shutting down.
#[serde(rename = "OBS_WEBSOCKET_OUTPUT_STOPPING")]
Stopping,
/// The output fully stopped.
#[serde(rename = "OBS_WEBSOCKET_OUTPUT_STOPPED")]
Stopped,
/// The output was paused.
#[serde(rename = "OBS_WEBSOCKET_OUTPUT_PAUSED")]
Paused,
/// The output resumed after being paused.
#[serde(rename = "OBS_WEBSOCKET_OUTPUT_RESUMED")]
Resumed,
/// Fallback for any state value not known to this crate.
#[serde(other)]
Unknown,
}
/// Part of [`EventType::SourceFiltersReordered`].
#[derive(Clone, Debug, Deserialize)]
pub struct SourceFilter {
/// Filter name.
pub name: String,
/// Filter type.
#[serde(rename = "type")]
pub ty: String,
/// Filter visibility status.
pub enabled: bool,
#[serde(rename_all = "camelCase")]
pub struct BasicSceneItem {
scene_item_id: u64,
scene_item_index: u32,
}
/// Part of [`EventType::SourceOrderChanged`].
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SourceOrderSceneItem {
/// Name of the item's source.
pub source_name: String,
/// Unique ID of the scene item.
pub item_id: i64,
}
/// Part of [`EventType::SourceCreated`], [`EventType::SourceDestroyed`] and
/// [`EventType::SourceRenamed`].
#[derive(Clone, Copy, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SourceType {
/// An input source.
Input,
/// A scene.
Scene,
/// Transition between scenes.
Transition,
/// Filter for scene items.
Filter,
#[serde(rename_all = "camelCase")]
pub struct Scene {
scene_name: String,
scene_index: u32,
is_group: bool,
}

@ -13,15 +13,12 @@
//! #[tokio::main]
//! async fn main() -> Result<()> {
//! // Connect to the OBS instance through obs-websocket.
//! let client = Client::connect("localhost", 4444).await?;
//! let client = Client::connect("localhost", 4444, Some("password")).await?;
//!
//! // Get and print out version information of OBS and obs-websocket.
//! let version = client.general().get_version().await?;
//! println!("{:#?}", version);
//!
//! /// Optionally log-in (if enabled in obs-websocket) to allow other APIs and receive events.
//! client.login(Some("password")).await?;
//!
//! // Get a list of available scenes and print them out.
//! let scene_list = client.scenes().get_scene_list().await?;
//! println!("{:#?}", scene_list);
@ -31,29 +28,31 @@
//! ```
#![warn(missing_docs, rust_2018_idioms, clippy::all)]
#![allow(dead_code, missing_docs)] // FIXME: TEMPORARY! Only during v5 development
use responses::StatusCode;
pub use semver::{Comparator, Version};
pub use self::client::Client;
pub mod client;
pub mod common;
#[cfg(feature = "events")]
pub mod events;
pub mod requests;
pub mod responses;
mod de;
/// Result type used throughout the crate that uses [`Error`] as default error.
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// Errors that can occur while using this crate.
#[derive(Debug, thiserror::Error)]
pub enum Error {
/// An error occured while trying to connect to the websocket.
/// An error occurred while trying to connect to the websocket.
#[error("failed to connect to the obs-websocket plugin")]
Connect(#[source] tokio_tungstenite::tungstenite::Error),
/// The initial handshake with `obs-websocket` didn't succeed.
#[error("failed to execute the handshake with obs-websocket")]
Handshake(#[from] crate::client::HandshakeError),
/// Failed to serialize the message to be send to the websocket.
#[error("failed to serialize message")]
SerializeMessage(#[source] serde_json::Error),
@ -70,8 +69,11 @@ pub enum Error {
#[error("failed to serialize custom data")]
SerializeCustomData(#[source] serde_json::Error),
/// An error returned from the obs-websocket API.
#[error("API error: {0}")]
Api(String),
#[error("API error: {code:?}")]
Api {
code: StatusCode,
message: Option<String>,
},
/// The obs-websocket API requires authentication but no password was given.
#[error("authentication required but no password provided")]
NoPassword,
@ -89,4 +91,7 @@ pub enum Error {
/// The obs-websocket plugin version doesn't match the required version for this crate.
#[error("obs-websocket version {0} doesn't match required {1}")]
ObsWebsocketVersion(Version, Comparator),
/// The obs-websocket plugin negotiated a different RPC version than requested.
#[error("RPC version {requested} request but server negotiated version {negotiated}")]
RpcVersion { requested: u32, negotiated: u32 },
}

File diff suppressed because it is too large Load Diff

@ -1,261 +0,0 @@
use chrono::Duration;
use rgb::RGBA8;
use serde::ser::{self, Serializer};
/// Errors that can occur while serializing values for the obs-websocket API.
#[derive(Debug, thiserror::Error)]
enum Error {
/// A duration was too large to be represented as a whole number of nanoseconds
/// (chrono returns `None` from `num_nanoseconds` on i64 overflow).
#[error("duration of {} days is too big to be serialized as nanoseconds", .0.num_days())]
DurationTooBig(Duration),
}
/// Serialize an optional [`Duration`] as its total amount of whole milliseconds,
/// or as `None` when absent.
pub fn duration_millis_opt<S>(value: &Option<Duration>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    if let Some(duration) = value {
        serializer.serialize_some(&duration.num_milliseconds())
    } else {
        serializer.serialize_none()
    }
}
/// Serialize a [`Duration`] as its total amount of whole milliseconds.
pub fn duration_millis<S>(value: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let millis = value.num_milliseconds();
    serializer.serialize_i64(millis)
}
pub fn duration_nanos<S>(value: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match value.num_nanoseconds() {
Some(nanos) => serializer.serialize_i64(nanos),
None => Err(ser::Error::custom(Error::DurationTooBig(*value))),
}
}
/// Serialize optional bit flags as their raw `u8` representation, or as `None`
/// when absent.
pub fn bitflags_u8_opt<S, T>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
    T: Into<u8> + Copy,
{
    if let Some(flags) = value {
        let raw: u8 = (*flags).into();
        serializer.serialize_some(&raw)
    } else {
        serializer.serialize_none()
    }
}
/// Serialize an optional RGBA color as a single `u32` in inverse (ABGR) byte
/// order, or as `None` when absent.
pub fn rgba8_inverse_opt<S>(value: &Option<RGBA8>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    match value {
        None => serializer.serialize_none(),
        Some(rgba) => {
            // Little-endian byte order puts `r` in the lowest byte and `a` in
            // the highest, which is exactly the inverse ABGR layout.
            let abgr = u32::from_le_bytes([rgba.r, rgba.g, rgba.b, rgba.a]);
            serializer.serialize_some(&abgr)
        }
    }
}
#[cfg(test)]
mod tests {
use bitflags::bitflags;
use serde::Serialize;
use serde_test::{assert_ser_tokens, assert_ser_tokens_error, Token};
use super::*;
#[test]
fn ser_duration_millis_opt() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "duration_millis_opt")]
value: Option<Duration>,
}
assert_ser_tokens(
&SimpleDuration {
value: Some(Duration::milliseconds(150)),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::I64(150),
Token::StructEnd,
],
);
assert_ser_tokens(
&SimpleDuration { value: None },
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
}
#[test]
fn ser_duration_millis() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "duration_millis")]
value: Duration,
}
assert_ser_tokens(
&SimpleDuration {
value: Duration::milliseconds(150),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::I64(150),
Token::StructEnd,
],
);
}
#[test]
fn ser_duration_nanos() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "duration_nanos")]
value: Duration,
}
assert_ser_tokens(
&SimpleDuration {
value: Duration::nanoseconds(150),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::I64(150),
Token::StructEnd,
],
);
assert_ser_tokens_error(
&SimpleDuration {
value: Duration::days(365_000_000),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
],
"duration of 365000000 days is too big to be serialized as nanoseconds",
);
}
#[test]
fn ser_bitflags_u8_opt() {
bitflags! {
struct Flags: u8 {
const ONE = 1;
const TWO = 2;
}
}
impl From<Flags> for u8 {
fn from(value: Flags) -> Self {
value.bits
}
}
#[derive(Serialize)]
struct SimpleFlags {
#[serde(serialize_with = "bitflags_u8_opt")]
value: Option<Flags>,
}
assert_ser_tokens(
&SimpleFlags {
value: Some(Flags::ONE | Flags::TWO),
},
&[
Token::Struct {
name: "SimpleFlags",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::U8(3),
Token::StructEnd,
],
);
assert_ser_tokens(
&SimpleFlags { value: None },
&[
Token::Struct {
name: "SimpleFlags",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
}
#[test]
fn ser_rgba8_inverse_opt() {
#[derive(Serialize)]
struct SimpleDuration {
#[serde(serialize_with = "rgba8_inverse_opt")]
value: Option<RGBA8>,
}
assert_ser_tokens(
&SimpleDuration {
value: Some(RGBA8::new(1, 2, 3, 4)),
},
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::U32(0x04030201),
Token::StructEnd,
],
);
assert_ser_tokens(
&SimpleDuration { value: None },
&[
Token::Struct {
name: "SimpleDuration",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
}
}

@ -1,246 +0,0 @@
use std::{convert::TryFrom, fmt, iter::FromIterator, marker::PhantomData};
use rgb::RGBA8;
use serde::de::{Deserializer, Error, Visitor};
/// Deserialize a comma-separated string into any collection of `String`s.
pub fn string_comma_list<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
    D: Deserializer<'de>,
    T: FromIterator<String>,
{
    let visitor = StringListVisitor {
        sep: ',',
        container: PhantomData,
    };
    deserializer.deserialize_str(visitor)
}
/// Visitor that splits a single string on a separator character and collects
/// the parts into a container of `String`s.
struct StringListVisitor<T> {
/// Separator character to split the input on.
sep: char,
/// Marker for the target collection type (not stored at runtime).
container: PhantomData<T>,
}
impl<'de, T> Visitor<'de> for StringListVisitor<T>
where
    T: FromIterator<String>,
{
    type Value = T;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            formatter,
            "a string containing values separated by '{}'",
            self.sep
        )
    }

    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: Error,
    {
        // Split on the configured separator and hand each part to the target
        // collection as an owned string.
        let parts = v.split(self.sep).map(str::to_owned);
        Ok(parts.collect())
    }
}
/// Deserialize an optional RGBA color from a single integer in inverse (ABGR)
/// byte order.
pub fn rgba8_inverse_opt<'de, D>(deserializer: D) -> Result<Option<RGBA8>, D::Error>
where
    D: Deserializer<'de>,
{
    let visitor = Rgba8InverseOptVisitor;
    deserializer.deserialize_option(visitor)
}
struct Rgba8InverseOptVisitor;
impl<'de> Visitor<'de> for Rgba8InverseOptVisitor {
type Value = Option<RGBA8>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a RGBA color value encoded as integer in inverse order (ABGR)")
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: Error,
{
match u32::try_from(v) {
Ok(v) => self.visit_u32(v),
Err(e) => Err(Error::custom(e)),
}
}
fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Some(RGBA8::new(
(v & 0xff) as u8,
(v >> 8 & 0xff) as u8,
(v >> 16 & 0xff) as u8,
(v >> 24 & 0xff) as u8,
)))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: Error,
{
match u32::try_from(v) {
Ok(v) => self.visit_u32(v),
Err(e) => Err(Error::custom(e)),
}
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: Error,
{
Ok(None)
}
fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_u32(Self)
}
}
#[cfg(test)]
mod tests {
use serde::Deserialize;
use serde_test::{assert_de_tokens, assert_de_tokens_error, Token};
use super::*;
#[test]
fn deser_string_comma_list() {
#[derive(Debug, PartialEq, Eq, Deserialize)]
struct SimpleList {
#[serde(deserialize_with = "string_comma_list")]
value: Vec<String>,
}
assert_de_tokens(
&SimpleList {
value: vec!["a".to_owned(), "b".to_owned(), "c".to_owned()],
},
&[
Token::Struct {
name: "SimpleList",
len: 1,
},
Token::Str("value"),
Token::Str("a,b,c"),
Token::StructEnd,
],
);
}
#[test]
fn deser_rgba8_inverse_opt() {
#[derive(Debug, PartialEq, Eq, Deserialize)]
struct SimpleColor {
#[serde(deserialize_with = "rgba8_inverse_opt")]
value: Option<RGBA8>,
}
assert_de_tokens(
&SimpleColor {
value: Some(RGBA8::new(1, 2, 3, 4)),
},
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::I64(0x04030201),
Token::StructEnd,
],
);
assert_de_tokens_error::<SimpleColor>(
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::I64(i64::MIN),
Token::StructEnd,
],
"out of range integral type conversion attempted",
);
assert_de_tokens(
&SimpleColor {
value: Some(RGBA8::new(1, 2, 3, 4)),
},
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::U32(0x04030201),
Token::StructEnd,
],
);
assert_de_tokens(
&SimpleColor {
value: Some(RGBA8::new(1, 2, 3, 4)),
},
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::U64(0x04030201),
Token::StructEnd,
],
);
assert_de_tokens_error::<SimpleColor>(
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::U64(u64::MAX),
Token::StructEnd,
],
"out of range integral type conversion attempted",
);
assert_de_tokens(
&SimpleColor { value: None },
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::None,
Token::StructEnd,
],
);
assert_de_tokens(
&SimpleColor {
value: Some(RGBA8::new(1, 2, 3, 4)),
},
&[
Token::Struct {
name: "SimpleColor",
len: 1,
},
Token::Str("value"),
Token::Some,
Token::U32(0x04030201),
Token::StructEnd,
],
);
}
}

File diff suppressed because it is too large Load Diff

@ -7,7 +7,7 @@ For this to work, a few settings need to be set and some scene items created so
items to work on. This has to be done manually as the API doesn't allow to create new sources and
scenes or modify specific settings.
- Use at least OBS version `26.1.2`.
- Use at least OBS version `27.0.0`.
- Create a **source collection** called `OBWS-TEST`.
- Create a **profile** called `OBWS-TEST`.
- Create two **scene**s called `OBWS-TEST-Scene` and `OBWS-TEST-Scene2`.

@ -1,46 +1,24 @@
use std::{env, sync::Once, time::Duration};
use std::{env, sync::Once};
use anyhow::{ensure, Result};
use obws::{
requests::SceneItem,
responses::{Output, Profile, Scene, SceneCollection, SourceListItem, Transition},
responses::{Input, Scene},
Client,
};
use tokio::time;
pub const TEST_OUTPUT: &str = "virtualcam_output";
pub const TEST_COLLECTION: &str = "OBWS-TEST";
pub const TEST_PROFILE: &str = "OBWS-TEST";
pub const TEST_SCENE: &str = "OBWS-TEST-Scene";
pub const TEST_SCENE_2: &str = "OBWS-TEST-Scene2";
pub const TEXT_SOURCE: &str = "OBWS-TEST-Text";
pub const TEXT_SOURCE_2: &str = "OBWS-TEST-Text2";
pub const TEST_TRANSITION: &str = "OBWS-TEST-Transition";
pub const TEST_TRANSITION_2: &str = "OBWS-TEST-Transition2";
pub const TEST_SCENE_RENAME: &str = "OBWS-TEST-Scene-Renamed";
pub const TEST_SCENE_CREATE: &str = "OBWS-TEST-Scene-Created";
pub const TEST_TEXT: &str = "OBWS-TEST-Text";
pub const TEST_TEXT_2: &str = "OBWS-TEST-Text2";
pub const TEST_BROWSER: &str = "OBWS-TEST-Browser";
pub const TEST_BROWSER_RENAME: &str = "OBWS-TEST-Browser-Renamed";
pub const TEST_MEDIA: &str = "OBWS-TEST-Media";
pub const SOURCE_KIND_TEXT_FT2: &str = "text_ft2_source_v2";
pub const SOURCE_KIND_BROWSER: &str = "browser_source";
pub const SOURCE_KIND_VLC: &str = "vlc_source";
const SCENE_ORDER: &[SceneItem] = &[
SceneItem {
id: None,
name: Some(TEXT_SOURCE),
},
SceneItem {
id: None,
name: Some(TEXT_SOURCE_2),
},
SceneItem {
id: None,
name: Some(TEST_BROWSER),
},
SceneItem {
id: None,
name: Some(TEST_MEDIA),
},
];
pub const INPUT_KIND_TEXT_FT2: &str = "text_ft2_source_v2";
pub const INPUT_KIND_BROWSER: &str = "browser_source";
pub const INPUT_KIND_VLC: &str = "vlc_source";
static INIT: Once = Once::new();
@ -51,23 +29,7 @@ pub async fn new_client() -> Result<Client> {
});
let host = env::var("OBS_HOST").unwrap_or_else(|_| "localhost".to_owned());
let client = Client::connect(host, 4444).await?;
client.login(env::var("OBS_PASSWORD").ok()).await?;
let collections = client.scene_collections().list_scene_collections().await?;
ensure!(
collections.iter().any(is_required_scene_collection),
"scene collection `{}` not found, required for all tests",
TEST_COLLECTION
);
client
.scene_collections()
.set_current_scene_collection("OBWS-TEST")
.await?;
// Give OBS some time to load the scene collection
time::sleep(Duration::from_secs(1)).await;
let client = Client::connect(host, 4444, env::var("OBS_PASSWORD").ok()).await?;
ensure_obs_setup(&client).await?;
@ -75,13 +37,6 @@ pub async fn new_client() -> Result<Client> {
}
async fn ensure_obs_setup(client: &Client) -> Result<()> {
let outputs = client.outputs().list_outputs().await?;
ensure!(
outputs.iter().any(is_required_output),
"output `{}` not found, required for output tests",
TEST_OUTPUT
);
let scenes = client.scenes().get_scene_list().await?;
ensure!(
scenes.scenes.iter().any(is_required_scene),
@ -93,127 +48,115 @@ async fn ensure_obs_setup(client: &Client) -> Result<()> {
"scene `{}` not found, required for scenes tests",
TEST_SCENE
);
ensure!(
!scenes.scenes.iter().any(is_renamed_scene),
"scene `{}` found, must NOT be present for scenes tests",
TEST_SCENE_RENAME
);
ensure!(
!scenes.scenes.iter().any(is_created_scene),
"scene `{}` found, must NOT be present for scenes tests",
TEST_SCENE_CREATE
);
let sources = client.sources().get_sources_list().await?;
let inputs = client.inputs().get_input_list(None).await?;
ensure!(
sources.iter().any(is_required_source),
"text source `{}` not found, required for sources tests",
TEXT_SOURCE
inputs.iter().any(is_required_text_input),
"text input `{}` not found, required for inputs tests",
TEST_TEXT
);
ensure!(
sources.iter().any(is_required_source_2),
"text source `{}` not found, required for sources tests",
TEXT_SOURCE
inputs.iter().any(is_required_text_2_input),
"text input `{}` not found, required for inputs tests",
TEST_TEXT
);
ensure!(
sources.iter().any(is_required_browser_source),
"media source `{}` not found, required for sources tests",
inputs.iter().any(is_required_browser_input),
"media input `{}` not found, required for inputs tests",
TEST_BROWSER
);
ensure!(
sources.iter().any(is_required_media_source),
"media source `{}` not found, required for media control tests",
inputs.iter().any(is_required_media_input),
"media input `{}` not found, required for inputs tests",
TEST_MEDIA
);
let special_sources = client.sources().get_special_sources().await?;
ensure!(
special_sources.desktop_1.is_some(),
"desktop audio device required for sources tests"
!inputs.iter().any(is_renamed_input),
"browser input `{}` found, must NOT be present for inputs tests",
TEST_BROWSER_RENAME
);
let profiles = client.profiles().list_profiles().await?;
let profiles = client.config().get_profile_list().await?.profiles;
ensure!(
profiles.iter().any(is_required_profile),
profiles.iter().map(String::as_str).any(is_required_profile),
"profile `{}` not found, required for profiles tests",
TEST_PROFILE
);
let studio_mode_enabled = client.studio_mode().get_studio_mode_status().await?;
let studio_mode_enabled = client.general().get_studio_mode_enabled().await?;
ensure!(
!studio_mode_enabled,
"studio mode enabled, required to be disabled for studio mode tests"
);
let transitions = client.transitions().get_transition_list().await?;
ensure!(
transitions.transitions.iter().any(is_required_transition),
"transition `{}` not found, required for transitions tests",
TEST_TRANSITION
);
ensure!(
transitions.transitions.iter().any(is_required_transition_2),
"transition `{}` not found, required for transitions tests",
TEST_TRANSITION
);
client.scenes().set_current_scene(TEST_SCENE).await?;
client
.scenes()
.reorder_scene_items(Some(TEST_SCENE), SCENE_ORDER)
.await?;
client
.transitions()
.set_current_transition(TEST_TRANSITION)
.set_current_program_scene(TEST_SCENE)
.await?;
Ok(())
}
fn is_required_output(output: &Output) -> bool {
output.name == TEST_OUTPUT
}
fn is_required_scene_collection(output: &SceneCollection) -> bool {
output.sc_name == TEST_COLLECTION
}
fn is_required_scene(scene: &Scene) -> bool {
scene.name == TEST_SCENE
scene.scene_name == TEST_SCENE
}
fn is_required_scene_2(scene: &Scene) -> bool {
scene.name == TEST_SCENE_2
scene.scene_name == TEST_SCENE_2
}
fn is_renamed_scene(scene: &Scene) -> bool {
scene.scene_name == TEST_SCENE_RENAME
}
fn is_required_source(source: &SourceListItem) -> bool {
source.name == TEXT_SOURCE && is_text_input_source(source)
fn is_created_scene(scene: &Scene) -> bool {
scene.scene_name == TEST_SCENE_CREATE
}
fn is_required_source_2(source: &SourceListItem) -> bool {
source.name == TEXT_SOURCE_2 && is_text_input_source(source)
fn is_required_text_input(input: &Input) -> bool {
input.input_name == TEST_TEXT && is_text_input(input)
}
fn is_required_browser_source(source: &SourceListItem) -> bool {
source.name == TEST_BROWSER && is_browser_input_source(source)
fn is_required_text_2_input(input: &Input) -> bool {
input.input_name == TEST_TEXT_2 && is_text_input(input)
}
fn is_required_media_source(source: &SourceListItem) -> bool {
source.name == TEST_MEDIA && is_media_input_source(source)
fn is_required_browser_input(input: &Input) -> bool {
input.input_name == TEST_BROWSER && is_browser_input(input)
}
fn is_text_input_source(source: &SourceListItem) -> bool {
source.ty == "input" && source.type_id == SOURCE_KIND_TEXT_FT2
fn is_required_media_input(input: &Input) -> bool {
input.input_name == TEST_MEDIA && is_media_input(input)
}
fn is_browser_input_source(source: &SourceListItem) -> bool {
source.ty == "input" && source.type_id == SOURCE_KIND_BROWSER
fn is_renamed_input(input: &Input) -> bool {
input.input_name == TEST_BROWSER_RENAME
}
fn is_media_input_source(source: &SourceListItem) -> bool {
source.ty == "input" && source.type_id == SOURCE_KIND_VLC
fn is_text_input(input: &Input) -> bool {
input.input_kind == INPUT_KIND_TEXT_FT2
}
fn is_required_profile(profile: &Profile) -> bool {
profile.profile_name == TEST_PROFILE
fn is_browser_input(input: &Input) -> bool {
input.input_kind == INPUT_KIND_BROWSER
}
fn is_required_transition(transition: &Transition) -> bool {
transition.name == TEST_TRANSITION
fn is_media_input(input: &Input) -> bool {
input.input_kind == INPUT_KIND_VLC
}
fn is_required_transition_2(transition: &Transition) -> bool {
transition.name == TEST_TRANSITION_2
fn is_required_profile(profile: &str) -> bool {
profile == TEST_PROFILE
}
#[allow(unused_macros)]

@ -0,0 +1,57 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use obws::{
requests::SetProfileParameter,
responses::{Profiles, SceneCollections},
};
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.config();
let SceneCollections {
current_scene_collection_name,
scene_collections,
} = client.get_scene_collection_list().await?;
let other = scene_collections
.iter()
.find(|sc| *sc != &current_scene_collection_name)
.unwrap();
client.set_current_scene_collection(&other).await?;
client
.set_current_scene_collection(&current_scene_collection_name)
.await?;
let Profiles {
current_profile_name,
profiles,
} = client.get_profile_list().await?;
let other = profiles
.iter()
.find(|p| *p != &current_profile_name)
.unwrap();
client.set_current_profile(&other).await?;
client.set_current_profile(&current_profile_name).await?;
client.get_profile_parameter("General", "Name").await?;
client
.set_profile_parameter(SetProfileParameter {
parameter_category: "OBWS",
parameter_name: "Test",
parameter_value: Some("Value"),
})
.await?;
client
.set_profile_parameter(SetProfileParameter {
parameter_category: "OBWS",
parameter_name: "Test",
parameter_value: None,
})
.await?;
Ok(())
}

@ -1,7 +1,7 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use obws::requests::{Projector, ProjectorType, QtGeometry, QtRect};
use obws::requests::KeyModifiers;
use serde_json::json;
mod common;
@ -12,37 +12,19 @@ async fn main() -> Result<()> {
let client = client.general();
client.get_version().await?;
client.get_auth_required().await?;
let original = client.get_filename_formatting().await?;
client.set_filename_formatting("test").await?;
client.set_filename_formatting(&original).await?;
client.get_stats().await?;
client
.broadcast_custom_message("test", &json! {{"greeting":"hello"}})
.broadcast_custom_event(json! {{"hello": "world!"}})
.await?;
client.get_video_info().await?;
// Currently no API function available to close the projector again.
client.get_hotkey_list().await?;
client.trigger_hotkey_by_name("ReplayBuffer.Save").await?;
client
.open_projector(Projector {
ty: Some(ProjectorType::Multiview),
geometry: Some(&QtGeometry::new(QtRect {
left: 100,
top: 100,
right: 300,
bottom: 300,
})),
..Default::default()
})
.trigger_hotkey_by_key_sequence("OBS_KEY_P", KeyModifiers::default())
.await?;
client.trigger_hotkey_by_name("ReplayBuffer.Save").await?;
client.trigger_hotkey_by_sequence("OBS_KEY_P", &[]).await?;
let enabled = client.get_studio_mode_enabled().await?;
client.set_studio_mode_enabled(!enabled).await?;
client.set_studio_mode_enabled(enabled).await?;
Ok(())
}

@ -0,0 +1,52 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use obws::requests::{SetInputSettings, Volume};
use crate::common::{INPUT_KIND_BROWSER, TEST_BROWSER, TEST_BROWSER_RENAME, TEST_MEDIA};
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.inputs();
client.get_input_list(None).await?;
client.get_input_kind_list(false).await?;
client
.get_input_default_settings(INPUT_KIND_BROWSER)
.await?;
let settings = client
.get_input_settings(TEST_BROWSER)
.await?
.input_settings;
client
.set_input_settings(SetInputSettings {
input_name: TEST_BROWSER,
input_settings: settings,
overlay: false,
})
.await?;
let muted = client.get_input_mute(TEST_MEDIA).await?;
client.set_input_mute(TEST_MEDIA, !muted).await?;
client.set_input_mute(TEST_MEDIA, muted).await?;
client.toggle_input_mute(TEST_MEDIA).await?;
client.toggle_input_mute(TEST_MEDIA).await?;
let volume = client.get_input_volume(TEST_MEDIA).await?;
client
.set_input_volume(TEST_MEDIA, Volume::Mul(volume.input_volume_mul))
.await?;
client
.set_input_name(TEST_BROWSER, TEST_BROWSER_RENAME)
.await?;
client
.set_input_name(TEST_BROWSER_RENAME, TEST_BROWSER)
.await?;
Ok(())
}

@ -1,41 +0,0 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use futures_util::{pin_mut, StreamExt};
use obws::events::{Event, EventType};
use crate::common::TEST_MEDIA;
#[macro_use]
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let events = client.events()?;
let client = client.media_control();
pin_mut!(events);
client.play_pause_media(TEST_MEDIA, Some(false)).await?;
wait_for!(events, EventType::MediaPlaying { .. });
client.next_media(TEST_MEDIA).await?;
wait_for!(events, EventType::MediaNext { .. });
client.previous_media(TEST_MEDIA).await?;
wait_for!(events, EventType::MediaPrevious { .. });
client.play_pause_media(TEST_MEDIA, Some(true)).await?;
wait_for!(events, EventType::MediaPaused { .. });
let duration = client.get_media_duration(TEST_MEDIA).await?;
client.set_media_time(TEST_MEDIA, duration / 2).await?;
client.get_media_time(TEST_MEDIA).await?;
client.scrub_media(TEST_MEDIA, duration / 4).await?;
client.get_media_state(TEST_MEDIA).await?;
client.restart_media(TEST_MEDIA).await?;
wait_for!(events, EventType::MediaRestarted { .. });
client.stop_media(TEST_MEDIA).await?;
wait_for!(events, EventType::MediaStopped { .. });
Ok(())
}

@ -1,20 +0,0 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use crate::common::TEST_OUTPUT;
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.outputs();
client.list_outputs().await?;
client.get_output_info(TEST_OUTPUT).await?;
client.start_output(TEST_OUTPUT).await?;
client.stop_output(TEST_OUTPUT, Some(true)).await?;
Ok(())
}

@ -1,28 +0,0 @@
#![cfg(feature = "test-integration")]
use std::time::Duration;
use anyhow::Result;
use tokio::time;
use crate::common::TEST_PROFILE;
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.profiles();
client.list_profiles().await?;
let original = client.get_current_profile().await?;
client.set_current_profile(TEST_PROFILE).await?;
// Give OBS some time to switch profiles
time::sleep(Duration::from_millis(200)).await;
client.set_current_profile(&original).await?;
Ok(())
}

@ -1,48 +0,0 @@
#![cfg(feature = "test-integration")]
use std::{path::Path, time::Duration};
use anyhow::Result;
use futures_util::{pin_mut, StreamExt};
use obws::events::{Event, EventType};
use tokio::time;
#[macro_use]
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let events = client.events()?;
let client = client.recording();
pin_mut!(events);
client.get_recording_status().await?;
client.start_stop_recording().await?;
wait_for!(events, EventType::RecordingStarted { .. });
client.start_stop_recording().await?;
wait_for!(events, EventType::RecordingStopped { .. });
// Wait a little more as recording sometimes doesn't start when started/stopped frequently.
time::sleep(Duration::from_secs(1)).await;
client.start_recording().await?;
wait_for!(events, EventType::RecordingStarted { .. });
time::sleep(Duration::from_secs(1)).await;
client.pause_recording().await?;
wait_for!(events, EventType::RecordingPaused);
time::sleep(Duration::from_secs(1)).await;
client.resume_recording().await?;
wait_for!(events, EventType::RecordingResumed);
time::sleep(Duration::from_secs(1)).await;
client.stop_recording().await?;
wait_for!(events, EventType::RecordingStopped { .. });
let original = client.get_recording_folder().await?;
client.set_recording_folder(Path::new("test")).await?;
client.set_recording_folder(&original).await?;
Ok(())
}

@ -1,39 +0,0 @@
#![cfg(feature = "test-integration")]
use std::time::Duration;
use anyhow::Result;
use futures_util::{pin_mut, StreamExt};
use obws::events::{Event, EventType};
use tokio::time;
#[macro_use]
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let events = client.events()?;
let client = client.replay_buffer();
pin_mut!(events);
client.get_replay_buffer_status().await?;
client.start_stop_replay_buffer().await?;
wait_for!(events, EventType::ReplayStarted { .. });
client.start_stop_replay_buffer().await?;
wait_for!(events, EventType::ReplayStopped { .. });
// Wait a little more as the replay buffer sometimes doesn't start when started/stopped
// frequently.
time::sleep(Duration::from_secs(1)).await;
client.start_replay_buffer().await?;
wait_for!(events, EventType::ReplayStarted { .. });
client.save_replay_buffer().await?;
client.stop_replay_buffer().await?;
wait_for!(events, EventType::ReplayStopped { .. });
Ok(())
}

@ -1,34 +0,0 @@
#![cfg(feature = "test-integration")]
use std::time::Duration;
use anyhow::{Context, Result};
use tokio::time;
use crate::common::TEST_COLLECTION;
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.scene_collections();
let other = client
.list_scene_collections()
.await?
.into_iter()
.find(|sc| sc.sc_name != TEST_COLLECTION)
.context("only the test scene collection exists, but another is needed for tests")?
.sc_name;
let original = client.get_current_scene_collection().await?;
client.set_current_scene_collection(&other).await?;
// Give OBS some time to load the scene collection
time::sleep(Duration::from_secs(1)).await;
client.set_current_scene_collection(&original).await?;
Ok(())
}

@ -1,82 +0,0 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use either::Either;
use obws::requests::{
DuplicateSceneItem, SceneItemProperties, SceneItemRender, SceneItemSpecification,
};
use crate::common::{TEST_SCENE, TEXT_SOURCE};
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.scene_items();
client.get_scene_item_list(Some(TEST_SCENE)).await?;
let props = client
.get_scene_item_properties(Some(TEST_SCENE), Either::Left(TEXT_SOURCE))
.await?;
client
.reset_scene_item(Some(TEST_SCENE), Either::Left(TEXT_SOURCE))
.await?;
client
.set_scene_item_properties(SceneItemProperties {
scene_name: Some(TEST_SCENE),
item: Either::Left(TEXT_SOURCE),
position: Some((&props.position).into()),
rotation: Some(props.rotation),
scale: Some((&props.scale).into()),
crop: Some((&props.crop).into()),
visible: Some(props.visible),
locked: Some(props.locked),
bounds: Some((&props.bounds).into()),
})
.await?;
client
.set_scene_item_render(SceneItemRender {
scene_name: Some(TEST_SCENE),
source: TEXT_SOURCE,
item: None,
render: !props.visible,
})
.await?;
client
.set_scene_item_render(SceneItemRender {
scene_name: Some(TEST_SCENE),
source: TEXT_SOURCE,
item: None,
render: props.visible,
})
.await?;
let item = client
.duplicate_scene_item(DuplicateSceneItem {
from_scene: Some(TEST_SCENE),
to_scene: Some(TEST_SCENE),
item: SceneItemSpecification {
id: None,
name: Some(TEXT_SOURCE),
},
})
.await?;
client
.delete_scene_item(
Some(TEST_SCENE),
SceneItemSpecification {
id: Some(item.item.id),
name: None,
},
)
.await?;
// TODO: Need to create a source first, but there is no way to delete it afterwards.
// Therefore, we don't call this function until a method becomes available.
//client.add_scene_item(AddSceneItem{});
Ok(())
}

@ -1,86 +1,45 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use chrono::Duration;
use obws::requests::{SceneItem, SceneTransitionOverride};
use crate::common::{
TEST_BROWSER, TEST_MEDIA, TEST_SCENE, TEST_SCENE_2, TEST_TRANSITION, TEXT_SOURCE, TEXT_SOURCE_2,
};
use common::{TEST_SCENE, TEST_SCENE_CREATE, TEST_SCENE_RENAME};
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let general = client.general();
let client = client.scenes();
let original = client.get_current_scene().await?.name;
client.set_current_scene(TEST_SCENE_2).await?;
client.set_current_scene(&original).await?;
general.set_studio_mode_enabled(true).await?;
let scenes = client.get_scene_list().await?.scenes;
let current = client.get_current_program_scene().await?;
let other = &scenes
.iter()
.find(|s| s.scene_name != current)
.unwrap()
.scene_name;
client.set_current_program_scene(other).await?;
client.set_current_program_scene(&current).await?;
client.get_scene_list().await?;
let current = client.get_current_preview_scene().await?.unwrap();
let other = &scenes
.iter()
.find(|s| s.scene_name != current)
.unwrap()
.scene_name;
client.set_current_preview_scene(other).await?;
client.set_current_preview_scene(&current).await?;
// TODO: Currently no way of deleting scenes so we skip this to not
// fill up OBS with random scenes on every run.
// client.create_scene("__TEMP").await?;
client.set_scene_name(TEST_SCENE, TEST_SCENE_RENAME).await?;
client.set_scene_name(TEST_SCENE_RENAME, TEST_SCENE).await?;
client
.reorder_scene_items(
Some(TEST_SCENE),
&[
SceneItem {
id: None,
name: Some(TEXT_SOURCE_2),
},
SceneItem {
id: None,
name: Some(TEXT_SOURCE),
},
SceneItem {
id: None,
name: Some(TEST_BROWSER),
},
SceneItem {
id: None,
name: Some(TEST_MEDIA),
},
],
)
.await?;
client
.reorder_scene_items(
Some(TEST_SCENE),
&[
SceneItem {
id: None,
name: Some(TEXT_SOURCE),
},
SceneItem {
id: None,
name: Some(TEXT_SOURCE_2),
},
SceneItem {
id: None,
name: Some(TEST_BROWSER),
},
SceneItem {
id: None,
name: Some(TEST_MEDIA),
},
],
)
.await?;
client.create_scene(TEST_SCENE_CREATE).await?;
client.remove_scene(TEST_SCENE_CREATE).await?;
client
.set_scene_transition_override(SceneTransitionOverride {
scene_name: TEST_SCENE,
transition_name: TEST_TRANSITION,
transition_duration: Some(Duration::milliseconds(10)),
})
.await?;
client.get_scene_transition_override(TEST_SCENE).await?;
client.remove_scene_transition_override(TEST_SCENE).await?;
general.set_studio_mode_enabled(false).await?;
Ok(())
}

@ -1,17 +1,11 @@
#![cfg(feature = "test-integration")]
use anyhow::{Context, Result};
use chrono::Duration;
use obws::{
common::MonitorType,
requests::{
AddFilter, MoveFilter, ReorderFilter, SourceFilterSettings, SourceFilterVisibility,
SourceScreenshot, SourceSettings, Volume,
},
};
use serde_json::json;
use std::env;
use crate::common::{SOURCE_KIND_VLC, TEST_BROWSER, TEST_MEDIA, TEXT_SOURCE};
use anyhow::Result;
use obws::requests::{GetSourceScreenshot, SaveSourceScreenshot};
use crate::common::TEST_TEXT;
mod common;
@ -20,178 +14,26 @@ async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.sources();
client.get_media_sources_list().await?;
client.get_sources_list().await?;
client.get_sources_types_list().await?;
client.get_source_active(TEST_MEDIA).await?;
client.get_audio_active(TEST_MEDIA).await?;
client.get_source_default_settings(SOURCE_KIND_VLC).await?;
client.refresh_browser_source(TEST_BROWSER).await?;
// Volume
let original = client.get_volume(TEXT_SOURCE, None).await?.volume;
client.get_volume(TEXT_SOURCE, Some(true)).await?;
client.get_source_active(TEST_TEXT).await?;
client
.set_volume(Volume {
source: TEXT_SOURCE,
volume: 0.5,
use_decibel: None,
.get_source_screenshot(GetSourceScreenshot {
source_name: TEST_TEXT,
image_width: Some(100),
image_height: Some(100),
image_compression_quality: Some(50),
image_format: "jpg",
})
.await?;
let file = env::temp_dir().join("obws-test-image.png");
client
.set_volume(Volume {
source: TEXT_SOURCE,
volume: original,
use_decibel: None,
})
.await?;
// Mute
let original = client.get_mute(TEXT_SOURCE).await?.muted;
client.toggle_mute(TEXT_SOURCE).await?;
client.set_mute(TEXT_SOURCE, original).await?;
// Source name
let new_name = format!("{}-Test", TEXT_SOURCE);
client.set_source_name(TEXT_SOURCE, &new_name).await?;
client.set_source_name(&new_name, TEXT_SOURCE).await?;
// Sync offset
let original = client.get_sync_offset(TEXT_SOURCE).await?;
client
.set_sync_offset(TEXT_SOURCE, Duration::milliseconds(200))
.await?;
client.set_sync_offset(TEXT_SOURCE, original.offset).await?;
// Source settings
let settings = client
.get_source_settings::<serde_json::Value>(TEXT_SOURCE, None)
.await?;
client
.set_source_settings::<serde_json::Value>(SourceSettings {
source_name: &settings.source_name,
source_type: Some(&settings.source_type),
source_settings: &settings.source_settings,
})
.await?;
// TODO: GDI+ only on windows?
// Freetype2 properties
let props = client.get_text_freetype2_properties(TEXT_SOURCE).await?;
client
.set_text_freetype2_properties((&props).into())
.await?;
// Special sources
client.get_special_sources().await?;
// Filters
const FILTER1: &str = "Scroll-Test1";
const FILTER2: &str = "Scroll-Test2";
client.get_source_filters(TEXT_SOURCE).await?;
client
.add_filter_to_source(AddFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
filter_type: "scroll_filter",
filter_settings: &json! {{
"limit_cx": false,
"limit_cy": false,
"speed_x": 50.0
}},
})
.await?;
client
.get_source_filter_info::<serde_json::Value>(TEXT_SOURCE, FILTER1)
.await?;
client
.add_filter_to_source(AddFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER2,
filter_type: "scroll_filter",
filter_settings: &json! {{
"limit_cx": false,
"limit_cy": false,
"speed_x": 20.0
}},
})
.await?;
client
.reorder_source_filter(ReorderFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
new_index: 1,
})
.await?;
client
.move_source_filter(MoveFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
movement_type: obws::requests::MovementType::Up,
})
.await?;
client
.set_source_filter_settings(SourceFilterSettings {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
filter_settings: &json! {{
"limit_cx": false,
"limit_cy": false,
"speed_x": -100.0
}},
})
.await?;
client
.set_source_filter_visibility(SourceFilterVisibility {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
filter_enabled: false,
})
.await?;
client
.remove_filter_from_source(TEXT_SOURCE, FILTER1)
.await?;
client
.remove_filter_from_source(TEXT_SOURCE, FILTER2)
.await?;
// Audio monitor type
let source = client
.get_special_sources()
.await?
.desktop_1
.context("desktop audio device required for tests")?;
let original = client.get_audio_monitor_type(&source).await?;
client
.set_audio_monitor_type(&source, MonitorType::MonitorAndOutput)
.await?;
client.set_audio_monitor_type(&source, original).await?;
// Take source screenshot
client
.take_source_screenshot(SourceScreenshot {
source_name: Some(TEXT_SOURCE),
embed_picture_format: Some("png"),
width: Some(10),
..Default::default()
.save_source_screenshot(SaveSourceScreenshot {
source_name: TEST_TEXT,
image_file_path: &file,
image_width: None,
image_height: None,
image_compression_quality: None,
image_format: "png",
})
.await?;

@ -1,35 +0,0 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use chrono::Duration;
use obws::requests::Transition;
use crate::common::{TEST_SCENE_2, TEST_TRANSITION};
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let client = client.studio_mode();
client.get_studio_mode_status().await?;
client.enable_studio_mode().await?;
let original = client.get_preview_scene().await?.name;
client.set_preview_scene(TEST_SCENE_2).await?;
client.set_preview_scene(&original).await?;
client
.transition_to_program(Some(Transition {
name: TEST_TRANSITION,
duration: Some(Duration::milliseconds(10)),
}))
.await?;
client.disable_studio_mode().await?;
client.toggle_studio_mode().await?;
client.toggle_studio_mode().await?;
Ok(())
}

@ -1,37 +0,0 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use crate::common::TEST_TRANSITION_2;
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let studio_mode = client.studio_mode();
let client = client.transitions();
client.get_transition_list().await?;
let original = client.get_current_transition().await?.name;
client.set_current_transition(TEST_TRANSITION_2).await?;
client.set_current_transition(&original).await?;
let original = client.get_transition_duration().await?;
client.set_transition_duration(original * 2).await?;
client.set_transition_duration(original).await?;
client.get_transition_position().await?;
let settings = client.get_transition_settings(TEST_TRANSITION_2).await?;
client
.set_transition_settings(TEST_TRANSITION_2, &settings)
.await?;
studio_mode.enable_studio_mode().await?;
client.set_t_bar_position(0.5, Some(false)).await?;
client.set_t_bar_position(0.0, Some(false)).await?;
client.release_t_bar().await?;
studio_mode.disable_studio_mode().await?;
Ok(())
}

@ -1,38 +0,0 @@
#![cfg(feature = "test-integration")]
use std::time::Duration;
use anyhow::Result;
use futures_util::{pin_mut, StreamExt};
use obws::events::{Event, EventType};
use tokio::time;
#[macro_use]
mod common;
#[tokio::test]
async fn main() -> Result<()> {
let client = common::new_client().await?;
let events = client.events()?;
let client = client.virtual_cam();
pin_mut!(events);
client.get_virtual_cam_status().await?;
client.start_stop_virtual_cam().await?;
wait_for!(events, EventType::VirtualCamStarted { .. });
client.start_stop_virtual_cam().await?;
wait_for!(events, EventType::VirtualCamStopped { .. });
// Wait a little more as the virtual cam sometimes doesn't start when started/stopped
// frequently.
time::sleep(Duration::from_secs(1)).await;
client.start_virtual_cam().await?;
wait_for!(events, EventType::VirtualCamStarted { .. });
client.stop_virtual_cam().await?;
wait_for!(events, EventType::VirtualCamStopped { .. });
Ok(())
}
Loading…
Cancel
Save