Add integration tests and fix some API calls

Branch: pull/1/head
Author: Dominik Nakamura
Parent: da5dc4a6dc
Commit: 601ce2dded

@@ -19,9 +19,10 @@ chrono = { version = "0.4.19", default-features = false, features = ["std"] }
either = { version = "1.6.1", features = ["serde"] }
futures-util = { version = "0.3.8", features = ["sink"] }
log = "0.4.11"
rgb = { version = "0.8.25", default-features = false }
semver = { version = "0.11.0", features = ["serde"] }
serde = { version = "1.0.118", features = ["derive"] }
serde_json = "1.0.60"
serde_json = "1.0.61"
serde_with = "1.6.0"
sha2 = "0.9.2"
thiserror = "1.0.23"
@@ -30,7 +31,10 @@ tokio-tungstenite = "0.12.0"
tungstenite = { version = "0.11.1", default-features = false }
[dev-dependencies]
anyhow = "1.0.36"
anyhow = "1.0.37"
dotenv = "0.15.0"
pretty_env_logger = "0.4.0"
tokio = { version = "0.3.6", features = ["fs", "macros", "rt-multi-thread", "time"] }
[features]
test-integration = []
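
The new `test-integration` feature exists purely to gate the integration tests, so a plain `cargo test` skips them; each test binary added below opts in with a crate-level attribute. A minimal sketch:

```rust
// Compiled only when the feature is enabled, i.e. when running
// `cargo test --features test-integration`.
#![cfg(feature = "test-integration")]
```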

@@ -19,13 +19,9 @@ async fn main() -> Result<()> {
let screenshot = client
.sources()
.take_source_screenshot(SourceScreenshot {
source_name: Some("Start"),
source_name: "Start",
embed_picture_format: Some("png"),
save_to_file_path: None,
file_format: None,
compress_quality: None,
width: None,
height: None,
..Default::default()
})
.await?;

@@ -27,8 +27,7 @@ impl<'a> Outputs<'a> {
.map(|o| o.output_info)
}
/// Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which
/// add outputs to OBS may not function properly when they are controlled in this way.
/// Start an output.
///
/// - `output_name`: Output name.
pub async fn start_output(&self, output_name: &str) -> Result<()> {
@@ -37,8 +36,7 @@ impl<'a> Outputs<'a> {
.await
}
/// Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which
/// add outputs to OBS may not function properly when they are controlled in this way.
/// Stop an output.
///
/// - `output_name`: Output name.
/// - `force`: Force stop (default: false).
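
The trimmed docs above describe start/stop as a plain pair; a usage sketch, reusing the output name the integration tests below expect to exist:

```rust
// Sketch: `client` is a connected obws::Client; "virtualcam_output" is
// the output the integration tests are set up against.
let outputs = client.outputs();
outputs.start_output("virtualcam_output").await?;
outputs.stop_output("virtualcam_output", Some(true)).await?; // force stop
```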

@@ -87,16 +87,6 @@ impl<'a> Sources<'a> {
.await
}
/// Get the audio's active status of a specified source.
///
/// - `source_name`: Source name.
pub async fn get_audio_active(&self, source_name: &str) -> Result<bool> {
self.client
.send_message::<responses::AudioActive>(RequestType::GetAudioActive { source_name })
.await
.map(|aa| aa.audio_active)
}
/// Note: If the new name already exists as a source, obs-websocket will return an error.
///
/// - `source_name`: Source name.

@@ -35,21 +35,13 @@ pub enum EventType {
/// List of scene items in the new scene.
sources: Vec<SceneItem>,
},
/// Note: This event is not fired when the scenes are reordered.
/// The scene list has been modified. Scenes have been added, removed, or renamed.
ScenesChanged,
/// Triggered when switching to another scene collection or when renaming the current scene
/// collection.
#[serde(rename_all = "camelCase")]
SceneCollectionChanged {
/// Name of the new current scene collection.
scene_collection: String,
},
SceneCollectionChanged,
/// Triggered when a scene collection is created, added, renamed, or removed.
#[serde(rename_all = "camelCase")]
SceneCollectionListChanged {
/// Scene collections list.
scene_collections: Vec<SceneCollection>,
},
SceneCollectionListChanged,
// --------------------------------
// Transitions
// --------------------------------
@@ -191,8 +183,7 @@ pub enum EventType {
// --------------------------------
// Recording
// --------------------------------
/// Note: `recording_filename` is not provided in this event because this information is not
/// available at the time this event is emitted.
/// A request to start recording has been issued.
RecordingStarting,
/// Recording started successfully.
RecordingStarted,
@@ -228,7 +219,7 @@ pub enum EventType {
/// Identifier provided by the sender.
realm: String,
/// User-defined data.
data: serde_json::Value,
data: serde_json::Map<String, serde_json::Value>,
},
// --------------------------------
// Sources
@@ -448,7 +439,7 @@ pub enum EventType {
/// Name of the scene being previewed.
scene_name: String,
/// List of sources composing the scene.
soruces: Vec<SceneItem>,
sources: Vec<SceneItem>,
},
/// Studio Mode has been enabled or disabled.
#[serde(rename_all = "kebab-case")]
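
One consumer-visible change above: the custom-message event's `data` field is now a JSON object map instead of an arbitrary `serde_json::Value`. A handling sketch (the variant name is assumed from the obs-websocket event it mirrors):

```rust
// Sketch: `data` is now guaranteed to be an object, so keys can be read
// directly without matching on a Value first.
if let EventType::BroadcastCustomMessage { realm, data } = event_type {
    if realm == "test" {
        let _greeting = data.get("greeting"); // Option<&serde_json::Value>
    }
}
```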

@@ -7,6 +7,8 @@ use either::Either;
use serde::Serialize;
use serde_with::skip_serializing_none;
pub use rgb::RGBA8;
use crate::common::{Align, Alignment, BoundsType, FontFlags, MonitorType, StreamType, Valign};
mod ser;
@@ -74,11 +76,6 @@ pub(crate) enum RequestType<'a> {
source: &'a str,
},
#[serde(rename_all = "camelCase")]
GetAudioActive {
/// Source name.
source_name: &'a str,
},
#[serde(rename_all = "camelCase")]
SetSourceName {
/// Source name.
source_name: &'a str,
@@ -458,9 +455,11 @@ pub struct TextFreetype2Properties<'a> {
/// Source name.
pub source: &'a str,
/// Gradient top color.
pub color1: Option<u32>,
#[serde(serialize_with = "ser::rgba8_inverse_opt")]
pub color1: Option<RGBA8>,
/// Gradient bottom color.
pub color2: Option<u32>,
#[serde(serialize_with = "ser::rgba8_inverse_opt")]
pub color2: Option<RGBA8>,
/// Custom width (0 to disable).
pub custom_width: Option<u32>,
/// Drop shadow.
@@ -482,6 +481,25 @@ pub struct TextFreetype2Properties<'a> {
pub word_wrap: Option<bool>,
}
impl<'a> From<&'a crate::responses::TextFreetype2Properties> for TextFreetype2Properties<'a> {
fn from(p: &'a crate::responses::TextFreetype2Properties) -> Self {
Self {
source: &p.source,
color1: p.color1,
color2: p.color2,
custom_width: p.custom_width,
drop_shadow: Some(p.drop_shadow),
font: p.font.as_ref().map(Into::into),
from_file: Some(p.from_file),
log_mode: Some(p.log_mode),
outline: Some(p.outline),
text: Some(&p.text),
text_file: p.text_file.as_deref(),
word_wrap: Some(p.word_wrap),
}
}
}
/// Request information for [`add_filter_to_source`](crate::client::Sources::add_filter_to_source).
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
@@ -569,9 +587,8 @@ pub struct SourceFilterVisibility<'a> {
#[derive(Debug, Default, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SourceScreenshot<'a> {
/// Source name. Note that, since scenes are also sources, you can also provide a scene name. If
/// not provided, the currently active scene is used.
pub source_name: Option<&'a str>,
/// Source name. Note that, since scenes are also sources, you can also provide a scene name.
pub source_name: &'a str,
/// Format of the Data URI encoded picture. Can be "png", "jpg", "jpeg" or "bmp" (or any other
/// value supported by Qt's Image module).
pub embed_picture_format: Option<&'a str>,
@@ -725,6 +742,17 @@ pub struct Font<'a> {
pub style: Option<&'a str>,
}
impl<'a> From<&'a crate::responses::Font> for Font<'a> {
fn from(f: &'a crate::responses::Font) -> Self {
Self {
face: Some(&f.face),
flags: Some(f.flags),
size: Some(f.size),
style: Some(&f.style),
}
}
}
/// Request information for
/// [`get_scene_item_properties`](crate::client::SceneItems::get_scene_item_properties),
/// [`set_scene_item_properties`](crate::client::SceneItems::set_scene_item_properties) as part of
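
The two `From` impls added in this file exist so that fetched properties can be sent back unchanged; the sources integration test below does exactly this round trip:

```rust
// Sketch: fetch the current Freetype2 properties, then feed them back
// through the new `From` impl (`sources` is the client's sources group).
let props = sources.get_text_freetype2_properties("OBWS-TEST-Text").await?;
sources.set_text_freetype2_properties((&props).into()).await?;
```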

@@ -1,4 +1,5 @@
use chrono::Duration;
use rgb::RGBA8;
use serde::ser::{Error, Serializer};
pub fn duration_millis_opt<S>(value: &Option<Duration>, serializer: S) -> Result<S::Ok, S::Error>
@@ -40,3 +41,19 @@ where
None => serializer.serialize_none(),
}
}
pub fn rgba8_inverse_opt<S>(value: &Option<RGBA8>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match value {
Some(rgba) => {
let abgr = (rgba.a as u32) << 24
| (rgba.b as u32) << 16
| (rgba.g as u32) << 8
| (rgba.r as u32);
serializer.serialize_some(&abgr)
}
None => serializer.serialize_none(),
}
}
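
The serializer packs the channels in inverse (ABGR) order, matching the deserializer below; a quick check of the packing:

```rust
use rgb::RGBA8;

// Pure red with full alpha: a=0xff, b=0x00, g=0x00, r=0xff -> 0xff0000ff.
let c = RGBA8::new(0xff, 0x00, 0x00, 0xff);
let abgr = (c.a as u32) << 24 | (c.b as u32) << 16 | (c.g as u32) << 8 | (c.r as u32);
assert_eq!(abgr, 0xff00_00ff);
```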

@@ -1,7 +1,9 @@
use std::convert::TryFrom;
use std::fmt;
use std::iter::FromIterator;
use std::marker::PhantomData;
use rgb::RGBA8;
use serde::de::{Deserializer, Error, Visitor};
pub fn string_comma_list<'de, D, T>(deserializer: D) -> Result<T, D::Error>
@@ -42,6 +44,69 @@ where
}
}
pub fn rgba8_inverse<'de, D>(deserializer: D) -> Result<Option<RGBA8>, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_u32(Rgba8InverseOptVisitor)
}
struct Rgba8InverseOptVisitor;
impl<'de> Visitor<'de> for Rgba8InverseOptVisitor {
type Value = Option<RGBA8>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a RGBA color value encoded as integer in inverse order (ABGR)")
}
fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Some(RGBA8::new(
(v & 0xff) as u8,
(v >> 8 & 0xff) as u8,
(v >> 16 & 0xff) as u8,
(v >> 24 & 0xff) as u8,
)))
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: Error,
{
match u32::try_from(v) {
Ok(v) => self.visit_u32(v),
Err(e) => Err(Error::custom(e)),
}
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: Error,
{
match u32::try_from(v) {
Ok(v) => self.visit_u32(v),
Err(e) => Err(Error::custom(e)),
}
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: Error,
{
Ok(None)
}
fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_u32(Self)
}
}
#[cfg(test)]
mod tests {
use serde::Deserialize;
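
A decode sketch matching the visitor above; the wrapper struct is illustrative only, mirroring how `responses::TextFreetype2Properties` applies the attribute:

```rust
use rgb::RGBA8;
use serde::Deserialize;

#[derive(Deserialize)]
struct Colors {
    #[serde(default, deserialize_with = "rgba8_inverse")]
    color1: Option<RGBA8>,
}

// 4278190335 == 0xff0000ff, read in ABGR order: r=0xff, g=0, b=0, a=0xff.
let c: Colors = serde_json::from_str(r#"{"color1": 4278190335}"#).unwrap();
assert_eq!(c.color1, Some(RGBA8::new(0xff, 0, 0, 0xff)));
```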

@@ -6,6 +6,7 @@ use std::path::PathBuf;
use chrono::Duration;
use serde::Deserialize;
pub use rgb::RGBA8;
pub use semver::Version as SemVerVersion;
use crate::common::{
@@ -209,14 +210,6 @@ pub struct Mute {
pub muted: bool,
}
/// Response value for [`get_audio_active`](crate::client::Sources::get_audio_active).
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct AudioActive {
/// Audio active status of the source.
pub audio_active: bool,
}
/// Response value for [`get_sync_offset`](crate::client::Sources::get_sync_offset).
#[derive(Debug, Deserialize)]
pub struct SyncOffset {
@@ -302,9 +295,11 @@ pub struct TextFreetype2Properties {
/// Source name.
pub source: String,
/// Gradient top color.
pub color1: Option<u32>,
#[serde(default, deserialize_with = "de::rgba8_inverse")]
pub color1: Option<RGBA8>,
/// Gradient bottom color.
pub color2: Option<u32>,
#[serde(default, deserialize_with = "de::rgba8_inverse")]
pub color2: Option<RGBA8>,
/// Custom width (0 to disable).
pub custom_width: Option<u32>,
/// Drop shadow.
@@ -312,7 +307,7 @@ pub struct TextFreetype2Properties {
pub drop_shadow: bool,
/// Holds data for the font. Ex:
/// `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`.
pub font: Font,
pub font: Option<Font>,
/// Read text from the specified file.
#[serde(default)]
pub from_file: bool,
@@ -823,6 +818,7 @@ pub struct Scene {
/// Name of the scene.
pub name: String,
/// Ordered list of the scene's source items.
#[serde(default)]
pub sources: Vec<SceneItem>,
}
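
The `#[serde(default)]` added above makes the `sources` key optional when deserializing a scene; a quick check:

```rust
// Sketch: a scene payload without a "sources" key now falls back to an
// empty list instead of failing to deserialize.
let s: obws::responses::Scene = serde_json::from_str(r#"{"name": "OBWS-TEST-Scene"}"#)?;
assert!(s.sources.is_empty());
```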

@@ -0,0 +1,14 @@
# Integration tests
The integration tests connect to your OBS instance and send several commands against the
obs-websocket API to make sure most of the API works as expected.
For this to work, a few settings need to be set and some scene items created so that the tests
have items to work on. This has to be done manually, as the API doesn't allow creating new
sources and scenes or modifying specific settings.
- Use at least OBS version `26.1.0`.
- Create a **scene collection** called `OBWS-TEST`.
- Create a **scene** called `OBWS-TEST-Scene`.
- Create a **Freetype2 text source** called `OBWS-TEST-Text`.
- Make sure a global **Desktop Audio** device is configured.
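
The tests are gated behind the `test-integration` cargo feature and read an optional `OBS_PASSWORD` from the environment (a `.env` file also works, via dotenv), so a typical run is `cargo test --features test-integration`.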

@@ -0,0 +1,92 @@
use std::sync::Once;
use std::time::Duration;
use anyhow::{ensure, Result};
use obws::{
responses::{Output, Scene, SceneCollection, SourceListItem},
Client,
};
use tokio::time;
pub const TEST_OUTPUT: &str = "virtualcam_output";
pub const TEST_COLLECTION: &str = "OBWS-TEST";
pub const TEST_SCENE: &str = "OBWS-TEST-Scene";
pub const TEXT_SOURCE: &str = "OBWS-TEST-Text";
static INIT: Once = Once::new();
pub async fn new_client() -> Result<Client> {
INIT.call_once(|| {
dotenv::dotenv().ok();
pretty_env_logger::init();
});
let client = Client::connect("localhost", 4444).await?;
client.login(std::env::var("OBS_PASSWORD").ok()).await?;
let collections = client.scene_collections().list_scene_collections().await?;
ensure!(
collections.iter().any(is_required_scene_collection),
"scene collection `{}` not found, required for all tests",
TEST_COLLECTION
);
client
.scene_collections()
.set_current_scene_collection("OBWS-TEST")
.await?;
// Give OBS some time to load the scene collection
time::sleep(Duration::from_millis(500)).await;
ensure_obs_setup(&client).await?;
Ok(client)
}
async fn ensure_obs_setup(client: &Client) -> Result<()> {
let outputs = client.outputs().list_outputs().await?;
ensure!(
outputs.iter().any(is_required_output),
"output `{}` not found, required for output tests",
TEST_OUTPUT
);
let scenes = client.scenes().get_scene_list().await?;
ensure!(
scenes.scenes.iter().any(is_required_scene),
"scene `{}` not found, required for scenes tests",
TEST_SCENE
);
let sources = client.sources().get_sources_list().await?;
ensure!(
sources.iter().any(is_required_source),
"text source `{}` not found, required for sources tests",
TEXT_SOURCE
);
let special_sources = client.sources().get_special_sources().await?;
ensure!(
special_sources.desktop_1.is_some(),
"desktop audio device required for sources tests"
);
Ok(())
}
fn is_required_output(output: &Output) -> bool {
output.name == TEST_OUTPUT
}
fn is_required_scene_collection(collection: &SceneCollection) -> bool {
collection.sc_name == TEST_COLLECTION
}
fn is_required_scene(scene: &Scene) -> bool {
scene.name == TEST_SCENE
}
fn is_required_source(source: &SourceListItem) -> bool {
source.name == TEXT_SOURCE && source.ty == "input" && source.type_id == "text_ft2_source_v2"
}

@@ -0,0 +1,39 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use obws::requests::{Projector, ProjectorType};
use serde_json::json;
mod common;
#[tokio::test]
async fn general() -> Result<()> {
let client = common::new_client().await?;
let client = client.general();
client.get_version().await?;
client.get_auth_required().await?;
let original = client.get_filename_formatting().await?;
client.set_filename_formatting("test").await?;
client.set_filename_formatting(&original).await?;
client.get_stats().await?;
client
.broadcast_custom_message("test", &json! {{"greeting":"hello"}})
.await?;
client.get_video_info().await?;
// Currently no API function available to close the projector again.
client
.open_projector(Projector {
ty: Some(ProjectorType::Multiview),
..Default::default()
})
.await?;
Ok(())
}

@@ -0,0 +1,20 @@
#![cfg(feature = "test-integration")]
use anyhow::Result;
use common::TEST_OUTPUT;
mod common;
#[tokio::test]
async fn general() -> Result<()> {
let client = common::new_client().await?;
let client = client.outputs();
client.list_outputs().await?;
client.get_output_info(TEST_OUTPUT).await?;
client.start_output(TEST_OUTPUT).await?;
client.stop_output(TEST_OUTPUT, Some(true)).await?;
Ok(())
}

@@ -0,0 +1,192 @@
#![cfg(feature = "test-integration")]
use anyhow::{Context, Result};
use chrono::Duration;
use obws::{
common::MonitorType,
requests::{
AddFilter, MoveFilter, ReorderFilter, SourceFilterSettings, SourceFilterVisibility,
SourceScreenshot, SourceSettings, Volume,
},
};
use serde_json::json;
use common::TEXT_SOURCE;
mod common;
#[tokio::test]
async fn sources() -> Result<()> {
let client = common::new_client().await?;
let client = client.sources();
client.get_sources_list().await?;
client.get_sources_types_list().await?;
// Volume
let original = client.get_volume(TEXT_SOURCE, None).await?.volume;
client.get_volume(TEXT_SOURCE, Some(true)).await?;
client
.set_volume(Volume {
source: TEXT_SOURCE,
volume: 0.5,
use_decibel: None,
})
.await?;
client
.set_volume(Volume {
source: TEXT_SOURCE,
volume: original,
use_decibel: None,
})
.await?;
// Mute
let original = client.get_mute(TEXT_SOURCE).await?.muted;
client.toggle_mute(TEXT_SOURCE).await?;
client.set_mute(TEXT_SOURCE, original).await?;
// Source name
let new_name = format!("{}-Test", TEXT_SOURCE);
client.set_source_name(TEXT_SOURCE, &new_name).await?;
client.set_source_name(&new_name, TEXT_SOURCE).await?;
// Sync offset
let original = client.get_sync_offset(TEXT_SOURCE).await?;
client
.set_sync_offset(TEXT_SOURCE, Duration::milliseconds(200))
.await?;
client.set_sync_offset(TEXT_SOURCE, original.offset).await?;
// Source settings
let settings = client
.get_source_settings::<serde_json::Value>(TEXT_SOURCE, None)
.await?;
client
.set_source_settings::<serde_json::Value>(SourceSettings {
source_name: &settings.source_name,
source_type: Some(&settings.source_type),
source_settings: &settings.source_settings,
})
.await?;
// TODO: GDI+ only on windows?
// Freetype2 properties
let props = client.get_text_freetype2_properties(TEXT_SOURCE).await?;
client
.set_text_freetype2_properties((&props).into())
.await?;
// Special sources
client.get_special_sources().await?;
// Filters
const FILTER1: &str = "Scroll-Test1";
const FILTER2: &str = "Scroll-Test2";
client.get_source_filters(TEXT_SOURCE).await?;
client
.add_filter_to_source(AddFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
filter_type: "scroll_filter",
filter_settings: &json! {{
"limit_cx": false,
"limit_cy": false,
"speed_x": 50.0
}},
})
.await?;
client
.get_source_filter_info::<serde_json::Value>(TEXT_SOURCE, FILTER1)
.await?;
client
.add_filter_to_source(AddFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER2,
filter_type: "scroll_filter",
filter_settings: &json! {{
"limit_cx": false,
"limit_cy": false,
"speed_x": 20.0
}},
})
.await?;
client
.reorder_source_filter(ReorderFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
new_index: 1,
})
.await?;
client
.move_source_filter(MoveFilter {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
movement_type: obws::requests::MovementType::Up,
})
.await?;
client
.set_source_filter_settings(SourceFilterSettings {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
filter_settings: &json! {{
"limit_cx": false,
"limit_cy": false,
"speed_x": -100.0
}},
})
.await?;
client
.set_source_filter_visibility(SourceFilterVisibility {
source_name: TEXT_SOURCE,
filter_name: FILTER1,
filter_enabled: false,
})
.await?;
client
.remove_filter_from_source(TEXT_SOURCE, FILTER1)
.await?;
client
.remove_filter_from_source(TEXT_SOURCE, FILTER2)
.await?;
// Audio monitor type
let source = client
.get_special_sources()
.await?
.desktop_1
.context("desktop audio device required for tests")?;
let original = client.get_audio_monitor_type(&source).await?;
client
.set_audio_monitor_type(&source, MonitorType::MonitorAndOutput)
.await?;
client.set_audio_monitor_type(&source, original).await?;
// Take source screenshot
client
.take_source_screenshot(SourceScreenshot {
source_name: TEXT_SOURCE,
embed_picture_format: Some("png"),
width: Some(10),
..Default::default()
})
.await?;
Ok(())
}