diff --git a/Cargo.lock b/Cargo.lock index b183cf65..bfc3e711 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2228,9 +2228,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.15.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" @@ -4401,7 +4401,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.5", ] [[package]] @@ -4421,17 +4421,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -4442,9 +4442,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = 
"windows_aarch64_msvc" @@ -4466,9 +4466,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -4490,9 +4490,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -4514,9 +4514,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -4538,9 +4538,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -4550,9 +4550,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -4574,9 +4574,9 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winit" diff --git a/code/intermediate/tutorial13-hdr/Cargo.toml b/code/intermediate/tutorial13-hdr/Cargo.toml index 8c359ac1..bdb969f3 100644 --- a/code/intermediate/tutorial13-hdr/Cargo.toml +++ b/code/intermediate/tutorial13-hdr/Cargo.toml @@ -29,7 +29,6 @@ features = ["png", "jpeg", "hdr"] reqwest = { version = "0.11" } console_error_panic_hook = "0.1" console_log = "1.0" -wgpu = { version = "0.17", features = ["webgl"]} wasm-bindgen = "0.2" wasm-bindgen-futures = "0.4" web-sys = { version = "0.3", features = [ diff --git a/code/intermediate/tutorial13-hdr/src/equirectangular.wgsl b/code/intermediate/tutorial13-hdr/src/equirectangular.wgsl index a1374427..846ae598 100644 --- a/code/intermediate/tutorial13-hdr/src/equirectangular.wgsl +++ b/code/intermediate/tutorial13-hdr/src/equirectangular.wgsl @@ -1,4 +1,4 @@ -const PI: f32 = 3.14159; +const PI: f32 = 3.1415926535897932384626433832795; struct Face { forward: vec3, @@ -12,15 +12,22 @@ var src: texture_2d; @group(0) @binding(1) -var output: texture_storage_2d_array; +var dst: texture_storage_2d_array; @compute -@workgroup_size(1, 1, 1) +@workgroup_size(16, 16, 1) fn compute_equirect_to_cubemap( @builtin(global_invocation_id) gid: vec3, ) { + // If texture size is not divisible by 32 we + // need to make sure we don't try to write to + // pixels 
that don't exist. + if gid.x >= u32(textureDimensions(dst).x) { + return; + } + var FACES: array = array( // FACES +X Face( @@ -60,13 +67,12 @@ fn compute_equirect_to_cubemap( ), ); - let face = FACES[gid.z]; - // Get texture coords relative to cubemap face - let dst_dimensions = vec2(textureDimensions(output)); + let dst_dimensions = vec2(textureDimensions(dst)); let cube_uv = vec2(gid.xy) / dst_dimensions * 2.0 - 1.0; // Get spherical coordinate from cube_uv + let face = FACES[gid.z]; let spherical = normalize(face.forward + face.right * cube_uv.x + face.up * cube_uv.y); // Get coordinate on the equirectangular texture @@ -77,8 +83,5 @@ fn compute_equirect_to_cubemap( // We use textureLoad() as textureSample() is not allowed in compute shaders var sample = textureLoad(src, eq_pixel, 0); - // sample = vec4(cube_uv * 0.5 + 0.5, 0.0, 1.0); - // sample = vec4(spherical * 0.5 + 0.5, 1.0); - - textureStore(output, gid.xy, gid.z, sample); + textureStore(dst, gid.xy, gid.z, sample); } \ No newline at end of file diff --git a/code/intermediate/tutorial13-hdr/src/lib.rs b/code/intermediate/tutorial13-hdr/src/lib.rs index 848bb22b..d32a51a5 100644 --- a/code/intermediate/tutorial13-hdr/src/lib.rs +++ b/code/intermediate/tutorial13-hdr/src/lib.rs @@ -16,6 +16,8 @@ mod hdr; mod model; mod resources; mod texture; + +#[cfg(feature = "debug")] mod debug; use model::{DrawLight, DrawModel, Vertex}; @@ -52,8 +54,6 @@ impl CameraUniform { self.view = view.into(); self.view_proj = view_proj.into(); self.inv_proj = proj.invert().unwrap().into(); - // self.inv_proj = proj.transpose().into(); - // self.inv_view = view.invert().unwrap().into(); self.inv_view = view.transpose().into(); } } @@ -176,6 +176,7 @@ struct State { hdr: hdr::HdrPipeline, environment_bind_group: wgpu::BindGroup, sky_pipeline: wgpu::RenderPipeline, + #[cfg(feature = "debug")] debug: debug::Debug, } @@ -244,6 +245,10 @@ impl State { // The instance is a handle to our GPU // BackendBit::PRIMARY => Vulkan + Metal + 
DX12 + Browser WebGPU let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { + // UPDATED + #[cfg(target_arch="wasm32")] + backends: wgpu::Backends::BROWSER_WEBGPU, + #[cfg(not(target_arch="wasm32"))] backends: wgpu::Backends::all(), dx12_shader_compiler: Default::default(), }); @@ -267,7 +272,8 @@ impl State { &wgpu::DeviceDescriptor { label: None, // UPDATED! - features: wgpu::Features::all_webgpu_mask(), + features: wgpu::Features::empty(), + // UPDATED! limits: wgpu::Limits::downlevel_defaults(), }, None, // Trace path @@ -599,6 +605,7 @@ impl State { ) }; + #[cfg(feature = "debug")] let debug = debug::Debug::new(&device, &camera_bind_group_layout, surface_format); Ok(Self { @@ -630,6 +637,8 @@ impl State { hdr, environment_bind_group, sky_pipeline, + + #[cfg(feature = "debug")] debug, }) } @@ -768,6 +777,7 @@ impl State { // Apply tonemapping self.hdr.process(&mut encoder, &view); + #[cfg(feature = "debug")] { let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { label: Some("Debug"), @@ -792,7 +802,7 @@ impl State { } #[cfg_attr(target_arch = "wasm32", wasm_bindgen(start))] -pub async fn run() -> anyhow::Result<()> { +pub async fn run() { cfg_if::cfg_if! { if #[cfg(target_arch = "wasm32")] { std::panic::set_hook(Box::new(console_error_panic_hook::hook)); @@ -828,7 +838,7 @@ pub async fn run() -> anyhow::Result<()> { .expect("Couldn't append canvas to document body."); } - let mut state = State::new(window).await?; // NEW! + let mut state = State::new(window).await.unwrap(); // NEW! 
let mut last_render_time = instant::Instant::now(); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; diff --git a/code/intermediate/tutorial13-hdr/src/light.wgsl b/code/intermediate/tutorial13-hdr/src/light.wgsl index 2723d100..d320f209 100644 --- a/code/intermediate/tutorial13-hdr/src/light.wgsl +++ b/code/intermediate/tutorial13-hdr/src/light.wgsl @@ -5,6 +5,7 @@ struct Camera { view: mat4x4, view_proj: mat4x4, inv_proj: mat4x4, + inv_view: mat4x4, } @group(0) @binding(0) var camera: Camera; diff --git a/code/intermediate/tutorial13-hdr/src/main.rs b/code/intermediate/tutorial13-hdr/src/main.rs index 83550509..c8d2ed96 100644 --- a/code/intermediate/tutorial13-hdr/src/main.rs +++ b/code/intermediate/tutorial13-hdr/src/main.rs @@ -1,5 +1,5 @@ use tutorial13_hdr::run; fn main() { - pollster::block_on(run()).unwrap(); + pollster::block_on(run()); } diff --git a/code/intermediate/tutorial13-hdr/src/resources.rs b/code/intermediate/tutorial13-hdr/src/resources.rs index ec64aed4..e1c3d340 100644 --- a/code/intermediate/tutorial13-hdr/src/resources.rs +++ b/code/intermediate/tutorial13-hdr/src/resources.rs @@ -358,9 +358,10 @@ impl HdrLoader { let mut encoder = device.create_command_encoder(&Default::default()); let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label }); + let num_workgroups = (dst_size + 15) / 16; pass.set_pipeline(&self.equirect_to_cubemap); pass.set_bind_group(0, &bind_group, &[]); - pass.dispatch_workgroups(dst_size, dst_size, 6); + pass.dispatch_workgroups(num_workgroups, num_workgroups, 6); drop(pass); diff --git a/code/intermediate/tutorial13-hdr/src/shader.wgsl b/code/intermediate/tutorial13-hdr/src/shader.wgsl index ec9d8c59..e30625a0 100644 --- a/code/intermediate/tutorial13-hdr/src/shader.wgsl +++ b/code/intermediate/tutorial13-hdr/src/shader.wgsl @@ -133,7 +133,8 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4 { let reflection = textureSample(env_map, env_sampler, 
world_reflect).rgb; let shininess = 0.1; - let result = (diffuse_color + specular_color) * object_color.xyz + reflection * shininess; + // let result = (diffuse_color + specular_color) * object_color.xyz + reflection * shininess; + let result = reflection; return vec4(result, object_color.a); } \ No newline at end of file diff --git a/code/intermediate/tutorial13-hdr/src/sky.wgsl b/code/intermediate/tutorial13-hdr/src/sky.wgsl index b1bbf239..613227eb 100644 --- a/code/intermediate/tutorial13-hdr/src/sky.wgsl +++ b/code/intermediate/tutorial13-hdr/src/sky.wgsl @@ -29,9 +29,8 @@ fn vs_main( (id >> 1u) & 1u, )); var out: VertexOutput; - // out.clip_position = vec4(uv * vec2(4.0, -4.0) + vec2(-1.0, 1.0), 0.0, 1.0); out.clip_position = vec4(uv * 4.0 - 1.0, 1.0, 1.0); - out.frag_position = vec4(uv * 4.0 - 1.0, 1.0, 1.0); + out.frag_position = out.clip_position; return out; } @@ -40,10 +39,7 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4 { let view_pos_homogeneous = camera.inv_proj * in.clip_position; let view_ray_direction = view_pos_homogeneous.xyz / view_pos_homogeneous.w; var ray_direction = normalize((camera.inv_view * vec4(view_ray_direction, 0.0)).xyz); - // ray_direction.z *= -1.0; - // let sample = vec4(ray_direction, 1.0); let sample = textureSample(env_map, env_sampler, ray_direction); - // let sample = in.clip_position; return sample; } \ No newline at end of file diff --git a/code/intermediate/tutorial13-hdr/src/texture.rs b/code/intermediate/tutorial13-hdr/src/texture.rs index f4971dc9..0826a9c2 100644 --- a/code/intermediate/tutorial13-hdr/src/texture.rs +++ b/code/intermediate/tutorial13-hdr/src/texture.rs @@ -181,15 +181,6 @@ impl Texture { } } -pub enum CubeSide { - PosX = 0, - NegX = 1, - PosY = 2, - NegY = 3, - PosZ = 4, - NegZ = 5, -} - pub struct CubeTexture { texture: wgpu::Texture, sampler: wgpu::Sampler, diff --git a/docs/intermediate/tutorial13-hdr/equirectangular.svg b/docs/intermediate/tutorial13-hdr/equirectangular.svg new file mode 
100644 index 00000000..9ccdc85a --- /dev/null +++ b/docs/intermediate/tutorial13-hdr/equirectangular.svg @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Prime Meridian + + + + + + Equator + + + + + + North Pole + + + + + + South Pole + + + + + + North Pole + + + + + + South Pole + + + + \ No newline at end of file diff --git a/docs/intermediate/tutorial13-hdr/figure_work-groups.jpg b/docs/intermediate/tutorial13-hdr/figure_work-groups.jpg new file mode 100644 index 00000000..94699a9a Binary files /dev/null and b/docs/intermediate/tutorial13-hdr/figure_work-groups.jpg differ diff --git a/docs/intermediate/tutorial13-hdr/just-reflections.png b/docs/intermediate/tutorial13-hdr/just-reflections.png new file mode 100644 index 00000000..a8e11204 Binary files /dev/null and b/docs/intermediate/tutorial13-hdr/just-reflections.png differ diff --git a/docs/intermediate/tutorial13-hdr/readme.md b/docs/intermediate/tutorial13-hdr/readme.md index 5b4b9a20..956bfdd5 100644 --- a/docs/intermediate/tutorial13-hdr/readme.md +++ b/docs/intermediate/tutorial13-hdr/readme.md @@ -27,10 +27,18 @@ greater than 1.0 meaning you can have a dynamic range of brighter objects. ## Switching to HDR As of writing, wgpu doesn't allow us to use a floating point format such as -`TextureFormat::Rgba16Float` (not all monitors support that anyways), so we -will have to render our scene in an HDR format, then convert the values to a -supported format such as `TextureFormat::Bgra8UnormSrgb` using a technique -called tonemapping. +`TextureFormat::Rgba16Float` as the surface texture format (not all +monitors support that anyways), so we will have to render our scene in +an HDR format, then convert the values to a supported format such as +`TextureFormat::Bgra8UnormSrgb` using a technique called tonemapping. + +
+ +There are some talks about implementing HDR surface texture support in +wgpu. Here is a github issues if you want to contribute to that +effort: https://github.com/gfx-rs/wgpu/issues/2920 + +
Before we do that though we need to switch to using an HDR texture for rendering. @@ -386,18 +394,966 @@ Here's what it looks like after implementing HDR: ## Loading HDR textures -## Equirectangular textures and cube maps +Now that we have an HDR render buffer, we can start leveraging +HDR textures to their fullest. One of the main uses for HDR +textures is to store lighting information in the form of an +environment map. + +This map can be used to light objects, display reflections and +also to make a skybox. We're going to create a skybox using HDR +texture, but first we need to talk about how environment maps are +stored. + +## Equirectangular textures + +An equirectangluar texture is a texture where a sphere is stretched +across a rectangular surface using what's known as an equirectangular +projection. This map of the Earth is an example of this projection. + +![map of the earth](https://upload.wikimedia.org/wikipedia/commons/thumb/8/83/Equirectangular_projection_SW.jpg/1024px-Equirectangular_projection_SW.jpg) + +This projection maps the latitude values of the sphere to the +horizontal coordinates of the texture. The longitude values get +mapped to the vertical coordinates. This means that the vertical +middle of the texture is the equator (0° longitude) of the sphere, +the horizontal middle is the prime meridian (0° latitude) of the +sphere, the left and right edges of the texture are the anti-meridian +(+180°/-180° latitude) the top and bottom edges of the texture are +the north pole (90° longitude) and south pole (-90° longitude) +respectively. + +![equirectangular diagram](./equirectangular.svg) + +This simple projection is easy to use, leading it to be one of the +most popular projections for storing spherical textures. You can +see the particular environment map we are going to use below. 
![equirectangular skybox](./kloofendal_43d_clear_puresky.jpg) +## Cube Maps + +While we technically can use an equirectangular map directly as long +as we do some math to figure out the correct coordinates, it is a lot +more convenient to convert our environment map into a cube map. + +
+ +A cube map is special kind of texture that has 6 layers. Each layer +corresponds to a different face of an imaginary cube that is aligned +to the X, Y and Z axes. The layers are stored in the following order: ++X, -X, +Y, -Y, +Z, -Z. + +
+ +To prepare to store the cube texture, we are going to create +a new struct called `CubeTexture` in `texture.rs`. + +```rust +pub struct CubeTexture { + texture: wgpu::Texture, + sampler: wgpu::Sampler, + view: wgpu::TextureView, +} + +impl CubeTexture { + pub fn create_2d( + device: &wgpu::Device, + width: u32, + height: u32, + format: wgpu::TextureFormat, + mip_level_count: u32, + usage: wgpu::TextureUsages, + mag_filter: wgpu::FilterMode, + label: Option<&str>, + ) -> Self { + let texture = device.create_texture(&wgpu::TextureDescriptor { + label, + size: wgpu::Extent3d { + width, + height, + // A cube has 6 sides, so we need 6 layers + depth_or_array_layers: 6, + }, + mip_level_count, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format, + usage, + view_formats: &[], + }); + + let view = texture.create_view(&wgpu::TextureViewDescriptor { + label, + dimension: Some(wgpu::TextureViewDimension::Cube), + array_layer_count: Some(6), + ..Default::default() + }); + + let sampler = device.create_sampler(&wgpu::SamplerDescriptor { + label, + address_mode_u: wgpu::AddressMode::ClampToEdge, + address_mode_v: wgpu::AddressMode::ClampToEdge, + address_mode_w: wgpu::AddressMode::ClampToEdge, + mag_filter, + min_filter: wgpu::FilterMode::Nearest, + mipmap_filter: wgpu::FilterMode::Nearest, + ..Default::default() + }); + + Self { + texture, + sampler, + view, + } + } + + pub fn texture(&self) -> &wgpu::Texture { &self.texture } + + pub fn view(&self) -> &wgpu::TextureView { &self.view } + + pub fn sampler(&self) -> &wgpu::Sampler { &self.sampler } + +} +``` + +With this we can now write the code to load the HDR into +a cube texture. + ## Compute shaders +Up to this point we've been exclusively using render +pipelines, but I felt this was a good time to introduce +compute pipelines and by extension compute shaders. Compute +pipelines are a lot easier to setup. 
All you need is to tell +the pipeline what resources you want to use, what code you +want to run, and how many threads you'd like the GPU to use +when running your code. We're going to use a compute shader +to give each pixel in our cube textue a color from the +HDR image. + +Before we can use compute shaders, we need to enable them +in wgpu. We can do that just need to change the line where +we specify what features we want to use. In `lib.rs`, change +the code where we request a device: + +```rust +let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + // UPDATED! + features: wgpu::Features::all_webgpu_mask(), + // UPDATED! + limits: wgpu::Limits::downlevel_defaults(), + }, + None, // Trace path + ) + .await + .unwrap(); +``` + +
+ +You may have noted that we have switched from +`downlevel_webgl2_defaults()` to `downlevel_defaults()`. +This means that we are dropping support for WebGL2. The +reason for this is that WebGL2 doesn't support compute +shaders. WebGPU was built with compute shaders in mind. As +of writing the only browser that supports WebGPU is Chrome, +and some experimental browsers such as Firefox Nightly. + +Consequently we are going to remove the webgl feature from +`Cargo.toml`. This line in particular: + +```toml +wgpu = { version = "0.17", features = ["webgl"]} +``` + +
+ +Now that we've told wgpu that we want to use compute +shaders, let's create a struct in `resource.rs` that we'll +use to load the HDR image into our cube map. + +```rust +pub struct HdrLoader { + texture_format: wgpu::TextureFormat, + equirect_layout: wgpu::BindGroupLayout, + equirect_to_cubemap: wgpu::ComputePipeline, +} + +impl HdrLoader { + pub fn new(device: &wgpu::Device) -> Self { + let module = device.create_shader_module(wgpu::include_wgsl!("equirectangular.wgsl")); + let texture_format = wgpu::TextureFormat::Rgba32Float; + let equirect_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: Some("HdrLoader::equirect_layout"), + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::COMPUTE, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Float { filterable: false }, + view_dimension: wgpu::TextureViewDimension::D2, + multisampled: false, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::COMPUTE, + ty: wgpu::BindingType::StorageTexture { + access: wgpu::StorageTextureAccess::WriteOnly, + format: texture_format, + view_dimension: wgpu::TextureViewDimension::D2Array, + }, + count: None, + }, + ], + }); + + let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[&equirect_layout], + push_constant_ranges: &[], + }); + + let equirect_to_cubemap = + device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor { + label: Some("equirect_to_cubemap"), + layout: Some(&pipeline_layout), + module: &module, + entry_point: "compute_equirect_to_cubemap", + }); + + Self { + equirect_to_cubemap, + texture_format, + equirect_layout, + } + } + + pub fn from_equirectangular_bytes( + &self, + device: &wgpu::Device, + queue: &wgpu::Queue, + data: &[u8], + dst_size: u32, + label: Option<&str>, + ) -> anyhow::Result { + let hdr_decoder = 
HdrDecoder::new(Cursor::new(data))?; + let meta = hdr_decoder.metadata(); + let mut pixels = vec![[0.0, 0.0, 0.0, 0.0]; meta.width as usize * meta.height as usize]; + hdr_decoder.read_image_transform( + |pix| { + // There's no Rgb32Float format, so we need + // an extra float + let rgb = pix.to_hdr(); + [rgb.0[0], rgb.0[1], rgb.0[2], 1.0f32] + }, + &mut pixels[..], + )?; + + let src = texture::Texture::create_2d_texture( + device, + meta.width, + meta.height, + self.texture_format, + wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, + wgpu::FilterMode::Linear, + None, + ); + + queue.write_texture( + wgpu::ImageCopyTexture { + texture: &src.texture, + mip_level: 0, + origin: wgpu::Origin3d::ZERO, + aspect: wgpu::TextureAspect::All, + }, + &bytemuck::cast_slice(&pixels), + wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: Some(src.size.width * std::mem::size_of::<[f32; 4]>() as u32), + rows_per_image: Some(src.size.height), + }, + src.size, + ); + + let dst = texture::CubeTexture::create_2d( + device, + dst_size, + dst_size, + self.texture_format, + 1, + // We are going to write to `dst` texture so we + // need to use a `STORAGE_BINDING`. + wgpu::TextureUsages::STORAGE_BINDING + | wgpu::TextureUsages::TEXTURE_BINDING, + wgpu::FilterMode::Nearest, + label, + ); + + let dst_view = dst.texture().create_view(&wgpu::TextureViewDescriptor { + label, + // Normally you'd use `TextureViewDimension::Cube` + // for a cube texture, but we can't use that + // view dimension with a `STORAGE_BINDING`. + // We need to access the cube texure layers + // directly. 
+ dimension: Some(wgpu::TextureViewDimension::D2Array), + ..Default::default() + }); + + let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + label, + layout: &self.equirect_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&src.view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::TextureView(&dst_view), + }, + ], + }); + + let mut encoder = device.create_command_encoder(&Default::default()); + let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label }); + + let num_workgroups = (dst_size + 15) / 16; + pass.set_pipeline(&self.equirect_to_cubemap); + pass.set_bind_group(0, &bind_group, &[]); + pass.dispatch_workgroups(num_workgroups, num_workgroups, 6); + + drop(pass); + + queue.submit([encoder.finish()]); + + Ok(dst) + } +} +``` + +The `dispatch_workgroups` call tells the gpu to run our +code in batchs called workgroups. Each workgroup has a +number of worker threads called invocations that run the +code in parallel. Workgroups are organized as a 3d grid +with the dimensions we pass to `dispatch_workgroups`. + +In this example we have a workgroup grid divided into 16x16 +chunks and storing the layer in z dimension. + +## The compute shader + +Now let's write a compute shader that will convert +our equirectangular texture to a cube texture. Create a file +called `equirectangular.wgsl`. We're going to break it down +chunk by chunk. + +```wgsl +const PI: f32 = 3.1415926535897932384626433832795; + +struct Face { + forward: vec3, + up: vec3, + right: vec3, +} +``` + +Two things here: + +1. wgsl doesn't have a builtin for PI so we need to specify + it ourselves. +2. each face of the cube map has an orientation to it, so we + need to store that. + +```wgsl +@group(0) +@binding(0) +var src: texture_2d; + +@group(0) +@binding(1) +var dst: texture_storage_2d_array; +``` + +Here we have the only two bindings we need. 
The equirectangular +`src` texture and our `dst` cube texture. Some things to note: +about `dst`: + +1. While `dst` is a cube texture, it's stored as a array of + 2d textures. +2. The type of binding we're using here is a storage texture. + An array storage texture to be precise. This is a unique + binding only available to compute shaders. It allows us + to directly write to the texture. +3. When using a storage texture binding we need to specify the + format of the texture. If you try to bind a texture with + a different format, wgpu will panic. + +```wgsl +@compute +@workgroup_size(16, 16, 1) +fn compute_equirect_to_cubemap( + @builtin(global_invocation_id) + gid: vec3, +) { + // If texture size is not divisible by 32 we + // need to make sure we don't try to write to + // pixels that don't exist. + if gid.x >= u32(textureDimensions(dst).x) { + return; + } + + var FACES: array = array( + // FACES +X + Face( + vec3(1.0, 0.0, 0.0), // forward + vec3(0.0, 1.0, 0.0), // up + vec3(0.0, 0.0, -1.0), // right + ), + // FACES -X + Face ( + vec3(-1.0, 0.0, 0.0), + vec3(0.0, 1.0, 0.0), + vec3(0.0, 0.0, 1.0), + ), + // FACES +Y + Face ( + vec3(0.0, -1.0, 0.0), + vec3(0.0, 0.0, 1.0), + vec3(1.0, 0.0, 0.0), + ), + // FACES -Y + Face ( + vec3(0.0, 1.0, 0.0), + vec3(0.0, 0.0, -1.0), + vec3(1.0, 0.0, 0.0), + ), + // FACES +Z + Face ( + vec3(0.0, 0.0, 1.0), + vec3(0.0, 1.0, 0.0), + vec3(1.0, 0.0, 0.0), + ), + // FACES -Z + Face ( + vec3(0.0, 0.0, -1.0), + vec3(0.0, 1.0, 0.0), + vec3(-1.0, 0.0, 0.0), + ), + ); + + // Get texture coords relative to cubemap face + let dst_dimensions = vec2(textureDimensions(dst)); + let cube_uv = vec2(gid.xy) / dst_dimensions * 2.0 - 1.0; + + // Get spherical coordinate from cube_uv + let face = FACES[gid.z]; + let spherical = normalize(face.forward + face.right * cube_uv.x + face.up * cube_uv.y); + + // Get coordinate on the equirectangular texture + let inv_atan = vec2(0.1591, 0.3183); + let eq_uv = vec2(atan2(spherical.z, spherical.x), 
asin(spherical.y)) * inv_atan + 0.5; + let eq_pixel = vec2(eq_uv * vec2(textureDimensions(src))); + + // We use textureLoad() as textureSample() is not allowed in compute shaders + var sample = textureLoad(src, eq_pixel, 0); + + textureStore(dst, gid.xy, gid.z, sample); +} +``` + +While I commented some the previous code, there are some +things I want to go over that wouldn't fit well in a +comment. + +The `workgroup_size` decorator tells the dimensions of the +workgroup's local grid of invocations. Because we are +dispatching one workgroup for every pixel in the texture, +we have each workgroup be a 16x16x1 grid. This means that each workgroup can have 256 threads to work with. + +
+ +For Webgpu each workgroup can only have a max of 256 threads (also +called invocations). + +
+ +With this we can load the environment map in the `new()` function: + +```rust +let hdr_loader = resources::HdrLoader::new(&device); +let sky_bytes = resources::load_binary("pure-sky.hdr").await?; +let sky_texture = hdr_loader.from_equirectangular_bytes( + &device, + &queue, + &sky_bytes, + 1080, + Some("Sky Texture"), +)?; +``` + ## Skybox -![debugging skybox](./debugging-skybox.png) +No that we have an environment map to render. Let's use +it to make our skybox. There are different ways to render +a skybox. A standard way is to render a cube and map the +environment map on it. While that method works, it can +have some artifacts in the corners and edges where the +cubes faces meet. + +Instead we are going to render to the entire screen and +compute the view direction from each pixel, and use that +to sample the texture. First though we need to create a +bindgroup for the environment map so that we can use it +for rendering. Add the following to `new()`: + +```rust +let environment_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: Some("environment_layout"), + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Float { filterable: false }, + view_dimension: wgpu::TextureViewDimension::Cube, + multisampled: false, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering), + count: None, + }, + ], + }); + +let environment_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + label: Some("environment_bind_group"), + layout: &environment_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&sky_texture.view()), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: 
wgpu::BindingResource::Sampler(sky_texture.sampler()), + }, + ], +}); +``` + +Now that we have the bindgroup, we need a render pipeline +to render the skybox. + +```rust +// NEW! +let sky_pipeline = { + let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Sky Pipeline Layout"), + bind_group_layouts: &[&camera_bind_group_layout, &environment_layout], + push_constant_ranges: &[], + }); + let shader = wgpu::include_wgsl!("sky.wgsl"); + create_render_pipeline( + &device, + &layout, + hdr.format(), + Some(texture::Texture::DEPTH_FORMAT), + &[], + wgpu::PrimitiveTopology::TriangleList, + shader, + ) +}; +``` + +One thing to not here. We added the primitive format to +`create_render_pipeline()`. Also we changed the depth compare +function to `CompareFunction::LessEqual` (we'll discuss why when +we go over the sky shader). Here's the changes to that: + +```rust + +fn create_render_pipeline( + device: &wgpu::Device, + layout: &wgpu::PipelineLayout, + color_format: wgpu::TextureFormat, + depth_format: Option, + vertex_layouts: &[wgpu::VertexBufferLayout], + topology: wgpu::PrimitiveTopology, // NEW! + shader: wgpu::ShaderModuleDescriptor, +) -> wgpu::RenderPipeline { + let shader = device.create_shader_module(shader); + + device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + // ... + primitive: wgpu::PrimitiveState { + topology, // NEW! + // ... + }, + depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { + format, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::LessEqual, // UDPATED! + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + // ... + }) +} +``` + +Don't forget to add the new bindgroup and pipeline to the +to `State`. + +```rust +struct State { + // ... + // NEW! + hdr: hdr::HdrPipeline, + environment_bind_group: wgpu::BindGroup, + sky_pipeline: wgpu::RenderPipeline, +} + +// ... 
+impl State { + async fn new(window: Window) -> anyhow::Result { + // ... + Ok(Self { + // ... + // NEW! + hdr, + environment_bind_group, + sky_pipeline, + debug, + }) + } +} +``` + +Now let's cover `sky.wgsl`. + +```wgsl +struct Camera { + view_pos: vec4, + view: mat4x4, + view_proj: mat4x4, + inv_proj: mat4x4, + inv_view: mat4x4, +} +@group(0) @binding(0) +var camera: Camera; + +@group(1) +@binding(0) +var env_map: texture_cube; +@group(1) +@binding(1) +var env_sampler: sampler; + +struct VertexOutput { + @builtin(position) frag_position: vec4, + @location(0) clip_position: vec4, +} + +@vertex +fn vs_main( + @builtin(vertex_index) id: u32, +) -> VertexOutput { + let uv = vec2(vec2( + id & 1u, + (id >> 1u) & 1u, + )); + var out: VertexOutput; + // out.clip_position = vec4(uv * vec2(4.0, -4.0) + vec2(-1.0, 1.0), 0.0, 1.0); + out.clip_position = vec4(uv * 4.0 - 1.0, 1.0, 1.0); + out.frag_position = vec4(uv * 4.0 - 1.0, 1.0, 1.0); + return out; +} + +@fragment +fn fs_main(in: VertexOutput) -> @location(0) vec4 { + let view_pos_homogeneous = camera.inv_proj * in.clip_position; + let view_ray_direction = view_pos_homogeneous.xyz / view_pos_homogeneous.w; + var ray_direction = normalize((camera.inv_view * vec4(view_ray_direction, 0.0)).xyz); + + let sample = textureSample(env_map, env_sampler, ray_direction); + return sample; +} +``` + +Let's break this down: + +1. We create a triangle twice the size of the screen. +2. In the fragment shader we get the view direction from + the clip position. We use the inverse projection + matrix to convert the clip coordinates to a view + direction. Then we use the inverse view matrix to + get the direction into world space as that's what we + need to sample the sky box correctly. +3. We then sample the sky texture with the view direction. + + + +In order for this to work we need to change our camera +uniforms a bit. We need to add the inverse view matrix +and inverse projection matrix to the `CameraUniform` struct. 
+ +```rust +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_position: [f32; 4], + view: [[f32; 4]; 4], // NEW! + view_proj: [[f32; 4]; 4], + inv_proj: [[f32; 4]; 4], // NEW! + inv_view: [[f32; 4]; 4], // NEW! +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_position: [0.0; 4], + view: cgmath::Matrix4::identity().into(), + view_proj: cgmath::Matrix4::identity().into(), + inv_proj: cgmath::Matrix4::identity().into(), // NEW! + inv_view: cgmath::Matrix4::identity().into(), // NEW! + } + } + + // UPDATED! + fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) { + self.view_position = camera.position.to_homogeneous().into(); + let proj = projection.calc_matrix(); + let view = camera.calc_matrix(); + let view_proj = proj * view; + self.view = view.into(); + self.view_proj = view_proj.into(); + self.inv_proj = proj.invert().unwrap().into(); + self.inv_view = view.transpose().into(); + } +} +``` + +Make sure to change the `Camera` definition in +`shader.wgsl`, and `light.wgsl`. Just as a reminder +it looks like this: + +```wgsl +struct Camera { + view_pos: vec4, + view: mat4x4, + view_proj: mat4x4, + inv_proj: mat4x4, + inv_view: mat4x4, +} +var camera: Camera; +``` + +
+ +You may have noticed that we removed the `OPENGL_TO_WGPU_MATRIX`. The reason for this is +that it was messing with the projection of the +skybox. ![projection error](./project-error.png) +It wasn't technically needed, so I felt fine +removing it. + +
+ ## Reflections -![with-reflections](./with-reflections.png) \ No newline at end of file +Now that we have a sky, we can mess around with +using it for lighting. This won't be physically +accurate (we'll look into that later). That being +said, we have the environment map, we might as +well use it. + +In order to do that though we need to change our +shader to do lighting in world space instead of +tangent space because our environment map is in +world space. Because there are a lot of changes +I'll post the whole shader here: + +```wgsl +// Vertex shader + +struct Camera { + view_pos: vec4, + view: mat4x4, + view_proj: mat4x4, + inv_proj: mat4x4, + inv_view: mat4x4, +} +@group(0) @binding(0) +var camera: Camera; + +struct Light { + position: vec3, + color: vec3, +} +@group(2) @binding(0) +var light: Light; + +struct VertexInput { + @location(0) position: vec3, + @location(1) tex_coords: vec2, + @location(2) normal: vec3, + @location(3) tangent: vec3, + @location(4) bitangent: vec3, +} +struct InstanceInput { + @location(5) model_matrix_0: vec4, + @location(6) model_matrix_1: vec4, + @location(7) model_matrix_2: vec4, + @location(8) model_matrix_3: vec4, + @location(9) normal_matrix_0: vec3, + @location(10) normal_matrix_1: vec3, + @location(11) normal_matrix_2: vec3, +} + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) tex_coords: vec2, + // Updated! 
+ @location(1) world_position: vec3, + @location(2) world_view_position: vec3, + @location(3) world_light_position: vec3, + @location(4) world_normal: vec3, + @location(5) world_tangent: vec3, + @location(6) world_bitangent: vec3, +} + +@vertex +fn vs_main( + model: VertexInput, + instance: InstanceInput, +) -> VertexOutput { + let model_matrix = mat4x4( + instance.model_matrix_0, + instance.model_matrix_1, + instance.model_matrix_2, + instance.model_matrix_3, + ); + let normal_matrix = mat3x3( + instance.normal_matrix_0, + instance.normal_matrix_1, + instance.normal_matrix_2, + ); + + // UPDATED! + let world_position = model_matrix * vec4(model.position, 1.0); + + var out: VertexOutput; + out.clip_position = camera.view_proj * world_position; + out.tex_coords = model.tex_coords; + out.world_normal = normalize(normal_matrix * model.normal); + out.world_tangent = normalize(normal_matrix * model.tangent); + out.world_bitangent = normalize(normal_matrix * model.bitangent); + out.world_position = world_position.xyz; + out.world_view_position = camera.view_pos.xyz; + return out; +} + +// Fragment shader + +@group(0) @binding(0) +var t_diffuse: texture_2d; +@group(0)@binding(1) +var s_diffuse: sampler; +@group(0)@binding(2) +var t_normal: texture_2d; +@group(0) @binding(3) +var s_normal: sampler; + +@group(3) +@binding(0) +var env_map: texture_cube; +@group(3) +@binding(1) +var env_sampler: sampler; + +@fragment +fn fs_main(in: VertexOutput) -> @location(0) vec4 { + let object_color: vec4 = textureSample(t_diffuse, s_diffuse, in.tex_coords); + let object_normal: vec4 = textureSample(t_normal, s_normal, in.tex_coords); + + // NEW! + // Adjust the tangent and bitangent using the Gram-Schmidt process + // This makes sure that they are perpendicular to each other and the + // normal of the surface. 
+ let world_tangent = normalize(in.world_tangent - dot(in.world_tangent, in.world_normal) * in.world_normal); + let world_bitangent = cross(world_tangent, in.world_normal); + + // Convert the normal sample to world space + let TBN = mat3x3( + world_tangent, + world_bitangent, + in.world_normal, + ); + let tangent_normal = object_normal.xyz * 2.0 - 1.0; + let world_normal = TBN * tangent_normal; + + // Create the lighting vectors + let light_dir = normalize(light.position - in.world_position); + let view_dir = normalize(in.world_view_position - in.world_position); + let half_dir = normalize(view_dir + light_dir); + + let diffuse_strength = max(dot(world_normal, light_dir), 0.0); + let diffuse_color = light.color * diffuse_strength; + + let specular_strength = pow(max(dot(world_normal, half_dir), 0.0), 32.0); + let specular_color = specular_strength * light.color; + + // NEW! + // Calculate reflections + let world_reflect = reflect(-view_dir, world_normal); + let reflection = textureSample(env_map, env_sampler, world_reflect).rgb; + let shininess = 0.1; + + let result = (diffuse_color + specular_color) * object_color.xyz + reflection * shininess; + + return vec4(result, object_color.a); +} +``` + +A little note on the reflection math. The `view_dir` +gives us the direction to the camera from the surface. +The reflection math needs the direction from the +camera to the surface so we negate `view_dir`. We +then use `wgsl`'s builtin `reflect` function to +reflect the inverted `view_dir` about the `world_normal`. +This gives us a direction that we can use to sample the +environment map to get the color of the sky in that +direction. Just looking at the reflection component +gives us the following: + +![just-reflections](./just-reflections.png) + +Here's the finished scene: + +![with-reflections](./with-reflections.png) + +## Demo + +
+ +If your browser doesn't support WebGPU, this example +won't work for you. + +
+ + + + \ No newline at end of file diff --git a/wasm-targets.json b/wasm-targets.json index f10cf240..dd720410 100644 --- a/wasm-targets.json +++ b/wasm-targets.json @@ -47,6 +47,10 @@ "package": "tutorial12-camera", "out": "docs/.vuepress/components/wasm/tutorial12_camera" }, + { + "package": "tutorial13-hdr", + "out": "docs/.vuepress/components/wasm/tutorial13_hdr" + }, { "package": "pong", "out": "docs/.vuepress/components/wasm/pong"