diff --git a/code/beginner/tutorial8-depth/src/texture.rs b/code/beginner/tutorial8-depth/src/texture.rs index 6609465b..c09a098b 100644 --- a/code/beginner/tutorial8-depth/src/texture.rs +++ b/code/beginner/tutorial8-depth/src/texture.rs @@ -10,6 +10,7 @@ pub struct Texture { impl Texture { pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; + #[allow(unused)] pub fn create_depth_texture( device: &wgpu::Device, config: &wgpu::SurfaceConfiguration, diff --git a/code/intermediate/tutorial13-hdr/src/hdr.rs b/code/intermediate/tutorial13-hdr/src/hdr.rs index 337e357f..53011727 100644 --- a/code/intermediate/tutorial13-hdr/src/hdr.rs +++ b/code/intermediate/tutorial13-hdr/src/hdr.rs @@ -79,7 +79,7 @@ impl HdrPipeline { let pipeline = create_render_pipeline( device, &pipeline_layout, - config.format, + config.format.add_srgb_suffix(), None, &[], wgpu::PrimitiveTopology::TriangleList, diff --git a/code/intermediate/tutorial13-hdr/src/lib.rs b/code/intermediate/tutorial13-hdr/src/lib.rs index d4f8e899..91ea6fb6 100644 --- a/code/intermediate/tutorial13-hdr/src/lib.rs +++ b/code/intermediate/tutorial13-hdr/src/lib.rs @@ -295,7 +295,8 @@ impl<'a> State<'a> { height: size.height, present_mode: surface_caps.present_modes[0], alpha_mode: surface_caps.alpha_modes[0], - view_formats: vec![], + // NEW! 
+ view_formats: vec![surface_format.add_srgb_suffix()], desired_maximum_frame_latency: 2, }; @@ -714,7 +715,10 @@ impl<'a> State<'a> { let output = self.surface.get_current_texture()?; let view = output .texture - .create_view(&wgpu::TextureViewDescriptor::default()); + .create_view(&wgpu::TextureViewDescriptor { + format: Some(self.config.format.add_srgb_suffix()), + ..Default::default() + }); let mut encoder = self .device diff --git a/code/intermediate/tutorial13-hdr/src/resources.rs b/code/intermediate/tutorial13-hdr/src/resources.rs index 21f831aa..df9a4f6b 100644 --- a/code/intermediate/tutorial13-hdr/src/resources.rs +++ b/code/intermediate/tutorial13-hdr/src/resources.rs @@ -284,10 +284,9 @@ impl HdrLoader { dst_size: u32, label: Option<&str>, ) -> anyhow::Result { - log::info!("creating decoder"); let hdr_decoder = HdrDecoder::new(Cursor::new(data))?; let meta = hdr_decoder.metadata(); - log::info!("reading image"); + #[cfg(not(target_arch="wasm32"))] let pixels = { let mut pixels = vec![[0.0, 0.0, 0.0, 0.0]; meta.width as usize * meta.height as usize]; @@ -309,7 +308,6 @@ impl HdrLoader { }) .collect::>(); - log::info!("creating texture"); let src = texture::Texture::create_2d_texture( device, meta.width, @@ -320,7 +318,6 @@ impl HdrLoader { None, ); - log::info!("writing texture"); queue.write_texture( wgpu::ImageCopyTexture { texture: &src.texture, diff --git a/docs/intermediate/tutorial13-hdr/readme.md b/docs/intermediate/tutorial13-hdr/readme.md index 24887d5e..31f0b92c 100644 --- a/docs/intermediate/tutorial13-hdr/readme.md +++ b/docs/intermediate/tutorial13-hdr/readme.md @@ -105,7 +105,7 @@ impl HdrPipeline { let pipeline = create_render_pipeline( device, &pipeline_layout, - config.format, + config.format.add_srgb_suffix(), None, // We'll use some math to generate the vertex data in // the shader, so we don't need any vertex buffers @@ -567,16 +567,27 @@ impl HdrLoader { ) -> anyhow::Result { let hdr_decoder = HdrDecoder::new(Cursor::new(data))?; 
let meta = hdr_decoder.metadata(); - let mut pixels = vec![[0.0, 0.0, 0.0, 0.0]; meta.width as usize * meta.height as usize]; - hdr_decoder.read_image_transform( - |pix| { - // There's no Rgb32Float format, so we need - // an extra float + + #[cfg(not(target_arch="wasm32"))] + let pixels = { + let mut pixels = vec![[0.0, 0.0, 0.0, 0.0]; meta.width as usize * meta.height as usize]; + hdr_decoder.read_image_transform( + |pix| { + let rgb = pix.to_hdr(); + [rgb.0[0], rgb.0[1], rgb.0[2], 1.0f32] + }, + &mut pixels[..], + )?; + pixels + }; + #[cfg(target_arch="wasm32")] + let pixels = hdr_decoder.read_image_native()? + .into_iter() + .map(|pix| { let rgb = pix.to_hdr(); [rgb.0[0], rgb.0[1], rgb.0[2], 1.0f32] - }, - &mut pixels[..], - )?; + }) + .collect::>(); let src = texture::Texture::create_2d_texture( device, @@ -1201,6 +1212,48 @@ Here's the finished scene: ![with-reflections](./with-reflections.png) +## Output too dark on WebGPU? + +WebGPU doesn't support using sRGB texture formats as the +output for a surface. We can get around this by making the +texture view used to render use the sRGB version of the +format. To do this we need to change the surface config +we use to allow view formats with sRGB. + +```rust +let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface_format, + width: size.width, + height: size.height, + present_mode: surface_caps.present_modes[0], + alpha_mode: surface_caps.alpha_modes[0], + // NEW! + view_formats: vec![surface_format.add_srgb_suffix()], + desired_maximum_frame_latency: 2, +}; +``` + +Then we need to create a view with sRGB enabled in +`State::render()`. + +```rust +let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor { + format: Some(self.config.format.add_srgb_suffix()), + ..Default::default() + }); +``` + +You may have noticed as well that in `HdrPipeline::new()` +we use `config.format.add_srgb_suffix()` when creating +the render pipeline. 
This is required because, if we don't,
the sRGB enabled `TextureView` won't work with the
render pipeline.
+
+With that, you should get the sRGB output as expected.
+
 ## Demo