From 59f0ec16f0749056226a06622614acbe576d1a7a Mon Sep 17 00:00:00 2001
From: Ben Hansen
Date: Thu, 30 Dec 2021 12:47:05 -0700
Subject: [PATCH] migrated tutorial to use lib.rs

---
 code/beginner/tutorial1-window/src/lib.rs | 31 +
 code/beginner/tutorial1-window/src/main.rs | 32 +-
 code/beginner/tutorial2-surface/src/lib.rs | 178 ++++
 code/beginner/tutorial2-surface/src/main.rs | 179 +---
 code/beginner/tutorial3-pipeline/src/lib.rs | 237 ++++++
 code/beginner/tutorial3-pipeline/src/main.rs | 238 +-----
 code/beginner/tutorial4-buffer/src/lib.rs | 311 +++++++
 code/beginner/tutorial4-buffer/src/main.rs | 63 +-
 code/beginner/tutorial5-textures/src/lib.rs | 361 ++++++++
 code/beginner/tutorial5-textures/src/main.rs | 63 +-
 code/beginner/tutorial6-uniforms/src/lib.rs | 570 +++++++++++++
 code/beginner/tutorial6-uniforms/src/main.rs | 63 +-
 code/beginner/tutorial7-instancing/src/lib.rs | 668 +++++++++++++++
 .../beginner/tutorial7-instancing/src/main.rs | 63 +-
 code/beginner/tutorial8-depth/src/lib.rs | 694 +++++++++++++++
 code/beginner/tutorial8-depth/src/main.rs | 62 +-
 code/beginner/tutorial9-models/src/lib.rs | 605 +++++++++++++
 code/beginner/tutorial9-models/src/main.rs | 594 +------------
 .../tutorial10-lighting/src/lib.rs | 744 ++++++++++++++++
 .../tutorial10-lighting/src/main.rs | 733 +---------------
 .../tutorial11-normals/src/lib.rs | 793 ++++++++++++++++++
 .../tutorial11-normals/src/main.rs | 782 +----------------
 .../intermediate/tutorial12-camera/src/lib.rs | 710 ++++++++++++++++
 .../tutorial12-camera/src/main.rs | 711 +---------------
 .../tutorial13-threading/src/lib.rs | 703 ++++++++++++++++
 .../tutorial13-threading/src/main.rs | 704 +---------------
 26 files changed, 6659 insertions(+), 4233 deletions(-)
 create mode 100644 code/beginner/tutorial1-window/src/lib.rs
 create mode 100644 code/beginner/tutorial2-surface/src/lib.rs
 create mode 100644 code/beginner/tutorial3-pipeline/src/lib.rs
 create mode 100644 code/beginner/tutorial4-buffer/src/lib.rs
 create mode 100644 code/beginner/tutorial5-textures/src/lib.rs
 create mode 100644 code/beginner/tutorial6-uniforms/src/lib.rs
 create mode 100644 code/beginner/tutorial7-instancing/src/lib.rs
 create mode 100644 code/beginner/tutorial8-depth/src/lib.rs
 create mode 100644 code/beginner/tutorial9-models/src/lib.rs
 create mode 100644 code/intermediate/tutorial10-lighting/src/lib.rs
 create mode 100644 code/intermediate/tutorial11-normals/src/lib.rs
 create mode 100644 code/intermediate/tutorial12-camera/src/lib.rs
 create mode 100644 code/intermediate/tutorial13-threading/src/lib.rs

diff --git a/code/beginner/tutorial1-window/src/lib.rs b/code/beginner/tutorial1-window/src/lib.rs
new file mode 100644
index 00000000..2c75ff10
--- /dev/null
+++ b/code/beginner/tutorial1-window/src/lib.rs
@@ -0,0 +1,31 @@
+use winit::{
+    event::*,
+    event_loop::{ControlFlow, EventLoop},
+    window::WindowBuilder,
+};
+
+pub fn run() {
+    env_logger::init();
+    let event_loop = EventLoop::new();
+    let window = WindowBuilder::new().build(&event_loop).unwrap();
+
+    event_loop.run(move |event, _, control_flow| match event {
+        Event::WindowEvent {
+            ref event,
+            window_id,
+        } if window_id == window.id() => match event {
+            WindowEvent::CloseRequested
+            | WindowEvent::KeyboardInput {
+                input:
+                    KeyboardInput {
+                        state: ElementState::Pressed,
+                        virtual_keycode: Some(VirtualKeyCode::Escape),
+                        ..
+                    },
+                ..
+ } => *control_flow = ControlFlow::Exit, + _ => {} + }, + _ => {} + }); +} diff --git a/code/beginner/tutorial1-window/src/main.rs b/code/beginner/tutorial1-window/src/main.rs index aa0be122..7d702ffd 100644 --- a/code/beginner/tutorial1-window/src/main.rs +++ b/code/beginner/tutorial1-window/src/main.rs @@ -1,31 +1,5 @@ -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::WindowBuilder, -}; +use tutorial1_window::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - event_loop.run(move |event, _, control_flow| match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - _ => {} - }, - _ => {} - }); -} + run(); +} \ No newline at end of file diff --git a/code/beginner/tutorial2-surface/src/lib.rs b/code/beginner/tutorial2-surface/src/lib.rs new file mode 100644 index 00000000..14d94d6b --- /dev/null +++ b/code/beginner/tutorial2-surface/src/lib.rs @@ -0,0 +1,178 @@ +use std::iter; + +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::{Window, WindowBuilder}, +}; + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + // Some(&std::path::Path::new("trace")), // Trace path + None, + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + Self { + surface, + device, + queue, + config, + size, + } + } + + pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + } + } + + #[allow(unused_variables)] + fn input(&mut self, event: &WindowEvent) -> bool { + false + } + + fn update(&mut self) {} + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + 
label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: None, + }); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state: State = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + // UPDATED! + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { + // new_inner_size is &&mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::RedrawEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. 
+ window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial2-surface/src/main.rs b/code/beginner/tutorial2-surface/src/main.rs index 8763e236..f7c6aaba 100644 --- a/code/beginner/tutorial2-surface/src/main.rs +++ b/code/beginner/tutorial2-surface/src/main.rs @@ -1,178 +1,5 @@ -use std::iter; - -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::{Window, WindowBuilder}, -}; - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - size: winit::dpi::PhysicalSize, -} - -impl State { - async fn new(window: &Window) -> Self { - let size = window.inner_size(); - - // The instance is a handle to our GPU - // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU - let instance = wgpu::Instance::new(wgpu::Backends::all()); - let surface = unsafe { instance.create_surface(window) }; - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: wgpu::Features::empty(), - limits: wgpu::Limits::default(), - }, - // Some(&std::path::Path::new("trace")), // Trace path - None, - ) - .await - .unwrap(); - - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface.get_preferred_format(&adapter).unwrap(), - width: size.width, - height: size.height, - present_mode: wgpu::PresentMode::Fifo, - }; - surface.configure(&device, &config); - - Self { - surface, - device, - queue, - config, - size, - } - } - - pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { - if new_size.width > 0 && new_size.height > 0 { - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - } - } - - #[allow(unused_variables)] - fn input(&mut self, event: &WindowEvent) -> bool { - false - } - - fn update(&mut self) {} - - fn render(&mut self) -> Result<(), wgpu::SurfaceError> { - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - { - let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color { - r: 0.1, - g: 0.2, - b: 0.3, - a: 1.0, - }), - store: true, - }, - }], - depth_stencil_attachment: None, - }); - } - - self.queue.submit(iter::once(encoder.finish())); - output.present(); - - Ok(()) - } -} +use tutorial2_surface::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state: State = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - // UPDATED! 
- match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - // new_inner_size is &&mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::RedrawEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. - window.request_redraw(); - } - _ => {} - } - }); -} + run() +} \ No newline at end of file diff --git a/code/beginner/tutorial3-pipeline/src/lib.rs b/code/beginner/tutorial3-pipeline/src/lib.rs new file mode 100644 index 00000000..14256b80 --- /dev/null +++ b/code/beginner/tutorial3-pipeline/src/lib.rs @@ -0,0 +1,237 @@ +use std::iter; + +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::{Window, WindowBuilder}, +}; + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, + // NEW! + render_pipeline: wgpu::RenderPipeline, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + 
targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: None, + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }); + + Self { + surface, + device, + queue, + size, + config, + render_pipeline, + } + } + + pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + } + } + + #[allow(unused_variables)] + fn input(&mut self, event: &WindowEvent) -> bool { + false + } + + fn update(&mut self) {} + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: None, + }); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.draw(0..3, 0..1); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { + // new_inner_size is &mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::MainEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. + window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial3-pipeline/src/main.rs b/code/beginner/tutorial3-pipeline/src/main.rs index 8d4ec8d4..12213137 100644 --- a/code/beginner/tutorial3-pipeline/src/main.rs +++ b/code/beginner/tutorial3-pipeline/src/main.rs @@ -1,237 +1,5 @@ -use std::iter; - -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::{Window, WindowBuilder}, -}; - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - size: winit::dpi::PhysicalSize, - // NEW! - render_pipeline: wgpu::RenderPipeline, -} - -impl State { - async fn new(window: &Window) -> Self { - let size = window.inner_size(); - - // The instance is a handle to our GPU - // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU - let instance = wgpu::Instance::new(wgpu::Backends::all()); - let surface = unsafe { instance.create_surface(window) }; - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: wgpu::Features::empty(), - limits: wgpu::Limits::default(), - }, - None, // Trace path - ) - .await - .unwrap(); - - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface.get_preferred_format(&adapter).unwrap(), - width: size.width, - height: size.height, - present_mode: wgpu::PresentMode::Fifo, - }; - surface.configure(&device, &config); - - let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { - label: Some("Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), - }); - - let render_pipeline_layout = - device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Render Pipeline Layout"), - bind_group_layouts: &[], - push_constant_ranges: &[], - }); - - let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Render Pipeline"), - layout: Some(&render_pipeline_layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_main", - buffers: &[], - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_main", - targets: &[wgpu::ColorTargetState { - format: config.format, - blend: Some(wgpu::BlendState { - color: wgpu::BlendComponent::REPLACE, - alpha: wgpu::BlendComponent::REPLACE, - }), - write_mask: wgpu::ColorWrites::ALL, - }], - }), - primitive: wgpu::PrimitiveState { - topology: wgpu::PrimitiveTopology::TriangleList, - strip_index_format: None, - front_face: wgpu::FrontFace::Ccw, - cull_mode: 
Some(wgpu::Face::Back), - // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE - polygon_mode: wgpu::PolygonMode::Fill, - // Requires Features::DEPTH_CLIP_CONTROL - unclipped_depth: false, - // Requires Features::CONSERVATIVE_RASTERIZATION - conservative: false, - }, - depth_stencil: None, - multisample: wgpu::MultisampleState { - count: 1, - mask: !0, - alpha_to_coverage_enabled: false, - }, - // If the pipeline will be used with a multiview render pass, this - // indicates how many array layers the attachments will have. - multiview: None, - }); - - Self { - surface, - device, - queue, - size, - config, - render_pipeline, - } - } - - pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { - if new_size.width > 0 && new_size.height > 0 { - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - } - } - - #[allow(unused_variables)] - fn input(&mut self, event: &WindowEvent) -> bool { - false - } - - fn update(&mut self) {} - - fn render(&mut self) -> Result<(), wgpu::SurfaceError> { - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - { - let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color { - r: 0.1, - g: 0.2, - b: 0.3, - a: 1.0, - }), - store: true, - }, - }], - depth_stencil_attachment: None, - }); - - render_pass.set_pipeline(&self.render_pipeline); - render_pass.draw(0..3, 0..1); - } - - self.queue.submit(iter::once(encoder.finish())); - output.present(); - - Ok(()) - } -} +use tutorial3_pipeline::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { - // new_inner_size is &mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::MainEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. - window.request_redraw(); - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/beginner/tutorial4-buffer/src/lib.rs b/code/beginner/tutorial4-buffer/src/lib.rs new file mode 100644 index 00000000..5e9c791c --- /dev/null +++ b/code/beginner/tutorial4-buffer/src/lib.rs @@ -0,0 +1,311 @@ +use std::iter; + +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::{Window, WindowBuilder}, +}; + +#[repr(C)] +#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)] +struct Vertex { + position: [f32; 3], + color: [f32; 3], +} + +impl Vertex { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + wgpu::VertexBufferLayout { + array_stride: std::mem::size_of::() as wgpu::BufferAddress, + step_mode: wgpu::VertexStepMode::Vertex, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + shader_location: 0, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress, + shader_location: 1, + format: wgpu::VertexFormat::Float32x3, + }, + ], + } + } +} + +const VERTICES: &[Vertex] = &[ + Vertex { + position: [-0.0868241, 0.49240386, 0.0], + color: [0.5, 0.0, 0.5], + }, // A + Vertex { + position: [-0.49513406, 0.06958647, 0.0], + color: [0.5, 0.0, 0.5], + }, // B + Vertex { + position: [-0.21918549, -0.44939706, 0.0], + color: [0.5, 0.0, 0.5], + }, // C + Vertex { + position: [0.35966998, -0.3473291, 0.0], + color: [0.5, 0.0, 0.5], + }, // D + Vertex { + position: [0.44147372, 0.2347359, 0.0], + color: [0.5, 0.0, 0.5], + }, // E +]; + +const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4, /* padding */ 0]; + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, + render_pipeline: wgpu::RenderPipeline, + // NEW! 
+ vertex_buffer: wgpu::Buffer, + index_buffer: wgpu::Buffer, + num_indices: u32, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[Vertex::desc()], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: None, + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. 
+ multiview: None, + }); + + let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Vertex Buffer"), + contents: bytemuck::cast_slice(VERTICES), + usage: wgpu::BufferUsages::VERTEX, + }); + let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Index Buffer"), + contents: bytemuck::cast_slice(INDICES), + usage: wgpu::BufferUsages::INDEX, + }); + let num_indices = INDICES.len() as u32; + + Self { + surface, + device, + queue, + config, + size, + render_pipeline, + vertex_buffer, + index_buffer, + num_indices, + } + } + + pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + } + } + + #[allow(unused_variables)] + fn input(&mut self, event: &WindowEvent) -> bool { + false + } + + fn update(&mut self) {} + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: None, + }); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); + render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); + render_pass.draw_indexed(0..self.num_indices, 0, 0..1); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { + // new_inner_size is &mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::MainEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. + window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial4-buffer/src/main.rs b/code/beginner/tutorial4-buffer/src/main.rs index 5010974b..6d976706 100644 --- a/code/beginner/tutorial4-buffer/src/main.rs +++ b/code/beginner/tutorial4-buffer/src/main.rs @@ -1,3 +1,4 @@ +<<<<<<< HEAD use std::iter; use wgpu::util::DeviceExt; @@ -250,62 +251,10 @@ impl State { Ok(()) } } +======= +use tutorial4_buffer::run; +>>>>>>> c4cfea7 (migrated tutorial to use lib.r) fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - // new_inner_size is &mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::MainEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. 
-                window.request_redraw();
-            }
-            _ => {}
-        }
-    });
-}
+    run();
+}
\ No newline at end of file
diff --git a/code/beginner/tutorial5-textures/src/lib.rs b/code/beginner/tutorial5-textures/src/lib.rs
new file mode 100644
index 00000000..bb897a00
--- /dev/null
+++ b/code/beginner/tutorial5-textures/src/lib.rs
@@ -0,0 +1,361 @@
+use std::iter;
+
+use wgpu::util::DeviceExt;
+use winit::{
+    event::*,
+    event_loop::{ControlFlow, EventLoop},
+    window::{Window, WindowBuilder},
+};
+
+mod texture;
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
+struct Vertex {
+    position: [f32; 3],
+    tex_coords: [f32; 2],
+}
+
+impl Vertex {
+    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
+        use std::mem;
+        wgpu::VertexBufferLayout {
+            array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
+            step_mode: wgpu::VertexStepMode::Vertex,
+            attributes: &[
+                wgpu::VertexAttribute {
+                    offset: 0,
+                    shader_location: 0,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
+                    shader_location: 1,
+                    format: wgpu::VertexFormat::Float32x2,
+                },
+            ],
+        }
+    }
+}
+
+const VERTICES: &[Vertex] = &[
+    Vertex {
+        position: [-0.0868241, 0.49240386, 0.0],
+        tex_coords: [0.4131759, 0.00759614],
+    }, // A
+    Vertex {
+        position: [-0.49513406, 0.06958647, 0.0],
+        tex_coords: [0.0048659444, 0.43041354],
+    }, // B
+    Vertex {
+        position: [-0.21918549, -0.44939706, 0.0],
+        tex_coords: [0.28081453, 0.949397],
+    }, // C
+    Vertex {
+        position: [0.35966998, -0.3473291, 0.0],
+        tex_coords: [0.85967, 0.84732914],
+    }, // D
+    Vertex {
+        position: [0.44147372, 0.2347359, 0.0],
+        tex_coords: [0.9414737, 0.2652641],
+    }, // E
+];
+
+const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4, /* padding */ 0];
+
+struct State {
+    surface: wgpu::Surface,
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    config: wgpu::SurfaceConfiguration,
+    size: winit::dpi::PhysicalSize<u32>,
+    render_pipeline: wgpu::RenderPipeline,
+    vertex_buffer: wgpu::Buffer,
+    index_buffer: wgpu::Buffer,
+    num_indices: u32,
+    // NEW!
+ #[allow(dead_code)] + diffuse_texture: texture::Texture, + diffuse_bind_group: wgpu::BindGroup, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + let diffuse_bytes = include_bytes!("happy-tree.png"); + let diffuse_texture = + texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &texture_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), + }, + ], + label: Some("diffuse_bind_group"), + }); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[&texture_bind_group_layout], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[Vertex::desc()], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: 
Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: None, + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }); + + let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Vertex Buffer"), + contents: bytemuck::cast_slice(VERTICES), + usage: wgpu::BufferUsages::VERTEX, + }); + let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Index Buffer"), + contents: bytemuck::cast_slice(INDICES), + usage: wgpu::BufferUsages::INDEX, + }); + let num_indices = INDICES.len() as u32; + + Self { + surface, + device, + queue, + config, + size, + render_pipeline, + vertex_buffer, + index_buffer, + num_indices, + diffuse_texture, + diffuse_bind_group, + } + } + + pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + } + } + + #[allow(unused_variables)] + fn input(&mut self, event: &WindowEvent) -> bool { + false + } + + fn update(&mut self) {} + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: None, + }); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); + render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); + render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); + render_pass.draw_indexed(0..self.num_indices, 0, 0..1); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. 
+ } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { + // new_inner_size is &mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::MainEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. + window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial5-textures/src/main.rs b/code/beginner/tutorial5-textures/src/main.rs index 3c3de742..4270acba 100644 --- a/code/beginner/tutorial5-textures/src/main.rs +++ b/code/beginner/tutorial5-textures/src/main.rs @@ -1,3 +1,4 @@ +<<<<<<< HEAD use std::iter; use wgpu::util::DeviceExt; @@ -300,62 +301,10 @@ impl State { Ok(()) } } +======= +use tutorial5_textures::run; +>>>>>>> c4cfea7 (migrated tutorial to use lib.r) fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - // new_inner_size is &mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::MainEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. 
- window.request_redraw(); - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/beginner/tutorial6-uniforms/src/lib.rs b/code/beginner/tutorial6-uniforms/src/lib.rs new file mode 100644 index 00000000..adb811cf --- /dev/null +++ b/code/beginner/tutorial6-uniforms/src/lib.rs @@ -0,0 +1,570 @@ +use std::iter; + +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::{Window, WindowBuilder}, +}; + +mod texture; + +#[repr(C)] +#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)] +struct Vertex { + position: [f32; 3], + tex_coords: [f32; 2], +} + +impl Vertex { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + step_mode: wgpu::VertexStepMode::Vertex, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + shader_location: 0, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress, + shader_location: 1, + format: wgpu::VertexFormat::Float32x2, + }, + ], + } + } +} + +const VERTICES: &[Vertex] = &[ + Vertex { + position: [-0.0868241, 0.49240386, 0.0], + tex_coords: [0.4131759, 0.00759614], + }, // A + Vertex { + position: [-0.49513406, 0.06958647, 0.0], + tex_coords: [0.0048659444, 0.43041354], + }, // B + Vertex { + position: [-0.21918549, -0.44939706, 0.0], + tex_coords: [0.28081453, 0.949397], + }, // C + Vertex { + position: [0.35966998, -0.3473291, 0.0], + tex_coords: [0.85967, 0.84732914], + }, // D + Vertex { + position: [0.44147372, 0.2347359, 0.0], + tex_coords: [0.9414737, 0.2652641], + }, // E +]; + +const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4, /* padding */ 0]; + +#[rustfmt::skip] +pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( + 1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.5, 0.0, + 0.0, 0.0, 0.5, 1.0, +); + +struct Camera { + eye: cgmath::Point3, + target: cgmath::Point3, + up: cgmath::Vector3, + aspect: f32, + fovy: f32, + znear: f32, + zfar: f32, +} + +impl Camera { + fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { + let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); + let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); + proj * view + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + use cgmath::SquareMatrix; + Self { + view_proj: cgmath::Matrix4::identity().into(), + } + } + + fn update_view_proj(&mut self, camera: &Camera) { + self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); + } +} + +struct CameraController { + speed: f32, + is_up_pressed: bool, + is_down_pressed: bool, + is_forward_pressed: bool, + is_backward_pressed: bool, + is_left_pressed: bool, + is_right_pressed: bool, +} + +impl CameraController { + fn new(speed: f32) -> Self { + Self { + speed, + is_up_pressed: false, + is_down_pressed: false, + is_forward_pressed: false, + is_backward_pressed: false, + is_left_pressed: false, + is_right_pressed: false, + } + } + + fn process_events(&mut self, event: &WindowEvent) -> bool { + match event { + WindowEvent::KeyboardInput { + input: + KeyboardInput { + state, + virtual_keycode: Some(keycode), + .. + }, + .. 
+ } => { + let is_pressed = *state == ElementState::Pressed; + match keycode { + VirtualKeyCode::Space => { + self.is_up_pressed = is_pressed; + true + } + VirtualKeyCode::LShift => { + self.is_down_pressed = is_pressed; + true + } + VirtualKeyCode::W | VirtualKeyCode::Up => { + self.is_forward_pressed = is_pressed; + true + } + VirtualKeyCode::A | VirtualKeyCode::Left => { + self.is_left_pressed = is_pressed; + true + } + VirtualKeyCode::S | VirtualKeyCode::Down => { + self.is_backward_pressed = is_pressed; + true + } + VirtualKeyCode::D | VirtualKeyCode::Right => { + self.is_right_pressed = is_pressed; + true + } + _ => false, + } + } + _ => false, + } + } + + fn update_camera(&self, camera: &mut Camera) { + use cgmath::InnerSpace; + let forward = camera.target - camera.eye; + let forward_norm = forward.normalize(); + let forward_mag = forward.magnitude(); + + // Prevents glitching when camera gets too close to the + // center of the scene. + if self.is_forward_pressed && forward_mag > self.speed { + camera.eye += forward_norm * self.speed; + } + if self.is_backward_pressed { + camera.eye -= forward_norm * self.speed; + } + + let right = forward_norm.cross(camera.up); + + // Redo radius calc in case the up/ down is pressed. + let forward = camera.target - camera.eye; + let forward_mag = forward.magnitude(); + + if self.is_right_pressed { + // Rescale the distance between the target and eye so + // that it doesn't change. The eye therefore still + // lies on the circle made by the target and eye. + camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; + } + if self.is_left_pressed { + camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; + } + } +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, + render_pipeline: wgpu::RenderPipeline, + vertex_buffer: wgpu::Buffer, + index_buffer: wgpu::Buffer, + num_indices: u32, + #[allow(dead_code)] + diffuse_texture: texture::Texture, + diffuse_bind_group: wgpu::BindGroup, + // NEW! 
+ camera: Camera, + camera_controller: CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + let diffuse_bytes = include_bytes!("happy-tree.png"); + let diffuse_texture = + texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &texture_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), + }, + ], + label: Some("diffuse_bind_group"), + }); + + let camera = Camera { + eye: (0.0, 1.0, 2.0).into(), + target: (0.0, 0.0, 0.0).into(), + up: cgmath::Vector3::unit_y(), + aspect: config.width as f32 / config.height as f32, + fovy: 45.0, + znear: 0.1, + zfar: 100.0, + }; + let camera_controller = CameraController::new(0.2); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + 
layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[Vertex::desc()], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: None, + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. 
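+            // A plain single-view pass is used here, so this stays None.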
+ multiview: None, + }); + + let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Vertex Buffer"), + contents: bytemuck::cast_slice(VERTICES), + usage: wgpu::BufferUsages::VERTEX, + }); + let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Index Buffer"), + contents: bytemuck::cast_slice(INDICES), + usage: wgpu::BufferUsages::INDEX, + }); + let num_indices = INDICES.len() as u32; + + Self { + surface, + device, + queue, + config, + size, + render_pipeline, + vertex_buffer, + index_buffer, + num_indices, + diffuse_texture, + diffuse_bind_group, + camera, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + + self.camera.aspect = self.config.width as f32 / self.config.height as f32; + } + } + + fn input(&mut self, event: &WindowEvent) -> bool { + self.camera_controller.process_events(event) + } + + fn update(&mut self) { + self.camera_controller.update_camera(&mut self.camera); + self.camera_uniform.update_view_proj(&self.camera); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: None, + }); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); + render_pass.set_bind_group(1, &self.camera_bind_group, &[]); + render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); + render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); + render_pass.draw_indexed(0..self.num_indices, 0, 0..1); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { + // new_inner_size is &mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::MainEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. + window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial6-uniforms/src/main.rs b/code/beginner/tutorial6-uniforms/src/main.rs index 62e6f66f..0fc5e594 100644 --- a/code/beginner/tutorial6-uniforms/src/main.rs +++ b/code/beginner/tutorial6-uniforms/src/main.rs @@ -1,3 +1,4 @@ +<<<<<<< HEAD use std::iter; use wgpu::util::DeviceExt; @@ -497,62 +498,10 @@ impl State { Ok(()) } } +======= +use tutorial6_uniforms::run; +>>>>>>> c4cfea7 (migrated tutorial to use lib.r) fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - // new_inner_size is &mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::MainEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. 
- window.request_redraw(); - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/beginner/tutorial7-instancing/src/lib.rs b/code/beginner/tutorial7-instancing/src/lib.rs new file mode 100644 index 00000000..8e534d95 --- /dev/null +++ b/code/beginner/tutorial7-instancing/src/lib.rs @@ -0,0 +1,668 @@ +use std::iter; + +use cgmath::prelude::*; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::{Window, WindowBuilder}, +}; + +mod texture; + +const NUM_INSTANCES_PER_ROW: u32 = 10; +const INSTANCE_DISPLACEMENT: cgmath::Vector3 = cgmath::Vector3::new( + NUM_INSTANCES_PER_ROW as f32 * 0.5, + 0.0, + NUM_INSTANCES_PER_ROW as f32 * 0.5, +); + +#[repr(C)] +#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)] +struct Vertex { + position: [f32; 3], + tex_coords: [f32; 2], +} + +impl Vertex { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + step_mode: wgpu::VertexStepMode::Vertex, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + shader_location: 0, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress, + shader_location: 1, + format: wgpu::VertexFormat::Float32x2, + }, + ], + } + } +} + +const VERTICES: &[Vertex] = &[ + Vertex { + position: [-0.0868241, 0.49240386, 0.0], + tex_coords: [0.4131759, 0.00759614], + }, // A + Vertex { + position: [-0.49513406, 0.06958647, 0.0], + tex_coords: [0.0048659444, 0.43041354], + }, // B + Vertex { + position: [-0.21918549, -0.44939706, 0.0], + tex_coords: [0.28081453, 0.949397], + }, // C + Vertex { + position: [0.35966998, -0.3473291, 0.0], + tex_coords: [0.85967, 0.84732914], + }, // D + Vertex { + position: [0.44147372, 0.2347359, 0.0], + tex_coords: [0.9414737, 0.2652641], + }, // E +]; + +const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4, /* padding */ 0]; + +#[rustfmt::skip] +pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( + 1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.5, 0.0, + 0.0, 0.0, 0.5, 1.0, +); + +struct Camera { + eye: cgmath::Point3, + target: cgmath::Point3, + up: cgmath::Vector3, + aspect: f32, + fovy: f32, + znear: f32, + zfar: f32, +} + +impl Camera { + fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { + let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); + let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); + proj * view + } +} + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_proj: cgmath::Matrix4::identity().into(), + } + } + + fn update_view_proj(&mut self, camera: &Camera) { + self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); + } +} + +struct CameraController { + speed: f32, + is_up_pressed: bool, + is_down_pressed: bool, + is_forward_pressed: bool, + is_backward_pressed: bool, + is_left_pressed: bool, + is_right_pressed: bool, +} + +impl CameraController { + fn new(speed: f32) -> Self { + Self { + speed, + is_up_pressed: false, + is_down_pressed: false, + is_forward_pressed: false, + is_backward_pressed: false, + is_left_pressed: false, + is_right_pressed: false, + } + } + + fn process_events(&mut self, event: &WindowEvent) -> bool { + match event { + WindowEvent::KeyboardInput 
{ + input: + KeyboardInput { + state, + virtual_keycode: Some(keycode), + .. + }, + .. + } => { + let is_pressed = *state == ElementState::Pressed; + match keycode { + VirtualKeyCode::Space => { + self.is_up_pressed = is_pressed; + true + } + VirtualKeyCode::LShift => { + self.is_down_pressed = is_pressed; + true + } + VirtualKeyCode::W | VirtualKeyCode::Up => { + self.is_forward_pressed = is_pressed; + true + } + VirtualKeyCode::A | VirtualKeyCode::Left => { + self.is_left_pressed = is_pressed; + true + } + VirtualKeyCode::S | VirtualKeyCode::Down => { + self.is_backward_pressed = is_pressed; + true + } + VirtualKeyCode::D | VirtualKeyCode::Right => { + self.is_right_pressed = is_pressed; + true + } + _ => false, + } + } + _ => false, + } + } + + fn update_camera(&self, camera: &mut Camera) { + let forward = (camera.target - camera.eye).normalize(); + + if self.is_forward_pressed { + camera.eye += forward * self.speed; + } + if self.is_backward_pressed { + camera.eye -= forward * self.speed; + } + + let right = forward.cross(camera.up); + + if self.is_right_pressed { + camera.eye += right * self.speed; + } + if self.is_left_pressed { + camera.eye -= right * self.speed; + } + } +} + +// NEW! +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +// NEW! +impl Instance { + fn to_raw(&self) -> InstanceRaw { + InstanceRaw { + model: (cgmath::Matrix4::from_translation(self.position) + * cgmath::Matrix4::from(self.rotation)) + .into(), + } + } +} + +// NEW! +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct InstanceRaw { + model: [[f32; 4]; 4], +} + +impl InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + ], + } + } +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, + render_pipeline: wgpu::RenderPipeline, + vertex_buffer: wgpu::Buffer, + index_buffer: wgpu::Buffer, + num_indices: u32, + #[allow(dead_code)] + diffuse_texture: texture::Texture, + diffuse_bind_group: wgpu::BindGroup, + camera: Camera, + camera_controller: CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + // NEW! 
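+    // Instancing state: the CPU-side Instance list plus a vertex buffer of
+    // per-instance model matrices (InstanceRaw) read at step mode Instance.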
+ instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + let diffuse_bytes = include_bytes!("happy-tree.png"); + let diffuse_texture = + texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &texture_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), + }, + ], + label: Some("diffuse_bind_group"), + }); + + let camera = Camera { + eye: (0.0, 5.0, 10.0).into(), + target: (0.0, 0.0, 0.0).into(), + up: cgmath::Vector3::unit_y(), + aspect: config.width as f32 / config.height as f32, + fovy: 45.0, + znear: 0.1, + zfar: 100.0, + }; + let camera_controller = CameraController::new(0.2); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let instances = (0..NUM_INSTANCES_PER_ROW) + .flat_map(|z| { + (0..NUM_INSTANCES_PER_ROW).map(move |x| { + let position = cgmath::Vector3 { + x: x as f32, + y: 0.0, + z: z as f32, + } - INSTANCE_DISPLACEMENT; + + let rotation = if position.is_zero() { + // this is needed so an object at (0, 0, 0) won't get scaled to zero + // as Quaternions can effect scale if they're not created correctly + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + cgmath::Quaternion::from_axis_angle(position.normalize(), 
cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[Vertex::desc(), InstanceRaw::desc()], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: None, + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }); + + let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Vertex Buffer"), + contents: bytemuck::cast_slice(VERTICES), + usage: wgpu::BufferUsages::VERTEX, + }); + let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Index Buffer"), + contents: bytemuck::cast_slice(INDICES), + usage: wgpu::BufferUsages::INDEX, + }); + let num_indices = INDICES.len() as u32; + + Self { + surface, + device, + queue, + config, + size, + render_pipeline, + vertex_buffer, + index_buffer, + num_indices, + diffuse_texture, + diffuse_bind_group, + camera, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + // NEW! 
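+            // The raw matrix data was uploaded above with create_buffer_init;
+            // State keeps only the Instance list and the GPU buffer handle.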
+ instances, + instance_buffer, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + + self.camera.aspect = self.config.width as f32 / self.config.height as f32; + } + } + + fn input(&mut self, event: &WindowEvent) -> bool { + self.camera_controller.process_events(event) + } + + fn update(&mut self) { + self.camera_controller.update_camera(&mut self.camera); + self.camera_uniform.update_view_proj(&self.camera); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: None, + }); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); + render_pass.set_bind_group(1, &self.camera_bind_group, &[]); + render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); + // UPDATED! + render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as _); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { + // new_inner_size is &mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::MainEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. + window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial7-instancing/src/main.rs b/code/beginner/tutorial7-instancing/src/main.rs index 912ad86c..c0599ca0 100644 --- a/code/beginner/tutorial7-instancing/src/main.rs +++ b/code/beginner/tutorial7-instancing/src/main.rs @@ -1,3 +1,4 @@ +<<<<<<< HEAD use std::iter; use cgmath::prelude::*; @@ -595,62 +596,10 @@ impl State { Ok(()) } } +======= +use tutorial7_instancing::run; +>>>>>>> c4cfea7 (migrated tutorial to use lib.r) fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - // new_inner_size is &mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::MainEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. 
- window.request_redraw(); - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/beginner/tutorial8-depth/src/lib.rs b/code/beginner/tutorial8-depth/src/lib.rs new file mode 100644 index 00000000..6c6894da --- /dev/null +++ b/code/beginner/tutorial8-depth/src/lib.rs @@ -0,0 +1,694 @@ +use std::iter; + +use cgmath::prelude::*; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::{Window, WindowBuilder}, +}; + +mod texture; + +#[repr(C)] +#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)] +struct Vertex { + position: [f32; 3], + tex_coords: [f32; 2], +} + +impl Vertex { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + step_mode: wgpu::VertexStepMode::Vertex, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + shader_location: 0, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress, + shader_location: 1, + format: wgpu::VertexFormat::Float32x2, + }, + ], + } + } +} + +const VERTICES: &[Vertex] = &[ + Vertex { + position: [-0.0868241, -0.49240386, 0.0], + tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], + }, // A + Vertex { + position: [-0.49513406, -0.06958647, 0.0], + tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], + }, // B + Vertex { + position: [-0.21918549, 0.44939706, 0.0], + tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397], + }, // C + Vertex { + position: [0.35966998, 0.3473291, 0.0], + tex_coords: [1.0 - 0.85967, 1.0 - 0.84732914], + }, // D + Vertex { + position: [0.44147372, -0.2347359, 0.0], + tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], + }, // E +]; + +const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4, /* padding */ 0]; + +#[rustfmt::skip] +pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( + 1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.5, 0.0, + 0.0, 0.0, 0.5, 1.0, +); + +const NUM_INSTANCES_PER_ROW: u32 = 10; +const INSTANCE_DISPLACEMENT: cgmath::Vector3 = cgmath::Vector3::new( + NUM_INSTANCES_PER_ROW as f32 * 0.5, + 0.0, + NUM_INSTANCES_PER_ROW as f32 * 0.5, +); + +struct Camera { + eye: cgmath::Point3, + target: cgmath::Point3, + up: cgmath::Vector3, + aspect: f32, + fovy: f32, + znear: f32, + zfar: f32, +} + +impl Camera { + fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { + let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); + let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); + proj * view + } +} + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_proj: cgmath::Matrix4::identity().into(), + } + } + + fn update_view_proj(&mut self, camera: &Camera) { + self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); + } +} + +struct CameraController { + speed: f32, + is_up_pressed: bool, + is_down_pressed: bool, + is_forward_pressed: bool, + is_backward_pressed: bool, + is_left_pressed: bool, + is_right_pressed: bool, +} + +impl CameraController { + fn new(speed: f32) -> Self { + Self { + speed, + is_up_pressed: false, + is_down_pressed: false, + is_forward_pressed: false, + is_backward_pressed: false, + is_left_pressed: false, + is_right_pressed: false, + } + } + + fn process_events(&mut self, event: &WindowEvent) -> bool { 
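+        // Returns true when the keystroke is consumed by the camera, so the
+        // State::input check in the event loop can skip its default handling.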
+ match event { + WindowEvent::KeyboardInput { + input: + KeyboardInput { + state, + virtual_keycode: Some(keycode), + .. + }, + .. + } => { + let is_pressed = *state == ElementState::Pressed; + match keycode { + VirtualKeyCode::Space => { + self.is_up_pressed = is_pressed; + true + } + VirtualKeyCode::LShift => { + self.is_down_pressed = is_pressed; + true + } + VirtualKeyCode::W | VirtualKeyCode::Up => { + self.is_forward_pressed = is_pressed; + true + } + VirtualKeyCode::A | VirtualKeyCode::Left => { + self.is_left_pressed = is_pressed; + true + } + VirtualKeyCode::S | VirtualKeyCode::Down => { + self.is_backward_pressed = is_pressed; + true + } + VirtualKeyCode::D | VirtualKeyCode::Right => { + self.is_right_pressed = is_pressed; + true + } + _ => false, + } + } + _ => false, + } + } + + fn update_camera(&self, camera: &mut Camera) { + let forward = camera.target - camera.eye; + let forward_norm = forward.normalize(); + let forward_mag = forward.magnitude(); + + // Prevents glitching when camera gets too close to the + // center of the scene. + if self.is_forward_pressed && forward_mag > self.speed { + camera.eye += forward_norm * self.speed; + } + if self.is_backward_pressed { + camera.eye -= forward_norm * self.speed; + } + + let right = forward_norm.cross(camera.up); + + // Redo radius calc in case the up/ down is pressed. + let forward = camera.target - camera.eye; + let forward_mag = forward.magnitude(); + + if self.is_right_pressed { + // Rescale the distance between the target and eye so + // that it doesn't change. The eye therefore still + // lies on the circle made by the target and eye. + camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; + } + if self.is_left_pressed { + camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; + } + } +} + +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +impl Instance { + fn to_raw(&self) -> InstanceRaw { + InstanceRaw { + model: (cgmath::Matrix4::from_translation(self.position) + * cgmath::Matrix4::from(self.rotation)) + .into(), + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct InstanceRaw { + #[allow(dead_code)] + model: [[f32; 4]; 4], +} + +impl InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. 
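+                // Each column is a vec4 (16 bytes), so the remaining offsets
+                // are size_of for [f32; 4], [f32; 8] and [f32; 12], covering
+                // shader locations 6, 7 and 8.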
+ wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + ], + } + } +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, + render_pipeline: wgpu::RenderPipeline, + vertex_buffer: wgpu::Buffer, + index_buffer: wgpu::Buffer, + num_indices: u32, + #[allow(dead_code)] + diffuse_texture: texture::Texture, + diffuse_bind_group: wgpu::BindGroup, + camera: Camera, + camera_controller: CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, + // NEW! + depth_texture: texture::Texture, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + surface.configure(&device, &config); + + let diffuse_bytes = include_bytes!("happy-tree.png"); + let diffuse_texture = + texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap(); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &texture_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&diffuse_texture.view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler), + }, + ], + label: Some("diffuse_bind_group"), + }); + + let camera = Camera { + eye: (0.0, 5.0, -10.0).into(), + target: (0.0, 0.0, 0.0).into(), + up: cgmath::Vector3::unit_y(), + 
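+            // aspect has to track the surface size; resize() updates it below.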
aspect: config.width as f32 / config.height as f32, + fovy: 45.0, + znear: 0.1, + zfar: 100.0, + }; + let camera_controller = CameraController::new(0.2); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let instances = (0..NUM_INSTANCES_PER_ROW) + .flat_map(|z| { + (0..NUM_INSTANCES_PER_ROW).map(move |x| { + let position = cgmath::Vector3 { + x: x as f32, + y: 0.0, + z: z as f32, + } - INSTANCE_DISPLACEMENT; + + let rotation = if position.is_zero() { + // this is needed so an object at (0, 0, 0) won't get scaled to zero + // as Quaternions can effect scale if they're not created correctly + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let depth_texture = + texture::Texture::create_depth_texture(&device, &config, "depth_texture"); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[Vertex::desc(), InstanceRaw::desc()], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: 
wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: Some(wgpu::DepthStencilState { + format: texture::Texture::DEPTH_FORMAT, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }); + + let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Vertex Buffer"), + contents: bytemuck::cast_slice(VERTICES), + usage: wgpu::BufferUsages::VERTEX, + }); + let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Index Buffer"), + contents: bytemuck::cast_slice(INDICES), + usage: wgpu::BufferUsages::INDEX, + }); + let num_indices = INDICES.len() as u32; + + Self { + surface, + device, + queue, + config, + size, + render_pipeline, + vertex_buffer, + index_buffer, + num_indices, + diffuse_texture, + diffuse_bind_group, + camera, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + instances, + instance_buffer, + depth_texture, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + self.camera.aspect = self.config.width as f32 / self.config.height as f32; + // NEW! 
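+            // All render-pass attachments must share the same dimensions, so
+            // the depth texture is rebuilt from the updated config on resize.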
+ self.depth_texture = + texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); + } + } + + fn input(&mut self, event: &WindowEvent) -> bool { + self.camera_controller.process_events(event) + } + + fn update(&mut self) { + self.camera_controller.update_camera(&mut self.camera); + self.camera_uniform.update_view_proj(&self.camera); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &self.depth_texture.view, + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(1.0), + store: true, + }), + stencil_ops: None, + }), + }); + + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_pipeline(&self.render_pipeline); + render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); + render_pass.set_bind_group(1, &self.camera_bind_group, &[]); + render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); + render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16); + render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + let event_loop = EventLoop::new(); + let window = WindowBuilder::new().build(&event_loop).unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + match event { + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { + // new_inner_size is &mut so w have to dereference it twice + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + Event::MainEventsCleared => { + // RedrawRequested will only trigger once, unless we manually + // request it. 
+ window.request_redraw(); + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial8-depth/src/main.rs b/code/beginner/tutorial8-depth/src/main.rs index 18e0831d..ac8a6a9f 100644 --- a/code/beginner/tutorial8-depth/src/main.rs +++ b/code/beginner/tutorial8-depth/src/main.rs @@ -1,3 +1,4 @@ +<<<<<<< HEAD use std::iter; use cgmath::prelude::*; @@ -622,61 +623,10 @@ impl State { Ok(()) } } +======= +use tutorial8_depth::run; +>>>>>>> c4cfea7 (migrated tutorial to use lib.r) fn main() { - let event_loop = EventLoop::new(); - let window = WindowBuilder::new().build(&event_loop).unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - match event { - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - // new_inner_size is &mut so w have to dereference it twice - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - Event::MainEventsCleared => { - // RedrawRequested will only trigger once, unless we manually - // request it. 
- window.request_redraw(); - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/beginner/tutorial9-models/src/lib.rs b/code/beginner/tutorial9-models/src/lib.rs new file mode 100644 index 00000000..714da4dd --- /dev/null +++ b/code/beginner/tutorial9-models/src/lib.rs @@ -0,0 +1,605 @@ +use std::iter; + +use cgmath::prelude::*; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::Window, +}; + +mod model; +mod texture; + +use model::{DrawModel, Vertex}; + +#[rustfmt::skip] +pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( + 1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.5, 0.0, + 0.0, 0.0, 0.5, 1.0, +); + +const NUM_INSTANCES_PER_ROW: u32 = 10; + +struct Camera { + eye: cgmath::Point3, + target: cgmath::Point3, + up: cgmath::Vector3, + aspect: f32, + fovy: f32, + znear: f32, + zfar: f32, +} + +impl Camera { + fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { + let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); + let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); + proj * view + } +} + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_proj: cgmath::Matrix4::identity().into(), + } + } + + fn update_view_proj(&mut self, camera: &Camera) { + self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); + } +} + +struct CameraController { + speed: f32, + is_up_pressed: bool, + is_down_pressed: bool, + is_forward_pressed: bool, + is_backward_pressed: bool, + is_left_pressed: bool, + is_right_pressed: bool, +} + +impl CameraController { + fn new(speed: f32) -> Self { + Self { + speed, + is_up_pressed: false, + is_down_pressed: false, + is_forward_pressed: false, + is_backward_pressed: false, + is_left_pressed: false, + is_right_pressed: false, + } + } + + fn process_events(&mut self, event: &WindowEvent) -> bool { + match event { + WindowEvent::KeyboardInput { + input: + KeyboardInput { + state, + virtual_keycode: Some(keycode), + .. + }, + .. + } => { + let is_pressed = *state == ElementState::Pressed; + match keycode { + VirtualKeyCode::Space => { + self.is_up_pressed = is_pressed; + true + } + VirtualKeyCode::LShift => { + self.is_down_pressed = is_pressed; + true + } + VirtualKeyCode::W | VirtualKeyCode::Up => { + self.is_forward_pressed = is_pressed; + true + } + VirtualKeyCode::A | VirtualKeyCode::Left => { + self.is_left_pressed = is_pressed; + true + } + VirtualKeyCode::S | VirtualKeyCode::Down => { + self.is_backward_pressed = is_pressed; + true + } + VirtualKeyCode::D | VirtualKeyCode::Right => { + self.is_right_pressed = is_pressed; + true + } + _ => false, + } + } + _ => false, + } + } + + fn update_camera(&self, camera: &mut Camera) { + let forward = camera.target - camera.eye; + let forward_norm = forward.normalize(); + let forward_mag = forward.magnitude(); + + // Prevents glitching when camera gets too close to the + // center of the scene. + if self.is_forward_pressed && forward_mag > self.speed { + camera.eye += forward_norm * self.speed; + } + if self.is_backward_pressed { + camera.eye -= forward_norm * self.speed; + } + + let right = forward_norm.cross(camera.up); + + // Redo radius calc in case the up/ down is pressed. 
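+        // (forward/forward_mag above were taken before eye moved, so they are
+        // recomputed here before applying the left/right orbit.)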
+ let forward = camera.target - camera.eye; + let forward_mag = forward.magnitude(); + + if self.is_right_pressed { + // Rescale the distance between the target and eye so + // that it doesn't change. The eye therefore still + // lies on the circle made by the target and eye. + camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; + } + if self.is_left_pressed { + camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; + } + } +} + +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +impl Instance { + fn to_raw(&self) -> InstanceRaw { + InstanceRaw { + model: (cgmath::Matrix4::from_translation(self.position) + * cgmath::Matrix4::from(self.rotation)) + .into(), + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct InstanceRaw { + #[allow(dead_code)] + model: [[f32; 4]; 4], +} + +impl InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. 
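+                // The accompanying shader.wgsl (not shown in this diff) is
+                // expected to reassemble these four vec4s into a mat4x4 model
+                // matrix; the offsets below advance by one vec4 (16 bytes).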
+ wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + ], + } + } +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + size: winit::dpi::PhysicalSize, + render_pipeline: wgpu::RenderPipeline, + obj_model: model::Model, + camera: Camera, + camera_controller: CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, + depth_texture: texture::Texture, +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + // Some(&std::path::Path::new("trace")), // Trace path + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + + surface.configure(&device, &config); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let camera = Camera { + eye: (0.0, 5.0, -10.0).into(), + target: (0.0, 0.0, 0.0).into(), + up: cgmath::Vector3::unit_y(), + aspect: config.width as f32 / config.height as f32, + fovy: 45.0, + znear: 0.1, + zfar: 100.0, + }; + let camera_controller = CameraController::new(0.2); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + const SPACE_BETWEEN: f32 = 3.0; + let instances = (0..NUM_INSTANCES_PER_ROW) + .flat_map(|z| { + (0..NUM_INSTANCES_PER_ROW).map(move |x| { + let x = SPACE_BETWEEN * (x as f32 - 
NUM_INSTANCES_PER_ROW as f32 / 2.0); + let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + + let position = cgmath::Vector3 { x, y: 0.0, z }; + + let rotation = if position.is_zero() { + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); + let obj_model = model::Model::load( + &device, + &queue, + &texture_bind_group_layout, + res_dir.join("cube.obj"), + ) + .unwrap(); + + let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { + label: Some("shader.wgsl"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }); + + let depth_texture = + texture::Texture::create_depth_texture(&device, &config, "depth_texture"); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout], + push_constant_ranges: &[], + }); + + let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(&render_pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: config.format, + blend: Some(wgpu::BlendState { + color: wgpu::BlendComponent::REPLACE, + alpha: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: Some(wgpu::DepthStencilState { + format: texture::Texture::DEPTH_FORMAT, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState { + count: 
1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }); + + Self { + surface, + device, + queue, + config, + size, + render_pipeline, + obj_model, + camera, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + instances, + instance_buffer, + depth_texture, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.camera.aspect = self.config.width as f32 / self.config.height as f32; + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + self.depth_texture = + texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); + } + } + fn input(&mut self, event: &WindowEvent) -> bool { + self.camera_controller.process_events(event) + } + + fn update(&mut self) { + self.camera_controller.update_camera(&mut self.camera); + self.camera_uniform.update_view_proj(&self.camera); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &self.depth_texture.view, + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(1.0), + store: true, + }), + stencil_ops: None, + }), + }); + + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_pipeline(&self.render_pipeline); + render_pass.draw_model_instanced( + &self.obj_model, + 0..self.instances.len() as u32, + &self.camera_bind_group, + ); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let title = env!("CARGO_PKG_NAME"); + let window = winit::window::WindowBuilder::new() + .with_title(title) + .build(&event_loop) + .unwrap(); + + // State::new uses async code, so we're going to wait for it to finish + let mut state = pollster::block_on(State::new(&window)); + + event_loop.run(move |event, _, control_flow| { + *control_flow = ControlFlow::Poll; + match event { + Event::MainEventsCleared => window.request_redraw(), + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
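+                            // Because the outer event is matched by reference, new_inner_size
+                            // binds as a &&mut PhysicalSize<u32>, hence the double dereference
+                            // (**new_inner_size) in the arm body.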
} => { + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + _ => {} + } + }); +} diff --git a/code/beginner/tutorial9-models/src/main.rs b/code/beginner/tutorial9-models/src/main.rs index bb03f79f..cb7413ff 100644 --- a/code/beginner/tutorial9-models/src/main.rs +++ b/code/beginner/tutorial9-models/src/main.rs @@ -1,593 +1,5 @@ -use std::iter; - -use cgmath::prelude::*; -use wgpu::util::DeviceExt; -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::Window, -}; - -mod model; -mod texture; - -use model::{DrawModel, Vertex}; - -#[rustfmt::skip] -pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( - 1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 0.5, 0.0, - 0.0, 0.0, 0.5, 1.0, -); - -const NUM_INSTANCES_PER_ROW: u32 = 10; - -struct Camera { - eye: cgmath::Point3, - target: cgmath::Point3, - up: cgmath::Vector3, - aspect: f32, - fovy: f32, - znear: f32, - zfar: f32, -} - -impl Camera { - fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { - let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); - let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); - proj * view - } -} - -#[repr(C)] -#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct CameraUniform { - view_proj: [[f32; 4]; 4], -} - -impl CameraUniform { - fn new() -> Self { - Self { - view_proj: cgmath::Matrix4::identity().into(), - } - } - - fn update_view_proj(&mut self, camera: &Camera) { - self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); - } -} - -struct CameraController { - speed: f32, - is_forward_pressed: bool, - is_backward_pressed: bool, - is_left_pressed: bool, - is_right_pressed: bool, -} - -impl CameraController { - fn new(speed: f32) -> Self { - Self { - speed, - is_forward_pressed: false, - is_backward_pressed: false, - is_left_pressed: false, - is_right_pressed: false, - } - } - - fn process_events(&mut self, event: &WindowEvent) -> bool { - match event { - WindowEvent::KeyboardInput { - input: - KeyboardInput { - state, - virtual_keycode: Some(keycode), - .. - }, - .. - } => { - let is_pressed = *state == ElementState::Pressed; - match keycode { - VirtualKeyCode::W | VirtualKeyCode::Up => { - self.is_forward_pressed = is_pressed; - true - } - VirtualKeyCode::A | VirtualKeyCode::Left => { - self.is_left_pressed = is_pressed; - true - } - VirtualKeyCode::S | VirtualKeyCode::Down => { - self.is_backward_pressed = is_pressed; - true - } - VirtualKeyCode::D | VirtualKeyCode::Right => { - self.is_right_pressed = is_pressed; - true - } - _ => false, - } - } - _ => false, - } - } - - fn update_camera(&self, camera: &mut Camera) { - let forward = camera.target - camera.eye; - let forward_norm = forward.normalize(); - let forward_mag = forward.magnitude(); - - // Prevents glitching when camera gets too close to the - // center of the scene. 
- if self.is_forward_pressed && forward_mag > self.speed { - camera.eye += forward_norm * self.speed; - } - if self.is_backward_pressed { - camera.eye -= forward_norm * self.speed; - } - - let right = forward_norm.cross(camera.up); - - // Redo radius calc in case the up/ down is pressed. - let forward = camera.target - camera.eye; - let forward_mag = forward.magnitude(); - - if self.is_right_pressed { - // Rescale the distance between the target and eye so - // that it doesn't change. The eye therefore still - // lies on the circle made by the target and eye. - camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; - } - if self.is_left_pressed { - camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; - } - } -} - -struct Instance { - position: cgmath::Vector3, - rotation: cgmath::Quaternion, -} - -impl Instance { - fn to_raw(&self) -> InstanceRaw { - InstanceRaw { - model: (cgmath::Matrix4::from_translation(self.position) - * cgmath::Matrix4::from(self.rotation)) - .into(), - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct InstanceRaw { - #[allow(dead_code)] - model: [[f32; 4]; 4], -} - -impl InstanceRaw { - fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { - use std::mem; - wgpu::VertexBufferLayout { - array_stride: mem::size_of::() as wgpu::BufferAddress, - // We need to switch from using a step mode of Vertex to Instance - // This means that our shaders will only change to use the next - // instance when the shader starts processing a new instance - step_mode: wgpu::VertexStepMode::Instance, - attributes: &[ - wgpu::VertexAttribute { - offset: 0, - // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll - // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later - shader_location: 5, - format: wgpu::VertexFormat::Float32x4, - }, - // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot - // for each vec4. We don't have to do this in code though. 
- wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, - shader_location: 6, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, - shader_location: 7, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, - shader_location: 8, - format: wgpu::VertexFormat::Float32x4, - }, - ], - } - } -} - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - size: winit::dpi::PhysicalSize, - render_pipeline: wgpu::RenderPipeline, - obj_model: model::Model, - camera: Camera, - camera_controller: CameraController, - camera_uniform: CameraUniform, - camera_buffer: wgpu::Buffer, - camera_bind_group: wgpu::BindGroup, - instances: Vec, - #[allow(dead_code)] - instance_buffer: wgpu::Buffer, - depth_texture: texture::Texture, -} - -impl State { - async fn new(window: &Window) -> Self { - let size = window.inner_size(); - - // The instance is a handle to our GPU - // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU - let instance = wgpu::Instance::new(wgpu::Backends::all()); - let surface = unsafe { instance.create_surface(window) }; - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: wgpu::Features::empty(), - limits: wgpu::Limits::default(), - }, - // Some(&std::path::Path::new("trace")), // Trace path - None, // Trace path - ) - .await - .unwrap(); - - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface.get_preferred_format(&adapter).unwrap(), - width: size.width, - height: size.height, - present_mode: wgpu::PresentMode::Fifo, - }; - - surface.configure(&device, &config); - - let texture_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - multisampled: false, - view_dimension: wgpu::TextureViewDimension::D2, - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 1, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - ], - label: Some("texture_bind_group_layout"), - }); - - let camera = Camera { - eye: (0.0, 5.0, -10.0).into(), - target: (0.0, 0.0, 0.0).into(), - up: cgmath::Vector3::unit_y(), - aspect: config.width as f32 / config.height as f32, - fovy: 45.0, - znear: 0.1, - zfar: 100.0, - }; - let camera_controller = CameraController::new(0.2); - - let mut camera_uniform = CameraUniform::new(); - camera_uniform.update_view_proj(&camera); - - let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Camera Buffer"), - contents: bytemuck::cast_slice(&[camera_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - const SPACE_BETWEEN: f32 = 3.0; - let instances = (0..NUM_INSTANCES_PER_ROW) - .flat_map(|z| { - (0..NUM_INSTANCES_PER_ROW).map(move |x| { - let x = SPACE_BETWEEN * (x as f32 - 
NUM_INSTANCES_PER_ROW as f32 / 2.0); - let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - - let position = cgmath::Vector3 { x, y: 0.0, z }; - - let rotation = if position.is_zero() { - cgmath::Quaternion::from_axis_angle( - cgmath::Vector3::unit_z(), - cgmath::Deg(0.0), - ) - } else { - cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) - }; - - Instance { position, rotation } - }) - }) - .collect::>(); - - let instance_data = instances.iter().map(Instance::to_raw).collect::>(); - let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Instance Buffer"), - contents: bytemuck::cast_slice(&instance_data), - usage: wgpu::BufferUsages::VERTEX, - }); - - let camera_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: Some("camera_bind_group_layout"), - }); - - let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &camera_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: camera_buffer.as_entire_binding(), - }], - label: Some("camera_bind_group"), - }); - - let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); - let obj_model = model::Model::load( - &device, - &queue, - &texture_bind_group_layout, - res_dir.join("cube.obj"), - ) - .unwrap(); - - let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor { - label: Some("shader.wgsl"), - source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), - }); - - let depth_texture = - texture::Texture::create_depth_texture(&device, &config, "depth_texture"); - - let render_pipeline_layout = - device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Render Pipeline Layout"), - bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout], - push_constant_ranges: &[], - }); - - let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Render Pipeline"), - layout: Some(&render_pipeline_layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_main", - buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()], - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_main", - targets: &[wgpu::ColorTargetState { - format: config.format, - blend: Some(wgpu::BlendState { - color: wgpu::BlendComponent::REPLACE, - alpha: wgpu::BlendComponent::REPLACE, - }), - write_mask: wgpu::ColorWrites::ALL, - }], - }), - primitive: wgpu::PrimitiveState { - topology: wgpu::PrimitiveTopology::TriangleList, - strip_index_format: None, - front_face: wgpu::FrontFace::Ccw, - cull_mode: Some(wgpu::Face::Back), - // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE - polygon_mode: wgpu::PolygonMode::Fill, - // Requires Features::DEPTH_CLIP_CONTROL - unclipped_depth: false, - // Requires Features::CONSERVATIVE_RASTERIZATION - conservative: false, - }, - depth_stencil: Some(wgpu::DepthStencilState { - format: texture::Texture::DEPTH_FORMAT, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::Less, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: wgpu::MultisampleState { - count: 
1, - mask: !0, - alpha_to_coverage_enabled: false, - }, - // If the pipeline will be used with a multiview render pass, this - // indicates how many array layers the attachments will have. - multiview: None, - }); - - Self { - surface, - device, - queue, - config, - size, - render_pipeline, - obj_model, - camera, - camera_controller, - camera_buffer, - camera_bind_group, - camera_uniform, - instances, - instance_buffer, - depth_texture, - } - } - - fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { - if new_size.width > 0 && new_size.height > 0 { - self.camera.aspect = self.config.width as f32 / self.config.height as f32; - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - self.depth_texture = - texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); - } - } - fn input(&mut self, event: &WindowEvent) -> bool { - self.camera_controller.process_events(event) - } - - fn update(&mut self) { - self.camera_controller.update_camera(&mut self.camera); - self.camera_uniform.update_view_proj(&self.camera); - self.queue.write_buffer( - &self.camera_buffer, - 0, - bytemuck::cast_slice(&[self.camera_uniform]), - ); - } - - fn render(&mut self) -> Result<(), wgpu::SurfaceError> { - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - { - let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color { - r: 0.1, - g: 0.2, - b: 0.3, - a: 1.0, - }), - store: true, - }, - }], - depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { - view: &self.depth_texture.view, - depth_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Clear(1.0), - store: true, - }), - stencil_ops: None, - }), - }); - - render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); - render_pass.set_pipeline(&self.render_pipeline); - render_pass.draw_model_instanced( - &self.obj_model, - 0..self.instances.len() as u32, - &self.camera_bind_group, - ); - } - - self.queue.submit(iter::once(encoder.finish())); - output.present(); - - Ok(()) - } -} +use tutorial9_models::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let title = env!("CARGO_PKG_NAME"); - let window = winit::window::WindowBuilder::new() - .with_title(title) - .build(&event_loop) - .unwrap(); - - // State::new uses async code, so we're going to wait for it to finish - let mut state = pollster::block_on(State::new(&window)); - - event_loop.run(move |event, _, control_flow| { - *control_flow = ControlFlow::Poll; - match event { - Event::MainEventsCleared => window.request_redraw(), - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/intermediate/tutorial10-lighting/src/lib.rs b/code/intermediate/tutorial10-lighting/src/lib.rs new file mode 100644 index 00000000..53db756f --- /dev/null +++ b/code/intermediate/tutorial10-lighting/src/lib.rs @@ -0,0 +1,744 @@ +use std::iter; + +use cgmath::prelude::*; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::Window, +}; + +mod model; +mod texture; + +use model::{DrawLight, DrawModel, Vertex}; + +#[rustfmt::skip] +pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( + 1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.5, 0.0, + 0.0, 0.0, 0.5, 1.0, +); + +const NUM_INSTANCES_PER_ROW: u32 = 10; + +struct Camera { + eye: cgmath::Point3, + target: cgmath::Point3, + up: cgmath::Vector3, + aspect: f32, + fovy: f32, + znear: f32, + zfar: f32, +} + +impl Camera { + fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { + let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); + let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); + proj * view + } +} + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_position: [f32; 4], + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_position: [0.0; 4], + view_proj: cgmath::Matrix4::identity().into(), + } + } + + fn update_view_proj(&mut self, camera: &Camera) { + // We're using Vector4 because ofthe camera_uniform 16 byte spacing requirement + self.view_position = camera.eye.to_homogeneous().into(); + self.view_proj = camera.build_view_projection_matrix().into(); + } +} + +struct CameraController { + speed: f32, + is_up_pressed: bool, + is_down_pressed: bool, + is_forward_pressed: bool, + is_backward_pressed: bool, + is_left_pressed: bool, + is_right_pressed: bool, +} + +impl CameraController { + fn new(speed: f32) -> Self { + Self { + speed, + is_up_pressed: false, + is_down_pressed: false, + is_forward_pressed: false, + is_backward_pressed: false, + is_left_pressed: false, + is_right_pressed: false, + } + } + + fn process_events(&mut self, event: &WindowEvent) -> bool { + match event { + WindowEvent::KeyboardInput { + input: + KeyboardInput { + state, + virtual_keycode: Some(keycode), + .. + }, + .. 
+ } => { + let is_pressed = *state == ElementState::Pressed; + match keycode { + VirtualKeyCode::Space => { + self.is_up_pressed = is_pressed; + true + } + VirtualKeyCode::LShift => { + self.is_down_pressed = is_pressed; + true + } + VirtualKeyCode::W | VirtualKeyCode::Up => { + self.is_forward_pressed = is_pressed; + true + } + VirtualKeyCode::A | VirtualKeyCode::Left => { + self.is_left_pressed = is_pressed; + true + } + VirtualKeyCode::S | VirtualKeyCode::Down => { + self.is_backward_pressed = is_pressed; + true + } + VirtualKeyCode::D | VirtualKeyCode::Right => { + self.is_right_pressed = is_pressed; + true + } + _ => false, + } + } + _ => false, + } + } + + fn update_camera(&self, camera: &mut Camera) { + let forward = camera.target - camera.eye; + let forward_norm = forward.normalize(); + let forward_mag = forward.magnitude(); + + // Prevents glitching when camera gets too close to the + // center of the scene. + if self.is_forward_pressed && forward_mag > self.speed { + camera.eye += forward_norm * self.speed; + } + if self.is_backward_pressed { + camera.eye -= forward_norm * self.speed; + } + + let right = forward_norm.cross(camera.up); + + // Redo radius calc in case the up/ down is pressed. + let forward = camera.target - camera.eye; + let forward_mag = forward.magnitude(); + + if self.is_right_pressed { + // Rescale the distance between the target and eye so + // that it doesn't change. The eye therefore still + // lies on the circle made by the target and eye. + camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; + } + if self.is_left_pressed { + camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; + } + } +} + +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +impl Instance { + fn to_raw(&self) -> InstanceRaw { + let model = + cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation); + InstanceRaw { + model: model.into(), + normal: cgmath::Matrix3::from(self.rotation).into(), + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +#[allow(dead_code)] +struct InstanceRaw { + model: [[f32; 4]; 4], + normal: [[f32; 3]; 3], +} + +impl model::Vertex for InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. 
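+                // In this tutorial the instance data also carries the rotation's 3x3 normal
+                // matrix, split into three Float32x3 attributes at shader locations 9-11
+                // below; their offsets ([f32; 16], [f32; 19] and [f32; 22]) simply continue
+                // past the 16 floats of the model matrix.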
+ wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, + shader_location: 9, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, + shader_location: 10, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, + shader_location: 11, + format: wgpu::VertexFormat::Float32x3, + }, + ], + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct LightUniform { + position: [f32; 3], + // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here + _padding: u32, + color: [f32; 3], + // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here + _padding2: u32, +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + render_pipeline: wgpu::RenderPipeline, + obj_model: model::Model, + camera: Camera, + camera_controller: CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, + depth_texture: texture::Texture, + size: winit::dpi::PhysicalSize, + light_uniform: LightUniform, + light_buffer: wgpu::Buffer, + light_bind_group: wgpu::BindGroup, + light_render_pipeline: wgpu::RenderPipeline, +} + +fn create_render_pipeline( + device: &wgpu::Device, + layout: &wgpu::PipelineLayout, + color_format: wgpu::TextureFormat, + depth_format: Option, + vertex_layouts: &[wgpu::VertexBufferLayout], + shader: wgpu::ShaderModuleDescriptor, +) -> wgpu::RenderPipeline { + let shader = device.create_shader_module(&shader); + + device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: vertex_layouts, + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: color_format, + blend: Some(wgpu::BlendState { + alpha: wgpu::BlendComponent::REPLACE, + color: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { + format, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: 
wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }) +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + + surface.configure(&device, &config); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let camera = Camera { + eye: (0.0, 5.0, -10.0).into(), + target: (0.0, 0.0, 0.0).into(), + up: cgmath::Vector3::unit_y(), + aspect: config.width as f32 / config.height as f32, + fovy: 45.0, + znear: 0.1, + zfar: 100.0, + }; + + let camera_controller = CameraController::new(0.2); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + const SPACE_BETWEEN: f32 = 3.0; + let instances = (0..NUM_INSTANCES_PER_ROW) + .flat_map(|z| { + (0..NUM_INSTANCES_PER_ROW).map(move |x| { + let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + + let position = cgmath::Vector3 { x, y: 0.0, z }; + + let rotation = if position.is_zero() { + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + 
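+            // The camera uniform is now visible to the fragment stage as well
+            // (VERTEX | FRAGMENT below), presumably so the lighting shader can read
+            // view_position when computing specular highlights.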
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); + let obj_model = model::Model::load( + &device, + &queue, + &texture_bind_group_layout, + res_dir.join("cube.obj"), + ) + .unwrap(); + + let light_uniform = LightUniform { + position: [2.0, 2.0, 2.0], + _padding: 0, + color: [1.0, 1.0, 1.0], + _padding2: 0, + }; + + let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Light VB"), + contents: bytemuck::cast_slice(&[light_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let light_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: None, + }); + + let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &light_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: light_buffer.as_entire_binding(), + }], + label: None, + }); + + let depth_texture = + texture::Texture::create_depth_texture(&device, &config, "depth_texture"); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[ + &texture_bind_group_layout, + &camera_bind_group_layout, + &light_bind_group_layout, + ], + push_constant_ranges: &[], + }); + + let render_pipeline = { + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Normal Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }; + create_render_pipeline( + &device, + &render_pipeline_layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc(), InstanceRaw::desc()], + shader, + ) + }; + + let light_render_pipeline = { + let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Light Pipeline Layout"), + bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], + push_constant_ranges: &[], + }); + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Light Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), + }; + create_render_pipeline( + &device, + &layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc()], + shader, + ) + }; + + Self { + surface, + device, + queue, + config, + render_pipeline, + obj_model, + camera, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + instances, + instance_buffer, + depth_texture, + size, + light_uniform, + light_buffer, + light_bind_group, + light_render_pipeline, + } + } + + fn 
resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.camera.aspect = self.config.width as f32 / self.config.height as f32; + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + self.depth_texture = + texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); + } + } + + fn input(&mut self, event: &WindowEvent) -> bool { + self.camera_controller.process_events(event) + } + + fn update(&mut self) { + self.camera_controller.update_camera(&mut self.camera); + self.camera_uniform.update_view_proj(&self.camera); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + + // Update the light + let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); + self.light_uniform.position = + (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) + * old_position) + .into(); + self.queue.write_buffer( + &self.light_buffer, + 0, + bytemuck::cast_slice(&[self.light_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &self.depth_texture.view, + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(1.0), + store: true, + }), + stencil_ops: None, + }), + }); + + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_pipeline(&self.light_render_pipeline); + render_pass.draw_light_model( + &self.obj_model, + &self.camera_bind_group, + &self.light_bind_group, + ); + render_pass.set_pipeline(&self.render_pipeline); + render_pass.draw_model_instanced( + &self.obj_model, + 0..self.instances.len() as u32, + &self.camera_bind_group, + &self.light_bind_group, + ); + } + + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let title = env!("CARGO_PKG_NAME"); + let window = winit::window::WindowBuilder::new() + .with_title(title) + .build(&event_loop) + .unwrap(); + let mut state = pollster::block_on(State::new(&window)); + event_loop.run(move |event, _, control_flow| { + *control_flow = ControlFlow::Poll; + match event { + Event::MainEventsCleared => window.request_redraw(), + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + _ => {} + } + }); +} diff --git a/code/intermediate/tutorial10-lighting/src/main.rs b/code/intermediate/tutorial10-lighting/src/main.rs index 59a1682a..9b432b90 100644 --- a/code/intermediate/tutorial10-lighting/src/main.rs +++ b/code/intermediate/tutorial10-lighting/src/main.rs @@ -1,732 +1,5 @@ -use std::iter; - -use cgmath::prelude::*; -use wgpu::util::DeviceExt; -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::Window, -}; - -mod model; -mod texture; - -use model::{DrawLight, DrawModel, Vertex}; - -#[rustfmt::skip] -pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( - 1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 0.5, 0.0, - 0.0, 0.0, 0.5, 1.0, -); - -const NUM_INSTANCES_PER_ROW: u32 = 10; - -struct Camera { - eye: cgmath::Point3, - target: cgmath::Point3, - up: cgmath::Vector3, - aspect: f32, - fovy: f32, - znear: f32, - zfar: f32, -} - -impl Camera { - fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { - let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); - let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); - proj * view - } -} - -#[repr(C)] -#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct CameraUniform { - view_position: [f32; 4], - view_proj: [[f32; 4]; 4], -} - -impl CameraUniform { - fn new() -> Self { - Self { - view_position: [0.0; 4], - view_proj: cgmath::Matrix4::identity().into(), - } - } - - fn update_view_proj(&mut self, camera: &Camera) { - // We're using Vector4 because ofthe camera_uniform 16 byte spacing requirement - self.view_position = camera.eye.to_homogeneous().into(); - self.view_proj = camera.build_view_projection_matrix().into(); - } -} - -struct CameraController { - speed: f32, - is_forward_pressed: bool, - is_backward_pressed: bool, - is_left_pressed: bool, - is_right_pressed: bool, -} - -impl CameraController { - fn new(speed: f32) -> Self { - Self { - speed, - is_forward_pressed: false, - is_backward_pressed: false, - is_left_pressed: false, - is_right_pressed: false, - } - } - - fn process_events(&mut self, event: &WindowEvent) -> bool { - match event { - WindowEvent::KeyboardInput { - input: - KeyboardInput { - state, - virtual_keycode: Some(keycode), - .. - }, - .. 
- } => { - let is_pressed = *state == ElementState::Pressed; - match keycode { - VirtualKeyCode::W | VirtualKeyCode::Up => { - self.is_forward_pressed = is_pressed; - true - } - VirtualKeyCode::A | VirtualKeyCode::Left => { - self.is_left_pressed = is_pressed; - true - } - VirtualKeyCode::S | VirtualKeyCode::Down => { - self.is_backward_pressed = is_pressed; - true - } - VirtualKeyCode::D | VirtualKeyCode::Right => { - self.is_right_pressed = is_pressed; - true - } - _ => false, - } - } - _ => false, - } - } - - fn update_camera(&self, camera: &mut Camera) { - let forward = camera.target - camera.eye; - let forward_norm = forward.normalize(); - let forward_mag = forward.magnitude(); - - // Prevents glitching when camera gets too close to the - // center of the scene. - if self.is_forward_pressed && forward_mag > self.speed { - camera.eye += forward_norm * self.speed; - } - if self.is_backward_pressed { - camera.eye -= forward_norm * self.speed; - } - - let right = forward_norm.cross(camera.up); - - // Redo radius calc in case the up/ down is pressed. - let forward = camera.target - camera.eye; - let forward_mag = forward.magnitude(); - - if self.is_right_pressed { - // Rescale the distance between the target and eye so - // that it doesn't change. The eye therefore still - // lies on the circle made by the target and eye. - camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; - } - if self.is_left_pressed { - camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; - } - } -} - -struct Instance { - position: cgmath::Vector3, - rotation: cgmath::Quaternion, -} - -impl Instance { - fn to_raw(&self) -> InstanceRaw { - let model = - cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation); - InstanceRaw { - model: model.into(), - normal: cgmath::Matrix3::from(self.rotation).into(), - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -#[allow(dead_code)] -struct InstanceRaw { - model: [[f32; 4]; 4], - normal: [[f32; 3]; 3], -} - -impl model::Vertex for InstanceRaw { - fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { - use std::mem; - wgpu::VertexBufferLayout { - array_stride: mem::size_of::() as wgpu::BufferAddress, - // We need to switch from using a step mode of Vertex to Instance - // This means that our shaders will only change to use the next - // instance when the shader starts processing a new instance - step_mode: wgpu::VertexStepMode::Instance, - attributes: &[ - wgpu::VertexAttribute { - offset: 0, - // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll - // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later - shader_location: 5, - format: wgpu::VertexFormat::Float32x4, - }, - // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot - // for each vec4. We don't have to do this in code though. 
- wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, - shader_location: 6, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, - shader_location: 7, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, - shader_location: 8, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, - shader_location: 9, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, - shader_location: 10, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, - shader_location: 11, - format: wgpu::VertexFormat::Float32x3, - }, - ], - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct LightUniform { - position: [f32; 3], - // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here - _padding: u32, - color: [f32; 3], - // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here - _padding2: u32, -} - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - render_pipeline: wgpu::RenderPipeline, - obj_model: model::Model, - camera: Camera, - camera_controller: CameraController, - camera_uniform: CameraUniform, - camera_buffer: wgpu::Buffer, - camera_bind_group: wgpu::BindGroup, - instances: Vec, - #[allow(dead_code)] - instance_buffer: wgpu::Buffer, - depth_texture: texture::Texture, - size: winit::dpi::PhysicalSize, - light_uniform: LightUniform, - light_buffer: wgpu::Buffer, - light_bind_group: wgpu::BindGroup, - light_render_pipeline: wgpu::RenderPipeline, -} - -fn create_render_pipeline( - device: &wgpu::Device, - layout: &wgpu::PipelineLayout, - color_format: wgpu::TextureFormat, - depth_format: Option, - vertex_layouts: &[wgpu::VertexBufferLayout], - shader: wgpu::ShaderModuleDescriptor, -) -> wgpu::RenderPipeline { - let shader = device.create_shader_module(&shader); - - device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Render Pipeline"), - layout: Some(layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_main", - buffers: vertex_layouts, - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_main", - targets: &[wgpu::ColorTargetState { - format: color_format, - blend: Some(wgpu::BlendState { - alpha: wgpu::BlendComponent::REPLACE, - color: wgpu::BlendComponent::REPLACE, - }), - write_mask: wgpu::ColorWrites::ALL, - }], - }), - primitive: wgpu::PrimitiveState { - topology: wgpu::PrimitiveTopology::TriangleList, - strip_index_format: None, - front_face: wgpu::FrontFace::Ccw, - cull_mode: Some(wgpu::Face::Back), - // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE - polygon_mode: wgpu::PolygonMode::Fill, - // Requires Features::DEPTH_CLIP_CONTROL - unclipped_depth: false, - // Requires Features::CONSERVATIVE_RASTERIZATION - conservative: false, - }, - depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { - format, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::Less, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: 
wgpu::MultisampleState { - count: 1, - mask: !0, - alpha_to_coverage_enabled: false, - }, - // If the pipeline will be used with a multiview render pass, this - // indicates how many array layers the attachments will have. - multiview: None, - }) -} - -impl State { - async fn new(window: &Window) -> Self { - let size = window.inner_size(); - - // The instance is a handle to our GPU - // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU - let instance = wgpu::Instance::new(wgpu::Backends::all()); - let surface = unsafe { instance.create_surface(window) }; - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: wgpu::Features::empty(), - limits: wgpu::Limits::default(), - }, - None, - ) - .await - .unwrap(); - - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface.get_preferred_format(&adapter).unwrap(), - width: size.width, - height: size.height, - present_mode: wgpu::PresentMode::Fifo, - }; - - surface.configure(&device, &config); - - let texture_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - multisampled: false, - view_dimension: wgpu::TextureViewDimension::D2, - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 1, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - ], - label: Some("texture_bind_group_layout"), - }); - - let camera = Camera { - eye: (0.0, 5.0, -10.0).into(), - target: (0.0, 0.0, 0.0).into(), - up: cgmath::Vector3::unit_y(), - aspect: config.width as f32 / config.height as f32, - fovy: 45.0, - znear: 0.1, - zfar: 100.0, - }; - - let camera_controller = CameraController::new(0.2); - - let mut camera_uniform = CameraUniform::new(); - camera_uniform.update_view_proj(&camera); - - let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Camera Buffer"), - contents: bytemuck::cast_slice(&[camera_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - const SPACE_BETWEEN: f32 = 3.0; - let instances = (0..NUM_INSTANCES_PER_ROW) - .flat_map(|z| { - (0..NUM_INSTANCES_PER_ROW).map(move |x| { - let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - - let position = cgmath::Vector3 { x, y: 0.0, z }; - - let rotation = if position.is_zero() { - cgmath::Quaternion::from_axis_angle( - cgmath::Vector3::unit_z(), - cgmath::Deg(0.0), - ) - } else { - cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) - }; - - Instance { position, rotation } - }) - }) - .collect::>(); - - let instance_data = instances.iter().map(Instance::to_raw).collect::>(); - let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Instance Buffer"), - contents: bytemuck::cast_slice(&instance_data), - usage: wgpu::BufferUsages::VERTEX, - }); - - let camera_bind_group_layout = - 
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: Some("camera_bind_group_layout"), - }); - - let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &camera_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: camera_buffer.as_entire_binding(), - }], - label: Some("camera_bind_group"), - }); - - let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); - let obj_model = model::Model::load( - &device, - &queue, - &texture_bind_group_layout, - res_dir.join("cube.obj"), - ) - .unwrap(); - - let light_uniform = LightUniform { - position: [2.0, 2.0, 2.0], - _padding: 0, - color: [1.0, 1.0, 1.0], - _padding2: 0, - }; - - let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Light VB"), - contents: bytemuck::cast_slice(&[light_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - let light_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: None, - }); - - let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &light_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: light_buffer.as_entire_binding(), - }], - label: None, - }); - - let depth_texture = - texture::Texture::create_depth_texture(&device, &config, "depth_texture"); - - let render_pipeline_layout = - device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Render Pipeline Layout"), - bind_group_layouts: &[ - &texture_bind_group_layout, - &camera_bind_group_layout, - &light_bind_group_layout, - ], - push_constant_ranges: &[], - }); - - let render_pipeline = { - let shader = wgpu::ShaderModuleDescriptor { - label: Some("Normal Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), - }; - create_render_pipeline( - &device, - &render_pipeline_layout, - config.format, - Some(texture::Texture::DEPTH_FORMAT), - &[model::ModelVertex::desc(), InstanceRaw::desc()], - shader, - ) - }; - - let light_render_pipeline = { - let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Light Pipeline Layout"), - bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], - push_constant_ranges: &[], - }); - let shader = wgpu::ShaderModuleDescriptor { - label: Some("Light Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), - }; - create_render_pipeline( - &device, - &layout, - config.format, - Some(texture::Texture::DEPTH_FORMAT), - &[model::ModelVertex::desc()], - shader, - ) - }; - - Self { - surface, - device, - queue, - config, - render_pipeline, - obj_model, - camera, - camera_controller, - camera_buffer, - camera_bind_group, - camera_uniform, - instances, - instance_buffer, - depth_texture, - size, - light_uniform, - light_buffer, - light_bind_group, - light_render_pipeline, - } - } - - fn 
resize(&mut self, new_size: winit::dpi::PhysicalSize) { - if new_size.width > 0 && new_size.height > 0 { - self.camera.aspect = self.config.width as f32 / self.config.height as f32; - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - self.depth_texture = - texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); - } - } - - fn input(&mut self, event: &WindowEvent) -> bool { - self.camera_controller.process_events(event) - } - - fn update(&mut self) { - self.camera_controller.update_camera(&mut self.camera); - self.camera_uniform.update_view_proj(&self.camera); - self.queue.write_buffer( - &self.camera_buffer, - 0, - bytemuck::cast_slice(&[self.camera_uniform]), - ); - - // Update the light - let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); - self.light_uniform.position = - (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) - * old_position) - .into(); - self.queue.write_buffer( - &self.light_buffer, - 0, - bytemuck::cast_slice(&[self.light_uniform]), - ); - } - - fn render(&mut self) -> Result<(), wgpu::SurfaceError> { - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - { - let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color { - r: 0.1, - g: 0.2, - b: 0.3, - a: 1.0, - }), - store: true, - }, - }], - depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { - view: &self.depth_texture.view, - depth_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Clear(1.0), - store: true, - }), - stencil_ops: None, - }), - }); - - render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); - render_pass.set_pipeline(&self.light_render_pipeline); - render_pass.draw_light_model( - &self.obj_model, - &self.camera_bind_group, - &self.light_bind_group, - ); - render_pass.set_pipeline(&self.render_pipeline); - render_pass.draw_model_instanced( - &self.obj_model, - 0..self.instances.len() as u32, - &self.camera_bind_group, - &self.light_bind_group, - ); - } - - self.queue.submit(iter::once(encoder.finish())); - output.present(); - - Ok(()) - } -} +use tutorial10_lighting::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let title = env!("CARGO_PKG_NAME"); - let window = winit::window::WindowBuilder::new() - .with_title(title) - .build(&event_loop) - .unwrap(); - let mut state = pollster::block_on(State::new(&window)); - event_loop.run(move |event, _, control_flow| { - *control_flow = ControlFlow::Poll; - match event { - Event::MainEventsCleared => window.request_redraw(), - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. 
- } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/intermediate/tutorial11-normals/src/lib.rs b/code/intermediate/tutorial11-normals/src/lib.rs new file mode 100644 index 00000000..838299a4 --- /dev/null +++ b/code/intermediate/tutorial11-normals/src/lib.rs @@ -0,0 +1,793 @@ +use std::iter; + +use cgmath::prelude::*; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::Window, +}; + +mod model; +mod texture; + +use model::{DrawLight, DrawModel, Vertex}; + +#[rustfmt::skip] +pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( + 1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 0.5, 0.0, + 0.0, 0.0, 0.5, 1.0, +); + +const NUM_INSTANCES_PER_ROW: u32 = 10; + +struct Camera { + eye: cgmath::Point3, + target: cgmath::Point3, + up: cgmath::Vector3, + aspect: f32, + fovy: f32, + znear: f32, + zfar: f32, +} + +impl Camera { + fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { + let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); + let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); + proj * view + } +} + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_position: [f32; 4], + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_position: [0.0; 4], + view_proj: cgmath::Matrix4::identity().into(), + } + } + + fn update_view_proj(&mut self, camera: &Camera) { + self.view_position = camera.eye.to_homogeneous().into(); + self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); + } +} + +struct CameraController { + speed: f32, + is_up_pressed: bool, + is_down_pressed: bool, + is_forward_pressed: bool, + is_backward_pressed: bool, + is_left_pressed: bool, + is_right_pressed: bool, +} + +impl CameraController { + fn new(speed: f32) -> Self { + Self { + speed, + is_up_pressed: false, + is_down_pressed: false, + is_forward_pressed: false, + is_backward_pressed: false, + is_left_pressed: false, + is_right_pressed: false, + } + } + + fn process_events(&mut self, event: &WindowEvent) -> bool { + match event { + WindowEvent::KeyboardInput { + input: + KeyboardInput { + state, + virtual_keycode: Some(keycode), + .. + }, + .. 
+ } => { + let is_pressed = *state == ElementState::Pressed; + match keycode { + VirtualKeyCode::Space => { + self.is_up_pressed = is_pressed; + true + } + VirtualKeyCode::LShift => { + self.is_down_pressed = is_pressed; + true + } + VirtualKeyCode::W | VirtualKeyCode::Up => { + self.is_forward_pressed = is_pressed; + true + } + VirtualKeyCode::A | VirtualKeyCode::Left => { + self.is_left_pressed = is_pressed; + true + } + VirtualKeyCode::S | VirtualKeyCode::Down => { + self.is_backward_pressed = is_pressed; + true + } + VirtualKeyCode::D | VirtualKeyCode::Right => { + self.is_right_pressed = is_pressed; + true + } + _ => false, + } + } + _ => false, + } + } + + fn update_camera(&self, camera: &mut Camera) { + let forward = camera.target - camera.eye; + let forward_norm = forward.normalize(); + let forward_mag = forward.magnitude(); + + // Prevents glitching when camera gets too close to the + // center of the scene. + if self.is_forward_pressed && forward_mag > self.speed { + camera.eye += forward_norm * self.speed; + } + if self.is_backward_pressed { + camera.eye -= forward_norm * self.speed; + } + + let right = forward_norm.cross(camera.up); + + // Redo radius calc in case the up/ down is pressed. + let forward = camera.target - camera.eye; + let forward_mag = forward.magnitude(); + + if self.is_right_pressed { + // Rescale the distance between the target and eye so + // that it doesn't change. The eye therefore still + // lies on the circle made by the target and eye. + camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; + } + if self.is_left_pressed { + camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; + } + } +} + +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +impl Instance { + fn to_raw(&self) -> InstanceRaw { + InstanceRaw { + model: (cgmath::Matrix4::from_translation(self.position) + * cgmath::Matrix4::from(self.rotation)) + .into(), + normal: cgmath::Matrix3::from(self.rotation).into(), + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +#[allow(dead_code)] +struct InstanceRaw { + model: [[f32; 4]; 4], + normal: [[f32; 3]; 3], +} + +impl model::Vertex for InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. 
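// Editorial aside, not part of the patch: the byte offsets used for shader
// locations 5 through 11 in this attribute list fall out of InstanceRaw's
// repr(C) layout -- four 16-byte vec4 rows for the model matrix, then three
// 12-byte vec3 rows for the normal matrix (using the rotation's 3x3 directly
// is fine here because the instance transform has no non-uniform scale). A
// minimal sanity check, assuming it sits in the same module as InstanceRaw and
// LightUniform (the latter is defined a little further down in this file):
#[cfg(test)]
mod layout_checks {
    use super::{InstanceRaw, LightUniform};

    #[test]
    fn instance_raw_matches_its_vertex_attributes() {
        assert_eq!(std::mem::size_of::<[f32; 4]>(), 16); // offset of location 6
        assert_eq!(std::mem::size_of::<[f32; 8]>(), 32); // offset of location 7
        assert_eq!(std::mem::size_of::<[f32; 12]>(), 48); // offset of location 8
        assert_eq!(std::mem::size_of::<[f32; 16]>(), 64); // offset of location 9
        assert_eq!(std::mem::size_of::<[f32; 19]>(), 76); // offset of location 10
        assert_eq!(std::mem::size_of::<[f32; 22]>(), 88); // offset of location 11
        assert_eq!(std::mem::size_of::<InstanceRaw>(), 100); // array_stride
    }

    #[test]
    fn light_uniform_is_padded_to_16_byte_multiples() {
        // [f32; 3] position + u32 pad + [f32; 3] color + u32 pad = 32 bytes.
        assert_eq!(std::mem::size_of::<LightUniform>(), 32);
    }
}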
+ wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, + shader_location: 9, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, + shader_location: 10, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, + shader_location: 11, + format: wgpu::VertexFormat::Float32x3, + }, + ], + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct LightUniform { + position: [f32; 3], + // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here + _padding: u32, + color: [f32; 3], + _padding2: u32, +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + render_pipeline: wgpu::RenderPipeline, + obj_model: model::Model, + camera: Camera, + camera_controller: CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, + depth_texture: texture::Texture, + size: winit::dpi::PhysicalSize, + light_uniform: LightUniform, + light_buffer: wgpu::Buffer, + light_bind_group: wgpu::BindGroup, + light_render_pipeline: wgpu::RenderPipeline, + #[allow(dead_code)] + debug_material: model::Material, +} + +fn create_render_pipeline( + device: &wgpu::Device, + layout: &wgpu::PipelineLayout, + color_format: wgpu::TextureFormat, + depth_format: Option, + vertex_layouts: &[wgpu::VertexBufferLayout], + shader: wgpu::ShaderModuleDescriptor, +) -> wgpu::RenderPipeline { + let shader = device.create_shader_module(&shader); + + device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Render Pipeline"), + layout: Some(layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: vertex_layouts, + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: color_format, + blend: Some(wgpu::BlendState { + alpha: wgpu::BlendComponent::REPLACE, + color: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { + format, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState { + count: 1, 
+ mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }) +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + + surface.configure(&device, &config); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + // normal map + wgpu::BindGroupLayoutEntry { + binding: 2, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 3, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + let camera = Camera { + eye: (0.0, 5.0, -10.0).into(), + target: (0.0, 0.0, 0.0).into(), + up: cgmath::Vector3::unit_y(), + aspect: config.width as f32 / config.height as f32, + fovy: 45.0, + znear: 0.1, + zfar: 100.0, + }; + + let camera_controller = CameraController::new(0.2); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + const SPACE_BETWEEN: f32 = 3.0; + let instances = (0..NUM_INSTANCES_PER_ROW) + .flat_map(|z| { + (0..NUM_INSTANCES_PER_ROW).map(move |x| { + let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + + let position = cgmath::Vector3 { x, y: 0.0, z }; + + let rotation = if position.is_zero() { + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + 
cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); + let obj_model = model::Model::load( + &device, + &queue, + &texture_bind_group_layout, + res_dir.join("cube.obj"), + ) + .unwrap(); + + let light_uniform = LightUniform { + position: [2.0, 2.0, 2.0], + _padding: 0, + color: [1.0, 1.0, 1.0], + _padding2: 0, + }; + + let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Light VB"), + contents: bytemuck::cast_slice(&[light_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let light_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: None, + }); + + let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &light_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: light_buffer.as_entire_binding(), + }], + label: None, + }); + + let depth_texture = + texture::Texture::create_depth_texture(&device, &config, "depth_texture"); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[ + &texture_bind_group_layout, + &camera_bind_group_layout, + &light_bind_group_layout, + ], + push_constant_ranges: &[], + }); + + let render_pipeline = { + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Normal Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }; + create_render_pipeline( + &device, + &render_pipeline_layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc(), InstanceRaw::desc()], + shader, + ) + }; + + let light_render_pipeline = { + let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Light Pipeline Layout"), + bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], + push_constant_ranges: &[], + }); + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Light Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), + }; + 
create_render_pipeline( + &device, + &layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc()], + shader, + ) + }; + + let debug_material = { + let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png"); + let normal_bytes = include_bytes!("../res/cobble-normal.png"); + + let diffuse_texture = texture::Texture::from_bytes( + &device, + &queue, + diffuse_bytes, + "res/alt-diffuse.png", + false, + ) + .unwrap(); + let normal_texture = texture::Texture::from_bytes( + &device, + &queue, + normal_bytes, + "res/alt-normal.png", + true, + ) + .unwrap(); + + model::Material::new( + &device, + "alt-material", + diffuse_texture, + normal_texture, + &texture_bind_group_layout, + ) + }; + + Self { + surface, + device, + queue, + config, + render_pipeline, + obj_model, + camera, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + instances, + instance_buffer, + depth_texture, + size, + light_uniform, + light_buffer, + light_bind_group, + light_render_pipeline, + #[allow(dead_code)] + debug_material, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.camera.aspect = self.config.width as f32 / self.config.height as f32; + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + self.depth_texture = + texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); + } + } + + fn input(&mut self, event: &WindowEvent) -> bool { + self.camera_controller.process_events(event) + } + + fn update(&mut self) { + self.camera_controller.update_camera(&mut self.camera); + self.camera_uniform.update_view_proj(&self.camera); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + + // Update the light + let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); + self.light_uniform.position = + (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) + * old_position) + .into(); + self.queue.write_buffer( + &self.light_buffer, + 0, + bytemuck::cast_slice(&[self.light_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &self.depth_texture.view, + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(1.0), + store: true, + }), + stencil_ops: None, + }), + }); + + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_pipeline(&self.light_render_pipeline); + render_pass.draw_light_model( + &self.obj_model, + &self.camera_bind_group, + &self.light_bind_group, + ); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.draw_model_instanced( + &self.obj_model, + 0..self.instances.len() as u32, + 
&self.camera_bind_group, + &self.light_bind_group, + ); + } + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let title = env!("CARGO_PKG_NAME"); + let window = winit::window::WindowBuilder::new() + .with_title(title) + .build(&event_loop) + .unwrap(); + let mut state = pollster::block_on(State::new(&window)); + event_loop.run(move |event, _, control_flow| { + *control_flow = ControlFlow::Poll; + match event { + Event::MainEventsCleared => window.request_redraw(), + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + if !state.input(event) { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { + state.resize(**new_inner_size); + } + _ => {} + } + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + state.update(); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + _ => {} + } + }); +} diff --git a/code/intermediate/tutorial11-normals/src/main.rs b/code/intermediate/tutorial11-normals/src/main.rs index 792113e8..5ec07624 100644 --- a/code/intermediate/tutorial11-normals/src/main.rs +++ b/code/intermediate/tutorial11-normals/src/main.rs @@ -1,781 +1,5 @@ -use std::iter; - -use cgmath::prelude::*; -use wgpu::util::DeviceExt; -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::Window, -}; - -mod model; -mod texture; - -use model::{DrawLight, DrawModel, Vertex}; - -#[rustfmt::skip] -pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4 = cgmath::Matrix4::new( - 1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 0.5, 0.0, - 0.0, 0.0, 0.5, 1.0, -); - -const NUM_INSTANCES_PER_ROW: u32 = 10; - -struct Camera { - eye: cgmath::Point3, - target: cgmath::Point3, - up: cgmath::Vector3, - aspect: f32, - fovy: f32, - znear: f32, - zfar: f32, -} - -impl Camera { - fn build_view_projection_matrix(&self) -> cgmath::Matrix4 { - let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up); - let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar); - proj * view - } -} - -#[repr(C)] -#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct CameraUniform { - view_position: [f32; 4], - view_proj: [[f32; 4]; 4], -} - -impl CameraUniform { - fn new() -> Self { - Self { - view_position: [0.0; 4], - view_proj: cgmath::Matrix4::identity().into(), - } - } - - fn update_view_proj(&mut self, camera: &Camera) { - self.view_position = camera.eye.to_homogeneous().into(); - self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into(); - } -} - -struct CameraController { - speed: f32, - is_forward_pressed: bool, - is_backward_pressed: bool, - is_left_pressed: bool, - is_right_pressed: bool, -} - -impl CameraController { - fn new(speed: f32) -> Self { - Self { - speed, - is_forward_pressed: 
false, - is_backward_pressed: false, - is_left_pressed: false, - is_right_pressed: false, - } - } - - fn process_events(&mut self, event: &WindowEvent) -> bool { - match event { - WindowEvent::KeyboardInput { - input: - KeyboardInput { - state, - virtual_keycode: Some(keycode), - .. - }, - .. - } => { - let is_pressed = *state == ElementState::Pressed; - match keycode { - VirtualKeyCode::W | VirtualKeyCode::Up => { - self.is_forward_pressed = is_pressed; - true - } - VirtualKeyCode::A | VirtualKeyCode::Left => { - self.is_left_pressed = is_pressed; - true - } - VirtualKeyCode::S | VirtualKeyCode::Down => { - self.is_backward_pressed = is_pressed; - true - } - VirtualKeyCode::D | VirtualKeyCode::Right => { - self.is_right_pressed = is_pressed; - true - } - _ => false, - } - } - _ => false, - } - } - - fn update_camera(&self, camera: &mut Camera) { - let forward = camera.target - camera.eye; - let forward_norm = forward.normalize(); - let forward_mag = forward.magnitude(); - - // Prevents glitching when camera gets too close to the - // center of the scene. - if self.is_forward_pressed && forward_mag > self.speed { - camera.eye += forward_norm * self.speed; - } - if self.is_backward_pressed { - camera.eye -= forward_norm * self.speed; - } - - let right = forward_norm.cross(camera.up); - - // Redo radius calc in case the up/ down is pressed. - let forward = camera.target - camera.eye; - let forward_mag = forward.magnitude(); - - if self.is_right_pressed { - // Rescale the distance between the target and eye so - // that it doesn't change. The eye therefore still - // lies on the circle made by the target and eye. - camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag; - } - if self.is_left_pressed { - camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag; - } - } -} - -struct Instance { - position: cgmath::Vector3, - rotation: cgmath::Quaternion, -} - -impl Instance { - fn to_raw(&self) -> InstanceRaw { - InstanceRaw { - model: (cgmath::Matrix4::from_translation(self.position) - * cgmath::Matrix4::from(self.rotation)) - .into(), - normal: cgmath::Matrix3::from(self.rotation).into(), - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -#[allow(dead_code)] -struct InstanceRaw { - model: [[f32; 4]; 4], - normal: [[f32; 3]; 3], -} - -impl model::Vertex for InstanceRaw { - fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { - use std::mem; - wgpu::VertexBufferLayout { - array_stride: mem::size_of::() as wgpu::BufferAddress, - // We need to switch from using a step mode of Vertex to Instance - // This means that our shaders will only change to use the next - // instance when the shader starts processing a new instance - step_mode: wgpu::VertexStepMode::Instance, - attributes: &[ - wgpu::VertexAttribute { - offset: 0, - // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll - // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later - shader_location: 5, - format: wgpu::VertexFormat::Float32x4, - }, - // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot - // for each vec4. We don't have to do this in code though. 
- wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, - shader_location: 6, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, - shader_location: 7, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, - shader_location: 8, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, - shader_location: 9, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, - shader_location: 10, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, - shader_location: 11, - format: wgpu::VertexFormat::Float32x3, - }, - ], - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct LightUniform { - position: [f32; 3], - // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here - _padding: u32, - color: [f32; 3], - _padding2: u32, -} - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - render_pipeline: wgpu::RenderPipeline, - obj_model: model::Model, - camera: Camera, - camera_controller: CameraController, - camera_uniform: CameraUniform, - camera_buffer: wgpu::Buffer, - camera_bind_group: wgpu::BindGroup, - instances: Vec, - #[allow(dead_code)] - instance_buffer: wgpu::Buffer, - depth_texture: texture::Texture, - size: winit::dpi::PhysicalSize, - light_uniform: LightUniform, - light_buffer: wgpu::Buffer, - light_bind_group: wgpu::BindGroup, - light_render_pipeline: wgpu::RenderPipeline, - #[allow(dead_code)] - debug_material: model::Material, -} - -fn create_render_pipeline( - device: &wgpu::Device, - layout: &wgpu::PipelineLayout, - color_format: wgpu::TextureFormat, - depth_format: Option, - vertex_layouts: &[wgpu::VertexBufferLayout], - shader: wgpu::ShaderModuleDescriptor, -) -> wgpu::RenderPipeline { - let shader = device.create_shader_module(&shader); - - device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Render Pipeline"), - layout: Some(layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_main", - buffers: vertex_layouts, - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_main", - targets: &[wgpu::ColorTargetState { - format: color_format, - blend: Some(wgpu::BlendState { - alpha: wgpu::BlendComponent::REPLACE, - color: wgpu::BlendComponent::REPLACE, - }), - write_mask: wgpu::ColorWrites::ALL, - }], - }), - primitive: wgpu::PrimitiveState { - topology: wgpu::PrimitiveTopology::TriangleList, - strip_index_format: None, - front_face: wgpu::FrontFace::Ccw, - cull_mode: Some(wgpu::Face::Back), - // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE - polygon_mode: wgpu::PolygonMode::Fill, - // Requires Features::DEPTH_CLIP_CONTROL - unclipped_depth: false, - // Requires Features::CONSERVATIVE_RASTERIZATION - conservative: false, - }, - depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { - format, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::Less, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: wgpu::MultisampleState { - count: 1, 
- mask: !0, - alpha_to_coverage_enabled: false, - }, - // If the pipeline will be used with a multiview render pass, this - // indicates how many array layers the attachments will have. - multiview: None, - }) -} - -impl State { - async fn new(window: &Window) -> Self { - let size = window.inner_size(); - - // The instance is a handle to our GPU - // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU - let instance = wgpu::Instance::new(wgpu::Backends::all()); - let surface = unsafe { instance.create_surface(window) }; - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: wgpu::Features::empty(), - limits: wgpu::Limits::default(), - }, - None, // Trace path - ) - .await - .unwrap(); - - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface.get_preferred_format(&adapter).unwrap(), - width: size.width, - height: size.height, - present_mode: wgpu::PresentMode::Fifo, - }; - - surface.configure(&device, &config); - - let texture_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - multisampled: false, - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - view_dimension: wgpu::TextureViewDimension::D2, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 1, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - // normal map - wgpu::BindGroupLayoutEntry { - binding: 2, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - multisampled: false, - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - view_dimension: wgpu::TextureViewDimension::D2, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 3, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - ], - label: Some("texture_bind_group_layout"), - }); - - let camera = Camera { - eye: (0.0, 5.0, -10.0).into(), - target: (0.0, 0.0, 0.0).into(), - up: cgmath::Vector3::unit_y(), - aspect: config.width as f32 / config.height as f32, - fovy: 45.0, - znear: 0.1, - zfar: 100.0, - }; - - let camera_controller = CameraController::new(0.2); - - let mut camera_uniform = CameraUniform::new(); - camera_uniform.update_view_proj(&camera); - - let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Camera Buffer"), - contents: bytemuck::cast_slice(&[camera_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - const SPACE_BETWEEN: f32 = 3.0; - let instances = (0..NUM_INSTANCES_PER_ROW) - .flat_map(|z| { - (0..NUM_INSTANCES_PER_ROW).map(move |x| { - let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - - let position = cgmath::Vector3 { x, y: 0.0, z }; - - let rotation = if position.is_zero() { - cgmath::Quaternion::from_axis_angle( - cgmath::Vector3::unit_z(), - cgmath::Deg(0.0), - ) - } else { - 
cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) - }; - - Instance { position, rotation } - }) - }) - .collect::>(); - - let instance_data = instances.iter().map(Instance::to_raw).collect::>(); - let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Instance Buffer"), - contents: bytemuck::cast_slice(&instance_data), - usage: wgpu::BufferUsages::VERTEX, - }); - - let camera_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: Some("camera_bind_group_layout"), - }); - - let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &camera_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: camera_buffer.as_entire_binding(), - }], - label: Some("camera_bind_group"), - }); - - let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); - let obj_model = model::Model::load( - &device, - &queue, - &texture_bind_group_layout, - res_dir.join("cube.obj"), - ) - .unwrap(); - - let light_uniform = LightUniform { - position: [2.0, 2.0, 2.0], - _padding: 0, - color: [1.0, 1.0, 1.0], - _padding2: 0, - }; - - let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Light VB"), - contents: bytemuck::cast_slice(&[light_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - let light_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: None, - }); - - let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &light_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: light_buffer.as_entire_binding(), - }], - label: None, - }); - - let depth_texture = - texture::Texture::create_depth_texture(&device, &config, "depth_texture"); - - let render_pipeline_layout = - device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Render Pipeline Layout"), - bind_group_layouts: &[ - &texture_bind_group_layout, - &camera_bind_group_layout, - &light_bind_group_layout, - ], - push_constant_ranges: &[], - }); - - let render_pipeline = { - let shader = wgpu::ShaderModuleDescriptor { - label: Some("Normal Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), - }; - create_render_pipeline( - &device, - &render_pipeline_layout, - config.format, - Some(texture::Texture::DEPTH_FORMAT), - &[model::ModelVertex::desc(), InstanceRaw::desc()], - shader, - ) - }; - - let light_render_pipeline = { - let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Light Pipeline Layout"), - bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], - push_constant_ranges: &[], - }); - let shader = wgpu::ShaderModuleDescriptor { - label: Some("Light Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), - }; - 
create_render_pipeline( - &device, - &layout, - config.format, - Some(texture::Texture::DEPTH_FORMAT), - &[model::ModelVertex::desc()], - shader, - ) - }; - - let debug_material = { - let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png"); - let normal_bytes = include_bytes!("../res/cobble-normal.png"); - - let diffuse_texture = texture::Texture::from_bytes( - &device, - &queue, - diffuse_bytes, - "res/alt-diffuse.png", - false, - ) - .unwrap(); - let normal_texture = texture::Texture::from_bytes( - &device, - &queue, - normal_bytes, - "res/alt-normal.png", - true, - ) - .unwrap(); - - model::Material::new( - &device, - "alt-material", - diffuse_texture, - normal_texture, - &texture_bind_group_layout, - ) - }; - - Self { - surface, - device, - queue, - config, - render_pipeline, - obj_model, - camera, - camera_controller, - camera_buffer, - camera_bind_group, - camera_uniform, - instances, - instance_buffer, - depth_texture, - size, - light_uniform, - light_buffer, - light_bind_group, - light_render_pipeline, - #[allow(dead_code)] - debug_material, - } - } - - fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { - if new_size.width > 0 && new_size.height > 0 { - self.camera.aspect = self.config.width as f32 / self.config.height as f32; - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - self.depth_texture = - texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); - } - } - - fn input(&mut self, event: &WindowEvent) -> bool { - self.camera_controller.process_events(event) - } - - fn update(&mut self) { - self.camera_controller.update_camera(&mut self.camera); - self.camera_uniform.update_view_proj(&self.camera); - self.queue.write_buffer( - &self.camera_buffer, - 0, - bytemuck::cast_slice(&[self.camera_uniform]), - ); - - // Update the light - let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); - self.light_uniform.position = - (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) - * old_position) - .into(); - self.queue.write_buffer( - &self.light_buffer, - 0, - bytemuck::cast_slice(&[self.light_uniform]), - ); - } - - fn render(&mut self) -> Result<(), wgpu::SurfaceError> { - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - { - let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color { - r: 0.1, - g: 0.2, - b: 0.3, - a: 1.0, - }), - store: true, - }, - }], - depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { - view: &self.depth_texture.view, - depth_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Clear(1.0), - store: true, - }), - stencil_ops: None, - }), - }); - - render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); - render_pass.set_pipeline(&self.light_render_pipeline); - render_pass.draw_light_model( - &self.obj_model, - &self.camera_bind_group, - &self.light_bind_group, - ); - - render_pass.set_pipeline(&self.render_pipeline); - render_pass.draw_model_instanced( - &self.obj_model, - 0..self.instances.len() as u32, - 
&self.camera_bind_group, - &self.light_bind_group, - ); - } - self.queue.submit(iter::once(encoder.finish())); - output.present(); - - Ok(()) - } -} +use tutorial11_normals::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let title = env!("CARGO_PKG_NAME"); - let window = winit::window::WindowBuilder::new() - .with_title(title) - .build(&event_loop) - .unwrap(); - let mut state = pollster::block_on(State::new(&window)); - event_loop.run(move |event, _, control_flow| { - *control_flow = ControlFlow::Poll; - match event { - Event::MainEventsCleared => window.request_redraw(), - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - if !state.input(event) { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - state.resize(**new_inner_size); - } - _ => {} - } - } - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - state.update(); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/intermediate/tutorial12-camera/src/lib.rs b/code/intermediate/tutorial12-camera/src/lib.rs new file mode 100644 index 00000000..723dfd40 --- /dev/null +++ b/code/intermediate/tutorial12-camera/src/lib.rs @@ -0,0 +1,710 @@ +use std::iter; + +use cgmath::prelude::*; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::Window, +}; + +mod camera; +mod model; +mod texture; // NEW! + +use model::{DrawLight, DrawModel, Vertex}; + +const NUM_INSTANCES_PER_ROW: u32 = 10; + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_position: [f32; 4], + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_position: [0.0; 4], + view_proj: cgmath::Matrix4::identity().into(), + } + } + + // UPDATED! 
+ fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) { + self.view_position = camera.position.to_homogeneous().into(); + self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into() + } +} + +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +impl Instance { + fn to_raw(&self) -> InstanceRaw { + InstanceRaw { + model: (cgmath::Matrix4::from_translation(self.position) + * cgmath::Matrix4::from(self.rotation)) + .into(), + normal: cgmath::Matrix3::from(self.rotation).into(), + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +#[allow(dead_code)] +struct InstanceRaw { + model: [[f32; 4]; 4], + normal: [[f32; 3]; 3], +} + +impl model::Vertex for InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, + shader_location: 9, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, + shader_location: 10, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, + shader_location: 11, + format: wgpu::VertexFormat::Float32x3, + }, + ], + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct LightUniform { + position: [f32; 3], + // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here + _padding: u32, + color: [f32; 3], + _padding2: u32, +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + render_pipeline: wgpu::RenderPipeline, + obj_model: model::Model, + camera: camera::Camera, // UPDATED! + projection: camera::Projection, // NEW! + camera_controller: camera::CameraController, // UPDATED! 
+ camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, + depth_texture: texture::Texture, + size: winit::dpi::PhysicalSize, + light_uniform: LightUniform, + light_buffer: wgpu::Buffer, + light_bind_group: wgpu::BindGroup, + light_render_pipeline: wgpu::RenderPipeline, + #[allow(dead_code)] + debug_material: model::Material, + // NEW! + mouse_pressed: bool, +} + +fn create_render_pipeline( + device: &wgpu::Device, + layout: &wgpu::PipelineLayout, + color_format: wgpu::TextureFormat, + depth_format: Option, + vertex_layouts: &[wgpu::VertexBufferLayout], + shader: wgpu::ShaderModuleDescriptor, +) -> wgpu::RenderPipeline { + let shader = device.create_shader_module(&shader); + + device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some(&format!("{:?}", shader)), + layout: Some(layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: vertex_layouts, + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: color_format, + blend: Some(wgpu::BlendState { + alpha: wgpu::BlendComponent::REPLACE, + color: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { + format, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. 
+ multiview: None, + }) +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + + surface.configure(&device, &config); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + // normal map + wgpu::BindGroupLayoutEntry { + binding: 2, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 3, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + // UPDATED! 
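// Editorial aside, not part of the patch: a sketch of a bind group matching
// the four-entry texture layout above (diffuse texture/sampler at bindings
// 0/1, normal-map texture/sampler at 2/3). In the patch this wiring presumably
// lives inside model::Material::new(); the `view` and `sampler` fields on
// texture::Texture are assumptions here, suggested by the depth-texture usage
// elsewhere in this file.
#[allow(dead_code)]
fn material_bind_group(
    device: &wgpu::Device,
    layout: &wgpu::BindGroupLayout,
    diffuse: &texture::Texture,
    normal: &texture::Texture,
) -> wgpu::BindGroup {
    device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&diffuse.view),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&diffuse.sampler),
            },
            wgpu::BindGroupEntry {
                binding: 2,
                resource: wgpu::BindingResource::TextureView(&normal.view),
            },
            wgpu::BindGroupEntry {
                binding: 3,
                resource: wgpu::BindingResource::Sampler(&normal.sampler),
            },
        ],
        label: Some("material_bind_group"),
    })
}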
+ let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0)); + let projection = + camera::Projection::new(config.width, config.height, cgmath::Deg(45.0), 0.1, 100.0); + let camera_controller = camera::CameraController::new(4.0, 0.4); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera, &projection); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + const SPACE_BETWEEN: f32 = 3.0; + let instances = (0..NUM_INSTANCES_PER_ROW) + .flat_map(|z| { + (0..NUM_INSTANCES_PER_ROW).map(move |x| { + let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + + let position = cgmath::Vector3 { x, y: 0.0, z }; + + let rotation = if position.is_zero() { + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); + let now = std::time::Instant::now(); + let obj_model = model::Model::load( + &device, + &queue, + &texture_bind_group_layout, + res_dir.join("cube.obj"), + ) + .unwrap(); + println!("Elapsed (Original): {:?}", std::time::Instant::now() - now); + + let light_uniform = LightUniform { + position: [2.0, 2.0, 2.0], + _padding: 0, + color: [1.0, 1.0, 1.0], + _padding2: 0, + }; + + let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Light VB"), + contents: bytemuck::cast_slice(&[light_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let light_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: None, + }); + + let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &light_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: 
light_buffer.as_entire_binding(), + }], + label: None, + }); + + let depth_texture = + texture::Texture::create_depth_texture(&device, &config, "depth_texture"); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[ + &texture_bind_group_layout, + &camera_bind_group_layout, + &light_bind_group_layout, + ], + push_constant_ranges: &[], + }); + + let render_pipeline = { + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Normal Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }; + create_render_pipeline( + &device, + &render_pipeline_layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc(), InstanceRaw::desc()], + shader, + ) + }; + + let light_render_pipeline = { + let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Light Pipeline Layout"), + bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], + push_constant_ranges: &[], + }); + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Light Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), + }; + create_render_pipeline( + &device, + &layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc()], + shader, + ) + }; + + let debug_material = { + let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png"); + let normal_bytes = include_bytes!("../res/cobble-normal.png"); + + let diffuse_texture = texture::Texture::from_bytes( + &device, + &queue, + diffuse_bytes, + "res/alt-diffuse.png", + false, + ) + .unwrap(); + let normal_texture = texture::Texture::from_bytes( + &device, + &queue, + normal_bytes, + "res/alt-normal.png", + true, + ) + .unwrap(); + + model::Material::new( + &device, + "alt-material", + diffuse_texture, + normal_texture, + &texture_bind_group_layout, + ) + }; + + Self { + surface, + device, + queue, + config, + render_pipeline, + obj_model, + camera, + projection, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + instances, + instance_buffer, + depth_texture, + size, + light_uniform, + light_buffer, + light_bind_group, + light_render_pipeline, + #[allow(dead_code)] + debug_material, + // NEW! + mouse_pressed: false, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + // UPDATED! + if new_size.width > 0 && new_size.height > 0 { + self.projection.resize(new_size.width, new_size.height); + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + self.depth_texture = + texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); + } + } + + // UPDATED! + fn input(&mut self, event: &DeviceEvent) -> bool { + match event { + DeviceEvent::Key(KeyboardInput { + virtual_keycode: Some(key), + state, + .. + }) => self.camera_controller.process_keyboard(*key, *state), + DeviceEvent::MouseWheel { delta, .. } => { + self.camera_controller.process_scroll(delta); + true + } + DeviceEvent::Button { + button: 1, // Left Mouse Button + state, + } => { + self.mouse_pressed = *state == ElementState::Pressed; + true + } + DeviceEvent::MouseMotion { delta } => { + if self.mouse_pressed { + self.camera_controller.process_mouse(delta.0, delta.1); + } + true + } + _ => false, + } + } + + fn update(&mut self, dt: std::time::Duration) { + // UPDATED! 
+ self.camera_controller.update_camera(&mut self.camera, dt); + self.camera_uniform + .update_view_proj(&self.camera, &self.projection); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + + // Update the light + let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); + self.light_uniform.position = + (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) + * old_position) + .into(); + self.queue.write_buffer( + &self.light_buffer, + 0, + bytemuck::cast_slice(&[self.light_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &self.depth_texture.view, + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(1.0), + store: true, + }), + stencil_ops: None, + }), + }); + + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_pipeline(&self.light_render_pipeline); + render_pass.draw_light_model( + &self.obj_model, + &self.camera_bind_group, + &self.light_bind_group, + ); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.draw_model_instanced( + &self.obj_model, + 0..self.instances.len() as u32, + &self.camera_bind_group, + &self.light_bind_group, + ); + } + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let title = env!("CARGO_PKG_NAME"); + let window = winit::window::WindowBuilder::new() + .with_title(title) + .build(&event_loop) + .unwrap(); + let mut state = pollster::block_on(State::new(&window)); // NEW! + let mut last_render_time = std::time::Instant::now(); + event_loop.run(move |event, _, control_flow| { + *control_flow = ControlFlow::Poll; + match event { + Event::MainEventsCleared => window.request_redraw(), + Event::DeviceEvent { + ref event, + .. // We're not using device_id currently + } => { + state.input(event); + } + // UPDATED! + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { + state.resize(**new_inner_size); + } + _ => {} + } + } + // UPDATED! 
+ Event::RedrawRequested(window_id) if window_id == window.id() => { + let now = std::time::Instant::now(); + let dt = now - last_render_time; + last_render_time = now; + state.update(dt); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + _ => {} + } + }); +} diff --git a/code/intermediate/tutorial12-camera/src/main.rs b/code/intermediate/tutorial12-camera/src/main.rs index 951906df..544442fd 100644 --- a/code/intermediate/tutorial12-camera/src/main.rs +++ b/code/intermediate/tutorial12-camera/src/main.rs @@ -1,710 +1,5 @@ -use std::iter; - -use cgmath::prelude::*; -use wgpu::util::DeviceExt; -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::Window, -}; - -mod camera; -mod model; -mod texture; // NEW! - -use model::{DrawLight, DrawModel, Vertex}; - -const NUM_INSTANCES_PER_ROW: u32 = 10; - -#[repr(C)] -#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct CameraUniform { - view_position: [f32; 4], - view_proj: [[f32; 4]; 4], -} - -impl CameraUniform { - fn new() -> Self { - Self { - view_position: [0.0; 4], - view_proj: cgmath::Matrix4::identity().into(), - } - } - - // UPDATED! - fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) { - self.view_position = camera.position.to_homogeneous().into(); - self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into() - } -} - -struct Instance { - position: cgmath::Vector3, - rotation: cgmath::Quaternion, -} - -impl Instance { - fn to_raw(&self) -> InstanceRaw { - InstanceRaw { - model: (cgmath::Matrix4::from_translation(self.position) - * cgmath::Matrix4::from(self.rotation)) - .into(), - normal: cgmath::Matrix3::from(self.rotation).into(), - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -#[allow(dead_code)] -struct InstanceRaw { - model: [[f32; 4]; 4], - normal: [[f32; 3]; 3], -} - -impl model::Vertex for InstanceRaw { - fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { - use std::mem; - wgpu::VertexBufferLayout { - array_stride: mem::size_of::() as wgpu::BufferAddress, - // We need to switch from using a step mode of Vertex to Instance - // This means that our shaders will only change to use the next - // instance when the shader starts processing a new instance - step_mode: wgpu::VertexStepMode::Instance, - attributes: &[ - wgpu::VertexAttribute { - offset: 0, - // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll - // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later - shader_location: 5, - format: wgpu::VertexFormat::Float32x4, - }, - // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot - // for each vec4. We don't have to do this in code though. 
- wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, - shader_location: 6, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, - shader_location: 7, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, - shader_location: 8, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, - shader_location: 9, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, - shader_location: 10, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, - shader_location: 11, - format: wgpu::VertexFormat::Float32x3, - }, - ], - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct LightUniform { - position: [f32; 3], - // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here - _padding: u32, - color: [f32; 3], - _padding2: u32, -} - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - render_pipeline: wgpu::RenderPipeline, - obj_model: model::Model, - camera: camera::Camera, // UPDATED! - projection: camera::Projection, // NEW! - camera_controller: camera::CameraController, // UPDATED! - camera_uniform: CameraUniform, - camera_buffer: wgpu::Buffer, - camera_bind_group: wgpu::BindGroup, - instances: Vec, - #[allow(dead_code)] - instance_buffer: wgpu::Buffer, - depth_texture: texture::Texture, - size: winit::dpi::PhysicalSize, - light_uniform: LightUniform, - light_buffer: wgpu::Buffer, - light_bind_group: wgpu::BindGroup, - light_render_pipeline: wgpu::RenderPipeline, - #[allow(dead_code)] - debug_material: model::Material, - // NEW! 
- mouse_pressed: bool, -} - -fn create_render_pipeline( - device: &wgpu::Device, - layout: &wgpu::PipelineLayout, - color_format: wgpu::TextureFormat, - depth_format: Option, - vertex_layouts: &[wgpu::VertexBufferLayout], - shader: wgpu::ShaderModuleDescriptor, -) -> wgpu::RenderPipeline { - let shader = device.create_shader_module(&shader); - - device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some(&format!("{:?}", shader)), - layout: Some(layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_main", - buffers: vertex_layouts, - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_main", - targets: &[wgpu::ColorTargetState { - format: color_format, - blend: Some(wgpu::BlendState { - alpha: wgpu::BlendComponent::REPLACE, - color: wgpu::BlendComponent::REPLACE, - }), - write_mask: wgpu::ColorWrites::ALL, - }], - }), - primitive: wgpu::PrimitiveState { - topology: wgpu::PrimitiveTopology::TriangleList, - strip_index_format: None, - front_face: wgpu::FrontFace::Ccw, - cull_mode: Some(wgpu::Face::Back), - // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE - polygon_mode: wgpu::PolygonMode::Fill, - // Requires Features::DEPTH_CLIP_CONTROL - unclipped_depth: false, - // Requires Features::CONSERVATIVE_RASTERIZATION - conservative: false, - }, - depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { - format, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::Less, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: wgpu::MultisampleState { - count: 1, - mask: !0, - alpha_to_coverage_enabled: false, - }, - // If the pipeline will be used with a multiview render pass, this - // indicates how many array layers the attachments will have. 
- multiview: None, - }) -} - -impl State { - async fn new(window: &Window) -> Self { - let size = window.inner_size(); - - // The instance is a handle to our GPU - // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU - let instance = wgpu::Instance::new(wgpu::Backends::all()); - let surface = unsafe { instance.create_surface(window) }; - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: wgpu::Features::empty(), - limits: wgpu::Limits::default(), - }, - None, // Trace path - ) - .await - .unwrap(); - - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface.get_preferred_format(&adapter).unwrap(), - width: size.width, - height: size.height, - present_mode: wgpu::PresentMode::Fifo, - }; - - surface.configure(&device, &config); - - let texture_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - multisampled: false, - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - view_dimension: wgpu::TextureViewDimension::D2, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 1, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - // normal map - wgpu::BindGroupLayoutEntry { - binding: 2, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - multisampled: false, - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - view_dimension: wgpu::TextureViewDimension::D2, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 3, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - ], - label: Some("texture_bind_group_layout"), - }); - - // UPDATED! 
- let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0)); - let projection = - camera::Projection::new(config.width, config.height, cgmath::Deg(45.0), 0.1, 100.0); - let camera_controller = camera::CameraController::new(4.0, 0.4); - - let mut camera_uniform = CameraUniform::new(); - camera_uniform.update_view_proj(&camera, &projection); - - let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Camera Buffer"), - contents: bytemuck::cast_slice(&[camera_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - const SPACE_BETWEEN: f32 = 3.0; - let instances = (0..NUM_INSTANCES_PER_ROW) - .flat_map(|z| { - (0..NUM_INSTANCES_PER_ROW).map(move |x| { - let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); - - let position = cgmath::Vector3 { x, y: 0.0, z }; - - let rotation = if position.is_zero() { - cgmath::Quaternion::from_axis_angle( - cgmath::Vector3::unit_z(), - cgmath::Deg(0.0), - ) - } else { - cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) - }; - - Instance { position, rotation } - }) - }) - .collect::>(); - - let instance_data = instances.iter().map(Instance::to_raw).collect::>(); - let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Instance Buffer"), - contents: bytemuck::cast_slice(&instance_data), - usage: wgpu::BufferUsages::VERTEX, - }); - - let camera_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: Some("camera_bind_group_layout"), - }); - - let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &camera_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: camera_buffer.as_entire_binding(), - }], - label: Some("camera_bind_group"), - }); - - let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); - let now = std::time::Instant::now(); - let obj_model = model::Model::load( - &device, - &queue, - &texture_bind_group_layout, - res_dir.join("cube.obj"), - ) - .unwrap(); - println!("Elapsed (Original): {:?}", std::time::Instant::now() - now); - - let light_uniform = LightUniform { - position: [2.0, 2.0, 2.0], - _padding: 0, - color: [1.0, 1.0, 1.0], - _padding2: 0, - }; - - let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Light VB"), - contents: bytemuck::cast_slice(&[light_uniform]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - let light_bind_group_layout = - device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - label: None, - }); - - let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &light_bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: 
light_buffer.as_entire_binding(), - }], - label: None, - }); - - let depth_texture = - texture::Texture::create_depth_texture(&device, &config, "depth_texture"); - - let render_pipeline_layout = - device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Render Pipeline Layout"), - bind_group_layouts: &[ - &texture_bind_group_layout, - &camera_bind_group_layout, - &light_bind_group_layout, - ], - push_constant_ranges: &[], - }); - - let render_pipeline = { - let shader = wgpu::ShaderModuleDescriptor { - label: Some("Normal Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), - }; - create_render_pipeline( - &device, - &render_pipeline_layout, - config.format, - Some(texture::Texture::DEPTH_FORMAT), - &[model::ModelVertex::desc(), InstanceRaw::desc()], - shader, - ) - }; - - let light_render_pipeline = { - let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: Some("Light Pipeline Layout"), - bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], - push_constant_ranges: &[], - }); - let shader = wgpu::ShaderModuleDescriptor { - label: Some("Light Shader"), - source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), - }; - create_render_pipeline( - &device, - &layout, - config.format, - Some(texture::Texture::DEPTH_FORMAT), - &[model::ModelVertex::desc()], - shader, - ) - }; - - let debug_material = { - let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png"); - let normal_bytes = include_bytes!("../res/cobble-normal.png"); - - let diffuse_texture = texture::Texture::from_bytes( - &device, - &queue, - diffuse_bytes, - "res/alt-diffuse.png", - false, - ) - .unwrap(); - let normal_texture = texture::Texture::from_bytes( - &device, - &queue, - normal_bytes, - "res/alt-normal.png", - true, - ) - .unwrap(); - - model::Material::new( - &device, - "alt-material", - diffuse_texture, - normal_texture, - &texture_bind_group_layout, - ) - }; - - Self { - surface, - device, - queue, - config, - render_pipeline, - obj_model, - camera, - projection, - camera_controller, - camera_buffer, - camera_bind_group, - camera_uniform, - instances, - instance_buffer, - depth_texture, - size, - light_uniform, - light_buffer, - light_bind_group, - light_render_pipeline, - #[allow(dead_code)] - debug_material, - // NEW! - mouse_pressed: false, - } - } - - fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { - // UPDATED! - if new_size.width > 0 && new_size.height > 0 { - self.projection.resize(new_size.width, new_size.height); - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - self.depth_texture = - texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); - } - } - - // UPDATED! - fn input(&mut self, event: &DeviceEvent) -> bool { - match event { - DeviceEvent::Key(KeyboardInput { - virtual_keycode: Some(key), - state, - .. - }) => self.camera_controller.process_keyboard(*key, *state), - DeviceEvent::MouseWheel { delta, .. } => { - self.camera_controller.process_scroll(delta); - true - } - DeviceEvent::Button { - button: 1, // Left Mouse Button - state, - } => { - self.mouse_pressed = *state == ElementState::Pressed; - true - } - DeviceEvent::MouseMotion { delta } => { - if self.mouse_pressed { - self.camera_controller.process_mouse(delta.0, delta.1); - } - true - } - _ => false, - } - } - - fn update(&mut self, dt: std::time::Duration) { - // UPDATED! 
- self.camera_controller.update_camera(&mut self.camera, dt); - self.camera_uniform - .update_view_proj(&self.camera, &self.projection); - self.queue.write_buffer( - &self.camera_buffer, - 0, - bytemuck::cast_slice(&[self.camera_uniform]), - ); - - // Update the light - let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); - self.light_uniform.position = - (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) - * old_position) - .into(); - self.queue.write_buffer( - &self.light_buffer, - 0, - bytemuck::cast_slice(&[self.light_uniform]), - ); - } - - fn render(&mut self) -> Result<(), wgpu::SurfaceError> { - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - { - let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color { - r: 0.1, - g: 0.2, - b: 0.3, - a: 1.0, - }), - store: true, - }, - }], - depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { - view: &self.depth_texture.view, - depth_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Clear(1.0), - store: true, - }), - stencil_ops: None, - }), - }); - - render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); - render_pass.set_pipeline(&self.light_render_pipeline); - render_pass.draw_light_model( - &self.obj_model, - &self.camera_bind_group, - &self.light_bind_group, - ); - - render_pass.set_pipeline(&self.render_pipeline); - render_pass.draw_model_instanced( - &self.obj_model, - 0..self.instances.len() as u32, - &self.camera_bind_group, - &self.light_bind_group, - ); - } - self.queue.submit(iter::once(encoder.finish())); - output.present(); - - Ok(()) - } -} +use tutorial12_camera::run; fn main() { - env_logger::init(); - let event_loop = EventLoop::new(); - let title = env!("CARGO_PKG_NAME"); - let window = winit::window::WindowBuilder::new() - .with_title(title) - .build(&event_loop) - .unwrap(); - let mut state = pollster::block_on(State::new(&window)); // NEW! - let mut last_render_time = std::time::Instant::now(); - event_loop.run(move |event, _, control_flow| { - *control_flow = ControlFlow::Poll; - match event { - Event::MainEventsCleared => window.request_redraw(), - Event::DeviceEvent { - ref event, - .. // We're not using device_id currently - } => { - state.input(event); - } - // UPDATED! - Event::WindowEvent { - ref event, - window_id, - } if window_id == window.id() => { - match event { - WindowEvent::CloseRequested - | WindowEvent::KeyboardInput { - input: - KeyboardInput { - state: ElementState::Pressed, - virtual_keycode: Some(VirtualKeyCode::Escape), - .. - }, - .. - } => *control_flow = ControlFlow::Exit, - WindowEvent::Resized(physical_size) => { - state.resize(*physical_size); - } - WindowEvent::ScaleFactorChanged { new_inner_size, .. } => { - state.resize(**new_inner_size); - } - _ => {} - } - } - // UPDATED! 
- Event::RedrawRequested(window_id) if window_id == window.id() => { - let now = std::time::Instant::now(); - let dt = now - last_render_time; - last_render_time = now; - state.update(dt); - match state.render() { - Ok(_) => {} - // Reconfigure the surface if lost - Err(wgpu::SurfaceError::Lost) => state.resize(state.size), - // The system is out of memory, we should probably quit - Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, - // All other errors (Outdated, Timeout) should be resolved by the next frame - Err(e) => eprintln!("{:?}", e), - } - } - _ => {} - } - }); -} + run(); +} \ No newline at end of file diff --git a/code/intermediate/tutorial13-threading/src/lib.rs b/code/intermediate/tutorial13-threading/src/lib.rs new file mode 100644 index 00000000..eaafd57f --- /dev/null +++ b/code/intermediate/tutorial13-threading/src/lib.rs @@ -0,0 +1,703 @@ +use cgmath::prelude::*; +use rayon::prelude::*; +use std::iter; +use wgpu::util::DeviceExt; +use winit::{ + event::*, + event_loop::{ControlFlow, EventLoop}, + window::Window, +}; + +mod camera; +mod model; +mod texture; // NEW! + +use model::{DrawLight, DrawModel, Vertex}; + +const NUM_INSTANCES_PER_ROW: u32 = 10; + +#[repr(C)] +#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct CameraUniform { + view_position: [f32; 4], + view_proj: [[f32; 4]; 4], +} + +impl CameraUniform { + fn new() -> Self { + Self { + view_position: [0.0; 4], + view_proj: cgmath::Matrix4::identity().into(), + } + } + + // UPDATED! + fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) { + self.view_position = camera.position.to_homogeneous().into(); + self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into() + } +} + +struct Instance { + position: cgmath::Vector3, + rotation: cgmath::Quaternion, +} + +impl Instance { + fn to_raw(&self) -> InstanceRaw { + InstanceRaw { + model: (cgmath::Matrix4::from_translation(self.position) + * cgmath::Matrix4::from(self.rotation)) + .into(), + normal: cgmath::Matrix3::from(self.rotation).into(), + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +#[allow(dead_code)] +struct InstanceRaw { + model: [[f32; 4]; 4], + normal: [[f32; 3]; 3], +} + +impl model::Vertex for InstanceRaw { + fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { + use std::mem; + wgpu::VertexBufferLayout { + array_stride: mem::size_of::() as wgpu::BufferAddress, + // We need to switch from using a step mode of Vertex to Instance + // This means that our shaders will only change to use the next + // instance when the shader starts processing a new instance + step_mode: wgpu::VertexStepMode::Instance, + attributes: &[ + wgpu::VertexAttribute { + offset: 0, + // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll + // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later + shader_location: 5, + format: wgpu::VertexFormat::Float32x4, + }, + // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot + // for each vec4. We don't have to do this in code though. 
+ wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, + shader_location: 6, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, + shader_location: 7, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, + shader_location: 8, + format: wgpu::VertexFormat::Float32x4, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, + shader_location: 9, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, + shader_location: 10, + format: wgpu::VertexFormat::Float32x3, + }, + wgpu::VertexAttribute { + offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, + shader_location: 11, + format: wgpu::VertexFormat::Float32x3, + }, + ], + } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] +struct LightUniform { + position: [f32; 3], + // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here + _padding: u32, + color: [f32; 3], + _padding2: u32, +} + +struct State { + surface: wgpu::Surface, + device: wgpu::Device, + queue: wgpu::Queue, + config: wgpu::SurfaceConfiguration, + render_pipeline: wgpu::RenderPipeline, + obj_model: model::Model, + camera: camera::Camera, + projection: camera::Projection, + camera_controller: camera::CameraController, + camera_uniform: CameraUniform, + camera_buffer: wgpu::Buffer, + camera_bind_group: wgpu::BindGroup, + instances: Vec, + #[allow(dead_code)] + instance_buffer: wgpu::Buffer, + depth_texture: texture::Texture, + size: winit::dpi::PhysicalSize, + light_uniform: LightUniform, + light_buffer: wgpu::Buffer, + light_bind_group: wgpu::BindGroup, + light_render_pipeline: wgpu::RenderPipeline, + #[allow(dead_code)] + debug_material: model::Material, + mouse_pressed: bool, +} + +fn create_render_pipeline( + device: &wgpu::Device, + layout: &wgpu::PipelineLayout, + color_format: wgpu::TextureFormat, + depth_format: Option, + vertex_layouts: &[wgpu::VertexBufferLayout], + shader: wgpu::ShaderModuleDescriptor, +) -> wgpu::RenderPipeline { + let shader = device.create_shader_module(&shader); + + device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some(&format!("{:?}", shader)), + layout: Some(layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_main", + buffers: vertex_layouts, + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_main", + targets: &[wgpu::ColorTargetState { + format: color_format, + blend: Some(wgpu::BlendState { + alpha: wgpu::BlendComponent::REPLACE, + color: wgpu::BlendComponent::REPLACE, + }), + write_mask: wgpu::ColorWrites::ALL, + }], + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleList, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: Some(wgpu::Face::Back), + // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE + polygon_mode: wgpu::PolygonMode::Fill, + // Requires Features::DEPTH_CLIP_CONTROL + unclipped_depth: false, + // Requires Features::CONSERVATIVE_RASTERIZATION + conservative: false, + }, + depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { + format, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: 
wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState { + count: 1, + mask: !0, + alpha_to_coverage_enabled: false, + }, + // If the pipeline will be used with a multiview render pass, this + // indicates how many array layers the attachments will have. + multiview: None, + }) +} + +impl State { + async fn new(window: &Window) -> Self { + let size = window.inner_size(); + + // The instance is a handle to our GPU + // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU + let instance = wgpu::Instance::new(wgpu::Backends::all()); + let surface = unsafe { instance.create_surface(window) }; + let adapter = instance + .request_adapter(&wgpu::RequestAdapterOptions { + power_preference: wgpu::PowerPreference::default(), + compatible_surface: Some(&surface), + force_fallback_adapter: false, + }) + .await + .unwrap(); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: wgpu::Features::empty(), + limits: wgpu::Limits::default(), + }, + None, // Trace path + ) + .await + .unwrap(); + + let config = wgpu::SurfaceConfiguration { + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + format: surface.get_preferred_format(&adapter).unwrap(), + width: size.width, + height: size.height, + present_mode: wgpu::PresentMode::Fifo, + }; + + surface.configure(&device, &config); + + let texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + // normal map + wgpu::BindGroupLayoutEntry { + binding: 2, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 3, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("texture_bind_group_layout"), + }); + + // UPDATED! + let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0)); + let projection = + camera::Projection::new(config.width, config.height, cgmath::Deg(45.0), 0.1, 100.0); + let camera_controller = camera::CameraController::new(4.0, 0.4); + + let mut camera_uniform = CameraUniform::new(); + camera_uniform.update_view_proj(&camera, &projection); + + let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[camera_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + const SPACE_BETWEEN: f32 = 3.0; + let instances = (0..NUM_INSTANCES_PER_ROW) + .into_par_iter() // NEW! + .flat_map(|z| { + // UPDATED! 
+ (0..NUM_INSTANCES_PER_ROW).into_par_iter().map(move |x| { + let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); + + let position = cgmath::Vector3 { x, y: 0.0, z }; + + let rotation = if position.is_zero() { + cgmath::Quaternion::from_axis_angle( + cgmath::Vector3::unit_z(), + cgmath::Deg(0.0), + ) + } else { + cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0)) + }; + + Instance { position, rotation } + }) + }) + .collect::>(); + + let instance_data = instances.iter().map(Instance::to_raw).collect::>(); + let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Instance Buffer"), + contents: bytemuck::cast_slice(&instance_data), + usage: wgpu::BufferUsages::VERTEX, + }); + + let camera_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: Some("camera_bind_group_layout"), + }); + + let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); + let obj_model = model::Model::load( + &device, + &queue, + &texture_bind_group_layout, + res_dir.join("cube.obj"), + ) + .unwrap(); + + let light_uniform = LightUniform { + position: [2.0, 2.0, 2.0], + _padding: 0, + color: [1.0, 1.0, 1.0], + _padding2: 0, + }; + + let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Light VB"), + contents: bytemuck::cast_slice(&[light_uniform]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let light_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + label: None, + }); + + let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &light_bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: light_buffer.as_entire_binding(), + }], + label: None, + }); + + let depth_texture = + texture::Texture::create_depth_texture(&device, &config, "depth_texture"); + + let render_pipeline_layout = + device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Render Pipeline Layout"), + bind_group_layouts: &[ + &texture_bind_group_layout, + &camera_bind_group_layout, + &light_bind_group_layout, + ], + push_constant_ranges: &[], + }); + + let render_pipeline = { + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Normal Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), + }; + create_render_pipeline( + &device, + &render_pipeline_layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc(), InstanceRaw::desc()], + shader, + ) + }; + + let 
light_render_pipeline = { + let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("Light Pipeline Layout"), + bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout], + push_constant_ranges: &[], + }); + let shader = wgpu::ShaderModuleDescriptor { + label: Some("Light Shader"), + source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()), + }; + create_render_pipeline( + &device, + &layout, + config.format, + Some(texture::Texture::DEPTH_FORMAT), + &[model::ModelVertex::desc()], + shader, + ) + }; + + let debug_material = { + let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png"); + let normal_bytes = include_bytes!("../res/cobble-normal.png"); + + let diffuse_texture = texture::Texture::from_bytes( + &device, + &queue, + diffuse_bytes, + "res/alt-diffuse.png", + false, + ) + .unwrap(); + let normal_texture = texture::Texture::from_bytes( + &device, + &queue, + normal_bytes, + "res/alt-normal.png", + true, + ) + .unwrap(); + + model::Material::new( + &device, + "alt-material", + diffuse_texture, + normal_texture, + &texture_bind_group_layout, + ) + }; + + Self { + surface, + device, + queue, + config, + render_pipeline, + obj_model, + camera, + projection, + camera_controller, + camera_buffer, + camera_bind_group, + camera_uniform, + instances, + instance_buffer, + depth_texture, + size, + light_uniform, + light_buffer, + light_bind_group, + light_render_pipeline, + #[allow(dead_code)] + debug_material, + mouse_pressed: false, + } + } + + fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { + if new_size.width > 0 && new_size.height > 0 { + self.projection.resize(new_size.width, new_size.height); + self.size = new_size; + self.config.width = new_size.width; + self.config.height = new_size.height; + self.surface.configure(&self.device, &self.config); + self.depth_texture = + texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture"); + } + } + + fn input(&mut self, event: &DeviceEvent) -> bool { + match event { + DeviceEvent::Key(KeyboardInput { + virtual_keycode: Some(key), + state, + .. + }) => self.camera_controller.process_keyboard(*key, *state), + DeviceEvent::MouseWheel { delta, .. 
} => { + self.camera_controller.process_scroll(delta); + true + } + DeviceEvent::Button { + button: 1, // Left Mouse Button + state, + } => { + self.mouse_pressed = *state == ElementState::Pressed; + true + } + DeviceEvent::MouseMotion { delta } => { + if self.mouse_pressed { + self.camera_controller.process_mouse(delta.0, delta.1); + } + true + } + _ => false, + } + } + + fn update(&mut self, dt: std::time::Duration) { + self.camera_controller.update_camera(&mut self.camera, dt); + self.camera_uniform + .update_view_proj(&self.camera, &self.projection); + self.queue.write_buffer( + &self.camera_buffer, + 0, + bytemuck::cast_slice(&[self.camera_uniform]), + ); + + // Update the light + let old_position: cgmath::Vector3<_> = self.light_uniform.position.into(); + self.light_uniform.position = + (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) + * old_position) + .into(); + self.queue.write_buffer( + &self.light_buffer, + 0, + bytemuck::cast_slice(&[self.light_uniform]), + ); + } + + fn render(&mut self) -> Result<(), wgpu::SurfaceError> { + let output = self.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + + let mut encoder = self + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color { + r: 0.1, + g: 0.2, + b: 0.3, + a: 1.0, + }), + store: true, + }, + }], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &self.depth_texture.view, + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(1.0), + store: true, + }), + stencil_ops: None, + }), + }); + + render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); + render_pass.set_pipeline(&self.light_render_pipeline); + render_pass.draw_light_model( + &self.obj_model, + &self.camera_bind_group, + &self.light_bind_group, + ); + + render_pass.set_pipeline(&self.render_pipeline); + render_pass.draw_model_instanced( + &self.obj_model, + 0..self.instances.len() as u32, + &self.camera_bind_group, + &self.light_bind_group, + ); + } + self.queue.submit(iter::once(encoder.finish())); + output.present(); + + Ok(()) + } +} + +pub fn run() { + env_logger::init(); + let event_loop = EventLoop::new(); + let title = env!("CARGO_PKG_NAME"); + let window = winit::window::WindowBuilder::new() + .with_title(title) + .build(&event_loop) + .unwrap(); + let mut state = pollster::block_on(State::new(&window)); // NEW! + let mut last_render_time = std::time::Instant::now(); + event_loop.run(move |event, _, control_flow| { + *control_flow = ControlFlow::Poll; + match event { + Event::MainEventsCleared => window.request_redraw(), + Event::DeviceEvent { + ref event, + .. // We're not using device_id currently + } => { + state.input(event); + } + Event::WindowEvent { + ref event, + window_id, + } if window_id == window.id() => { + match event { + WindowEvent::CloseRequested + | WindowEvent::KeyboardInput { + input: + KeyboardInput { + state: ElementState::Pressed, + virtual_keycode: Some(VirtualKeyCode::Escape), + .. + }, + .. + } => *control_flow = ControlFlow::Exit, + WindowEvent::Resized(physical_size) => { + state.resize(*physical_size); + } + WindowEvent::ScaleFactorChanged { new_inner_size, .. 
} => { + state.resize(**new_inner_size); + } + _ => {} + } + } + Event::RedrawRequested(window_id) if window_id == window.id() => { + let now = std::time::Instant::now(); + let dt = now - last_render_time; + last_render_time = now; + state.update(dt); + match state.render() { + Ok(_) => {} + // Reconfigure the surface if lost + Err(wgpu::SurfaceError::Lost) => state.resize(state.size), + // The system is out of memory, we should probably quit + Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit, + // All other errors (Outdated, Timeout) should be resolved by the next frame + Err(e) => eprintln!("{:?}", e), + } + } + _ => {} + } + }); +} diff --git a/code/intermediate/tutorial13-threading/src/main.rs b/code/intermediate/tutorial13-threading/src/main.rs index d60ceaa6..4c1ec813 100644 --- a/code/intermediate/tutorial13-threading/src/main.rs +++ b/code/intermediate/tutorial13-threading/src/main.rs @@ -1,703 +1,5 @@ -use cgmath::prelude::*; -use rayon::prelude::*; -use std::iter; -use wgpu::util::DeviceExt; -use winit::{ - event::*, - event_loop::{ControlFlow, EventLoop}, - window::Window, -}; - -mod camera; -mod model; -mod texture; // NEW! - -use model::{DrawLight, DrawModel, Vertex}; - -const NUM_INSTANCES_PER_ROW: u32 = 10; - -#[repr(C)] -#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct CameraUniform { - view_position: [f32; 4], - view_proj: [[f32; 4]; 4], -} - -impl CameraUniform { - fn new() -> Self { - Self { - view_position: [0.0; 4], - view_proj: cgmath::Matrix4::identity().into(), - } - } - - // UPDATED! - fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) { - self.view_position = camera.position.to_homogeneous().into(); - self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into() - } -} - -struct Instance { - position: cgmath::Vector3, - rotation: cgmath::Quaternion, -} - -impl Instance { - fn to_raw(&self) -> InstanceRaw { - InstanceRaw { - model: (cgmath::Matrix4::from_translation(self.position) - * cgmath::Matrix4::from(self.rotation)) - .into(), - normal: cgmath::Matrix3::from(self.rotation).into(), - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -#[allow(dead_code)] -struct InstanceRaw { - model: [[f32; 4]; 4], - normal: [[f32; 3]; 3], -} - -impl model::Vertex for InstanceRaw { - fn desc<'a>() -> wgpu::VertexBufferLayout<'a> { - use std::mem; - wgpu::VertexBufferLayout { - array_stride: mem::size_of::() as wgpu::BufferAddress, - // We need to switch from using a step mode of Vertex to Instance - // This means that our shaders will only change to use the next - // instance when the shader starts processing a new instance - step_mode: wgpu::VertexStepMode::Instance, - attributes: &[ - wgpu::VertexAttribute { - offset: 0, - // While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll - // be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later - shader_location: 5, - format: wgpu::VertexFormat::Float32x4, - }, - // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot - // for each vec4. We don't have to do this in code though. 
- wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress, - shader_location: 6, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, - shader_location: 7, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, - shader_location: 8, - format: wgpu::VertexFormat::Float32x4, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 16]>() as wgpu::BufferAddress, - shader_location: 9, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 19]>() as wgpu::BufferAddress, - shader_location: 10, - format: wgpu::VertexFormat::Float32x3, - }, - wgpu::VertexAttribute { - offset: mem::size_of::<[f32; 22]>() as wgpu::BufferAddress, - shader_location: 11, - format: wgpu::VertexFormat::Float32x3, - }, - ], - } - } -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] -struct LightUniform { - position: [f32; 3], - // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here - _padding: u32, - color: [f32; 3], - _padding2: u32, -} - -struct State { - surface: wgpu::Surface, - device: wgpu::Device, - queue: wgpu::Queue, - config: wgpu::SurfaceConfiguration, - render_pipeline: wgpu::RenderPipeline, - obj_model: model::Model, - camera: camera::Camera, - projection: camera::Projection, - camera_controller: camera::CameraController, - camera_uniform: CameraUniform, - camera_buffer: wgpu::Buffer, - camera_bind_group: wgpu::BindGroup, - instances: Vec, - #[allow(dead_code)] - instance_buffer: wgpu::Buffer, - depth_texture: texture::Texture, - size: winit::dpi::PhysicalSize, - light_uniform: LightUniform, - light_buffer: wgpu::Buffer, - light_bind_group: wgpu::BindGroup, - light_render_pipeline: wgpu::RenderPipeline, - #[allow(dead_code)] - debug_material: model::Material, - mouse_pressed: bool, -} - -fn create_render_pipeline( - device: &wgpu::Device, - layout: &wgpu::PipelineLayout, - color_format: wgpu::TextureFormat, - depth_format: Option, - vertex_layouts: &[wgpu::VertexBufferLayout], - shader: wgpu::ShaderModuleDescriptor, -) -> wgpu::RenderPipeline { - let shader = device.create_shader_module(&shader); - - device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some(&format!("{:?}", shader)), - layout: Some(layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_main", - buffers: vertex_layouts, - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_main", - targets: &[wgpu::ColorTargetState { - format: color_format, - blend: Some(wgpu::BlendState { - alpha: wgpu::BlendComponent::REPLACE, - color: wgpu::BlendComponent::REPLACE, - }), - write_mask: wgpu::ColorWrites::ALL, - }], - }), - primitive: wgpu::PrimitiveState { - topology: wgpu::PrimitiveTopology::TriangleList, - strip_index_format: None, - front_face: wgpu::FrontFace::Ccw, - cull_mode: Some(wgpu::Face::Back), - // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE - polygon_mode: wgpu::PolygonMode::Fill, - // Requires Features::DEPTH_CLIP_CONTROL - unclipped_depth: false, - // Requires Features::CONSERVATIVE_RASTERIZATION - conservative: false, - }, - depth_stencil: depth_format.map(|format| wgpu::DepthStencilState { - format, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::Less, - stencil: wgpu::StencilState::default(), - bias: 
wgpu::DepthBiasState::default(),
-        }),
-        multisample: wgpu::MultisampleState {
-            count: 1,
-            mask: !0,
-            alpha_to_coverage_enabled: false,
-        },
-        // If the pipeline will be used with a multiview render pass, this
-        // indicates how many array layers the attachments will have.
-        multiview: None,
-    })
-}
-
-impl State {
-    async fn new(window: &Window) -> Self {
-        let size = window.inner_size();
-
-        // The instance is a handle to our GPU
-        // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
-        let instance = wgpu::Instance::new(wgpu::Backends::all());
-        let surface = unsafe { instance.create_surface(window) };
-        let adapter = instance
-            .request_adapter(&wgpu::RequestAdapterOptions {
-                power_preference: wgpu::PowerPreference::default(),
-                compatible_surface: Some(&surface),
-                force_fallback_adapter: false,
-            })
-            .await
-            .unwrap();
-        let (device, queue) = adapter
-            .request_device(
-                &wgpu::DeviceDescriptor {
-                    label: None,
-                    features: wgpu::Features::empty(),
-                    limits: wgpu::Limits::default(),
-                },
-                None, // Trace path
-            )
-            .await
-            .unwrap();
-
-        let config = wgpu::SurfaceConfiguration {
-            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
-            format: surface.get_preferred_format(&adapter).unwrap(),
-            width: size.width,
-            height: size.height,
-            present_mode: wgpu::PresentMode::Fifo,
-        };
-
-        surface.configure(&device, &config);
-
-        let texture_bind_group_layout =
-            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                entries: &[
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 0,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        ty: wgpu::BindingType::Texture {
-                            multisampled: false,
-                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
-                            view_dimension: wgpu::TextureViewDimension::D2,
-                        },
-                        count: None,
-                    },
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 1,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-                        count: None,
-                    },
-                    // normal map
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 2,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        ty: wgpu::BindingType::Texture {
-                            multisampled: false,
-                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
-                            view_dimension: wgpu::TextureViewDimension::D2,
-                        },
-                        count: None,
-                    },
-                    wgpu::BindGroupLayoutEntry {
-                        binding: 3,
-                        visibility: wgpu::ShaderStages::FRAGMENT,
-                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-                        count: None,
-                    },
-                ],
-                label: Some("texture_bind_group_layout"),
-            });
-
-        // UPDATED!
-        let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
-        let projection =
-            camera::Projection::new(config.width, config.height, cgmath::Deg(45.0), 0.1, 100.0);
-        let camera_controller = camera::CameraController::new(4.0, 0.4);
-
-        let mut camera_uniform = CameraUniform::new();
-        camera_uniform.update_view_proj(&camera, &projection);
-
-        let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some("Camera Buffer"),
-            contents: bytemuck::cast_slice(&[camera_uniform]),
-            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-        });
-
-        const SPACE_BETWEEN: f32 = 3.0;
-        let instances = (0..NUM_INSTANCES_PER_ROW)
-            .into_par_iter() // NEW!
-            .flat_map(|z| {
-                // UPDATED!
-                (0..NUM_INSTANCES_PER_ROW).into_par_iter().map(move |x| {
-                    let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
-                    let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
-
-                    let position = cgmath::Vector3 { x, y: 0.0, z };
-
-                    let rotation = if position.is_zero() {
-                        cgmath::Quaternion::from_axis_angle(
-                            cgmath::Vector3::unit_z(),
-                            cgmath::Deg(0.0),
-                        )
-                    } else {
-                        cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0))
-                    };
-
-                    Instance { position, rotation }
-                })
-            })
-            .collect::<Vec<_>>();
-
-        let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
-        let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some("Instance Buffer"),
-            contents: bytemuck::cast_slice(&instance_data),
-            usage: wgpu::BufferUsages::VERTEX,
-        });
-
-        let camera_bind_group_layout =
-            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                entries: &[wgpu::BindGroupLayoutEntry {
-                    binding: 0,
-                    visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
-                    ty: wgpu::BindingType::Buffer {
-                        ty: wgpu::BufferBindingType::Uniform,
-                        has_dynamic_offset: false,
-                        min_binding_size: None,
-                    },
-                    count: None,
-                }],
-                label: Some("camera_bind_group_layout"),
-            });
-
-        let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-            layout: &camera_bind_group_layout,
-            entries: &[wgpu::BindGroupEntry {
-                binding: 0,
-                resource: camera_buffer.as_entire_binding(),
-            }],
-            label: Some("camera_bind_group"),
-        });
-
-        let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
-        let obj_model = model::Model::load(
-            &device,
-            &queue,
-            &texture_bind_group_layout,
-            res_dir.join("cube.obj"),
-        )
-        .unwrap();
-
-        let light_uniform = LightUniform {
-            position: [2.0, 2.0, 2.0],
-            _padding: 0,
-            color: [1.0, 1.0, 1.0],
-            _padding2: 0,
-        };
-
-        let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-            label: Some("Light VB"),
-            contents: bytemuck::cast_slice(&[light_uniform]),
-            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-        });
-
-        let light_bind_group_layout =
-            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                entries: &[wgpu::BindGroupLayoutEntry {
-                    binding: 0,
-                    visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
-                    ty: wgpu::BindingType::Buffer {
-                        ty: wgpu::BufferBindingType::Uniform,
-                        has_dynamic_offset: false,
-                        min_binding_size: None,
-                    },
-                    count: None,
-                }],
-                label: None,
-            });
-
-        let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
-            layout: &light_bind_group_layout,
-            entries: &[wgpu::BindGroupEntry {
-                binding: 0,
-                resource: light_buffer.as_entire_binding(),
-            }],
-            label: None,
-        });
-
-        let depth_texture =
-            texture::Texture::create_depth_texture(&device, &config, "depth_texture");
-
-        let render_pipeline_layout =
-            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-                label: Some("Render Pipeline Layout"),
-                bind_group_layouts: &[
-                    &texture_bind_group_layout,
-                    &camera_bind_group_layout,
-                    &light_bind_group_layout,
-                ],
-                push_constant_ranges: &[],
-            });
-
-        let render_pipeline = {
-            let shader = wgpu::ShaderModuleDescriptor {
-                label: Some("Normal Shader"),
-                source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-            };
-            create_render_pipeline(
-                &device,
-                &render_pipeline_layout,
-                config.format,
-                Some(texture::Texture::DEPTH_FORMAT),
-                &[model::ModelVertex::desc(), InstanceRaw::desc()],
-                shader,
-            )
-        };
-
-        let light_render_pipeline = {
-            let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-                label: Some("Light Pipeline Layout"),
-                bind_group_layouts: &[&camera_bind_group_layout, &light_bind_group_layout],
-                push_constant_ranges: &[],
-            });
-            let shader = wgpu::ShaderModuleDescriptor {
-                label: Some("Light Shader"),
-                source: wgpu::ShaderSource::Wgsl(include_str!("light.wgsl").into()),
-            };
-            create_render_pipeline(
-                &device,
-                &layout,
-                config.format,
-                Some(texture::Texture::DEPTH_FORMAT),
-                &[model::ModelVertex::desc()],
-                shader,
-            )
-        };
-
-        let debug_material = {
-            let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
-            let normal_bytes = include_bytes!("../res/cobble-normal.png");
-
-            let diffuse_texture = texture::Texture::from_bytes(
-                &device,
-                &queue,
-                diffuse_bytes,
-                "res/alt-diffuse.png",
-                false,
-            )
-            .unwrap();
-            let normal_texture = texture::Texture::from_bytes(
-                &device,
-                &queue,
-                normal_bytes,
-                "res/alt-normal.png",
-                true,
-            )
-            .unwrap();
-
-            model::Material::new(
-                &device,
-                "alt-material",
-                diffuse_texture,
-                normal_texture,
-                &texture_bind_group_layout,
-            )
-        };
-
-        Self {
-            surface,
-            device,
-            queue,
-            config,
-            render_pipeline,
-            obj_model,
-            camera,
-            projection,
-            camera_controller,
-            camera_buffer,
-            camera_bind_group,
-            camera_uniform,
-            instances,
-            instance_buffer,
-            depth_texture,
-            size,
-            light_uniform,
-            light_buffer,
-            light_bind_group,
-            light_render_pipeline,
-            #[allow(dead_code)]
-            debug_material,
-            mouse_pressed: false,
-        }
-    }
-
-    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
-        if new_size.width > 0 && new_size.height > 0 {
-            self.projection.resize(new_size.width, new_size.height);
-            self.size = new_size;
-            self.config.width = new_size.width;
-            self.config.height = new_size.height;
-            self.surface.configure(&self.device, &self.config);
-            self.depth_texture =
-                texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture");
-        }
-    }
-
-    fn input(&mut self, event: &DeviceEvent) -> bool {
-        match event {
-            DeviceEvent::Key(KeyboardInput {
-                virtual_keycode: Some(key),
-                state,
-                ..
-            }) => self.camera_controller.process_keyboard(*key, *state),
-            DeviceEvent::MouseWheel { delta, .. } => {
-                self.camera_controller.process_scroll(delta);
-                true
-            }
-            DeviceEvent::Button {
-                button: 1, // Left Mouse Button
-                state,
-            } => {
-                self.mouse_pressed = *state == ElementState::Pressed;
-                true
-            }
-            DeviceEvent::MouseMotion { delta } => {
-                if self.mouse_pressed {
-                    self.camera_controller.process_mouse(delta.0, delta.1);
-                }
-                true
-            }
-            _ => false,
-        }
-    }
-
-    fn update(&mut self, dt: std::time::Duration) {
-        self.camera_controller.update_camera(&mut self.camera, dt);
-        self.camera_uniform
-            .update_view_proj(&self.camera, &self.projection);
-        self.queue.write_buffer(
-            &self.camera_buffer,
-            0,
-            bytemuck::cast_slice(&[self.camera_uniform]),
-        );
-
-        // Update the light
-        let old_position: cgmath::Vector3<_> = self.light_uniform.position.into();
-        self.light_uniform.position =
-            (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
-                * old_position)
-                .into();
-        self.queue.write_buffer(
-            &self.light_buffer,
-            0,
-            bytemuck::cast_slice(&[self.light_uniform]),
-        );
-    }
-
-    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
-        let output = self.surface.get_current_texture()?;
-        let view = output
-            .texture
-            .create_view(&wgpu::TextureViewDescriptor::default());
-
-        let mut encoder = self
-            .device
-            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
-                label: Some("Render Encoder"),
-            });
-
-        {
-            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-                label: Some("Render Pass"),
-                color_attachments: &[wgpu::RenderPassColorAttachment {
-                    view: &view,
-                    resolve_target: None,
-                    ops: wgpu::Operations {
-                        load: wgpu::LoadOp::Clear(wgpu::Color {
-                            r: 0.1,
-                            g: 0.2,
-                            b: 0.3,
-                            a: 1.0,
-                        }),
-                        store: true,
-                    },
-                }],
-                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
-                    view: &self.depth_texture.view,
-                    depth_ops: Some(wgpu::Operations {
-                        load: wgpu::LoadOp::Clear(1.0),
-                        store: true,
-                    }),
-                    stencil_ops: None,
-                }),
-            });
-
-            render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
-            render_pass.set_pipeline(&self.light_render_pipeline);
-            render_pass.draw_light_model(
-                &self.obj_model,
-                &self.camera_bind_group,
-                &self.light_bind_group,
-            );
-
-            render_pass.set_pipeline(&self.render_pipeline);
-            render_pass.draw_model_instanced(
-                &self.obj_model,
-                0..self.instances.len() as u32,
-                &self.camera_bind_group,
-                &self.light_bind_group,
-            );
-        }
-        self.queue.submit(iter::once(encoder.finish()));
-        output.present();
-
-        Ok(())
-    }
-}
+use tutorial13_threading::run;
 
 fn main() {
-    env_logger::init();
-    let event_loop = EventLoop::new();
-    let title = env!("CARGO_PKG_NAME");
-    let window = winit::window::WindowBuilder::new()
-        .with_title(title)
-        .build(&event_loop)
-        .unwrap();
-    let mut state = pollster::block_on(State::new(&window)); // NEW!
-    let mut last_render_time = std::time::Instant::now();
-    event_loop.run(move |event, _, control_flow| {
-        *control_flow = ControlFlow::Poll;
-        match event {
-            Event::MainEventsCleared => window.request_redraw(),
-            Event::DeviceEvent {
-                ref event,
-                .. // We're not using device_id currently
-            } => {
-                state.input(event);
-            }
-            Event::WindowEvent {
-                ref event,
-                window_id,
-            } if window_id == window.id() => {
-                match event {
-                    WindowEvent::CloseRequested
-                    | WindowEvent::KeyboardInput {
-                        input:
-                            KeyboardInput {
-                                state: ElementState::Pressed,
-                                virtual_keycode: Some(VirtualKeyCode::Escape),
-                                ..
-                            },
-                        ..
-                    } => *control_flow = ControlFlow::Exit,
-                    WindowEvent::Resized(physical_size) => {
-                        state.resize(*physical_size);
-                    }
-                    WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
-                        state.resize(**new_inner_size);
-                    }
-                    _ => {}
-                }
-            }
-            Event::RedrawRequested(window_id) if window_id == window.id() => {
-                let now = std::time::Instant::now();
-                let dt = now - last_render_time;
-                last_render_time = now;
-                state.update(dt);
-                match state.render() {
-                    Ok(_) => {}
-                    // Reconfigure the surface if lost
-                    Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
-                    // The system is out of memory, we should probably quit
-                    Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
-                    // All other errors (Outdated, Timeout) should be resolved by the next frame
-                    Err(e) => eprintln!("{:?}", e),
-                }
-            }
-            _ => {}
-        }
-    });
-}
+    run();
+}
\ No newline at end of file