use winit::{
    event::*,
    event_loop::{EventLoop, ControlFlow},
    window::{Window, WindowBuilder},
};

use cgmath::prelude::*;

#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
    position: [f32; 3],
    tex_coords: [f32; 2],
}

impl Vertex {
    fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
        use std::mem;
        wgpu::VertexBufferDescriptor {
            stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
            step_mode: wgpu::InputStepMode::Vertex,
            attributes: &[
                wgpu::VertexAttributeDescriptor {
                    offset: 0,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float3,
                },
                wgpu::VertexAttributeDescriptor {
                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float2,
                },
            ],
        }
    }
}

const VERTICES: &[Vertex] = &[
    Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
    Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
    Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
    Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
    Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];

const INDICES: &[u16] = &[
    0, 1, 4,
    1, 2, 4,
    2, 3, 4,
];

#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
    1.0, 0.0, 0.0, 0.0,
    0.0, -1.0, 0.0, 0.0,
    0.0, 0.0, 0.5, 0.0,
    0.0, 0.0, 0.5, 1.0,
);

const NUM_INSTANCES_PER_ROW: u32 = 10;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);

struct Camera {
    eye: cgmath::Point3<f32>,
    target: cgmath::Point3<f32>,
    up: cgmath::Vector3<f32>,
    aspect: f32,
    fovy: f32,
    znear: f32,
    zfar: f32,
}

impl Camera {
    fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
        let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
        let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
        return proj * view;
    }
}

#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
    view_proj: cgmath::Matrix4<f32>,
}

impl Uniforms {
    fn new() -> Self {
        Self {
            view_proj: cgmath::Matrix4::identity(),
        }
    }

    fn update_view_proj(&mut self, camera: &Camera) {
        self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
    }
}

struct CameraController {
    speed: f32,
    is_up_pressed: bool,
    is_down_pressed: bool,
    is_forward_pressed: bool,
    is_backward_pressed: bool,
    is_left_pressed: bool,
    is_right_pressed: bool,
}

impl CameraController {
    fn new(speed: f32) -> Self {
        Self {
            speed,
            is_up_pressed: false,
            is_down_pressed: false,
            is_forward_pressed: false,
            is_backward_pressed: false,
            is_left_pressed: false,
            is_right_pressed: false,
        }
    }

    fn process_events(&mut self, event: &WindowEvent) -> bool {
        match event {
            WindowEvent::KeyboardInput {
                input: KeyboardInput {
                    state,
                    virtual_keycode: Some(keycode),
                    ..
                },
                ..
            } => {
                let is_pressed = *state == ElementState::Pressed;
                match keycode {
                    VirtualKeyCode::Space => {
                        self.is_up_pressed = is_pressed;
                        true
                    }
                    VirtualKeyCode::LShift => {
                        self.is_down_pressed = is_pressed;
                        true
                    }
                    VirtualKeyCode::W | VirtualKeyCode::Up => {
                        self.is_forward_pressed = is_pressed;
                        true
                    }
                    VirtualKeyCode::A | VirtualKeyCode::Left => {
                        self.is_left_pressed = is_pressed;
                        true
                    }
                    VirtualKeyCode::S | VirtualKeyCode::Down => {
                        self.is_backward_pressed = is_pressed;
                        true
                    }
                    VirtualKeyCode::D | VirtualKeyCode::Right => {
                        self.is_right_pressed = is_pressed;
                        true
                    }
                    _ => false,
                }
            }
            _ => false,
        }
    }

    fn update_camera(&self, camera: &mut Camera) {
        let forward = (camera.target - camera.eye).normalize();

        if self.is_forward_pressed {
            camera.eye += forward * self.speed;
        }
        if self.is_backward_pressed {
            camera.eye -= forward * self.speed;
        }

        let right = forward.cross(camera.up);

        if self.is_right_pressed {
            camera.eye += right * self.speed;
        }
        if self.is_left_pressed {
            camera.eye -= right * self.speed;
        }
    }
}

const ROTATION_SPEED: f32 = 2.0 * std::f32::consts::PI / 60.0;

struct Instance {
    position: cgmath::Vector3<f32>,
    rotation: cgmath::Quaternion<f32>,
}

impl Instance {
    fn to_matrix(&self) -> cgmath::Matrix4<f32> {
        cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
    }
}

struct State {
    surface: wgpu::Surface,
    device: wgpu::Device,
    queue: wgpu::Queue,
    sc_desc: wgpu::SwapChainDescriptor,
    swap_chain: wgpu::SwapChain,
    render_pipeline: wgpu::RenderPipeline,
    vertex_buffer: wgpu::Buffer,
    index_buffer: wgpu::Buffer,
    num_indices: u32,
    diffuse_texture: wgpu::Texture,
    diffuse_texture_view: wgpu::TextureView,
    diffuse_sampler: wgpu::Sampler,
    diffuse_bind_group: wgpu::BindGroup,
    camera: Camera,
    camera_controller: CameraController,
    uniforms: Uniforms,
    uniform_buffer: wgpu::Buffer,
    uniform_bind_group: wgpu::BindGroup,
    size: winit::dpi::PhysicalSize<u32>,
    instances: Vec<Instance>,
    instance_buffer: wgpu::Buffer,
}

fn quat_mul(q: cgmath::Quaternion<f32>, r: cgmath::Quaternion<f32>) -> cgmath::Quaternion<f32> {
    // This block uses quaternions of the form of
    // q=q0+iq1+jq2+kq3
    // and
    // r=r0+ir1+jr2+kr3.
    // The quaternion product has the form of
    // t=q×r=t0+it1+jt2+kt3,
    // where
    // t0=(r0 q0 − r1 q1 − r2 q2 − r3 q3)
    // t1=(r0 q1 + r1 q0 − r2 q3 + r3 q2)
    // t2=(r0 q2 + r1 q3 + r2 q0 − r3 q1)
    // t3=(r0 q3 − r1 q2 + r2 q1 + r3 q0)

    let w = r.s * q.s - r.v.x * q.v.x - r.v.y * q.v.y - r.v.z * q.v.z;
    let xi = r.s * q.v.x + r.v.x * q.s - r.v.y * q.v.z + r.v.z * q.v.y;
    let yj = r.s * q.v.y + r.v.x * q.v.z + r.v.y * q.s - r.v.z * q.v.x;
    let zk = r.s * q.v.z - r.v.x * q.v.y + r.v.y * q.v.x + r.v.z * q.s;

    cgmath::Quaternion::new(w, xi, yj, zk)
}

impl State {
    fn new(window: &Window) -> Self {
        let size = window.inner_size();

        let surface = wgpu::Surface::create(window);

        let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
            ..Default::default()
        }).unwrap();

        let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
            extensions: wgpu::Extensions {
                anisotropic_filtering: false,
            },
            limits: Default::default(),
        });

        let sc_desc = wgpu::SwapChainDescriptor {
            usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
            format: wgpu::TextureFormat::Bgra8UnormSrgb,
            width: size.width,
            height: size.height,
            present_mode: wgpu::PresentMode::Vsync,
        };
        let swap_chain = device.create_swap_chain(&surface, &sc_desc);

        let diffuse_bytes = include_bytes!("happy-tree.png");
        let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
        let diffuse_rgba = diffuse_image.as_rgba8().unwrap();

        use image::GenericImageView;
        let dimensions = diffuse_image.dimensions();

        let size3d = wgpu::Extent3d {
            width: dimensions.0,
            height: dimensions.1,
            depth: 1,
        };
        let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
            size: size3d,
            array_layer_count: 1,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
        });

        let diffuse_buffer = device
            .create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
            .fill_from_slice(&diffuse_rgba);

        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            todo: 0,
        });

        encoder.copy_buffer_to_texture(
            wgpu::BufferCopyView {
                buffer: &diffuse_buffer,
                offset: 0,
                row_pitch: 4 * dimensions.0,
                image_height: dimensions.1,
            },
            wgpu::TextureCopyView {
                texture: &diffuse_texture,
                mip_level: 0,
                array_layer: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            size3d,
        );

        queue.submit(&[encoder.finish()]);

        let diffuse_texture_view = diffuse_texture.create_default_view();
        let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            lod_min_clamp: -100.0,
            lod_max_clamp: 100.0,
            compare_function: wgpu::CompareFunction::Always,
        });

        let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            bindings: &[
                wgpu::BindGroupLayoutBinding {
                    binding: 0,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::SampledTexture {
                        multisampled: false,
                        dimension: wgpu::TextureViewDimension::D2,
                    },
                },
                wgpu::BindGroupLayoutBinding {
                    binding: 1,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Sampler,
                },
            ],
        });

        let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            bindings: &[
                wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
                },
                wgpu::Binding {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
                }
            ],
        });

        let camera = Camera {
            eye: (0.0, 5.0, -10.0).into(),
            target: (0.0, 0.0, 0.0).into(),
            up: cgmath::Vector3::unit_y(),
            aspect: sc_desc.width as f32 / sc_desc.height as f32,
            fovy: 45.0,
            znear: 0.1,
            zfar: 100.0,
        };

        let camera_controller = CameraController::new(0.2);

        let mut uniforms = Uniforms::new();
        uniforms.update_view_proj(&camera);

        let uniform_buffer = device
            .create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
            .fill_from_slice(&[uniforms]);

        let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
            (0..NUM_INSTANCES_PER_ROW).map(move |x| {
                let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;

                let rotation = if position.is_zero() {
                    // this is needed so an object at (0, 0, 0) won't get scaled to zero
                    // as Quaternions can affect scale if they're not created correctly
                    cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_y(), cgmath::Deg(0.0))
                } else {
                    cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
                };

                Instance {
                    position, rotation,
                }
            })
        }).collect::<Vec<_>>();

        let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
        let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
        let instance_buffer = device
            .create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ | wgpu::BufferUsage::COPY_DST)
            .fill_from_slice(&instance_data);

        let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            bindings: &[
                wgpu::BindGroupLayoutBinding {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::UniformBuffer {
                        dynamic: false,
                    },
                },
                wgpu::BindGroupLayoutBinding {
                    binding: 1,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::StorageBuffer {
                        dynamic: false,
                        readonly: true,
                    }
                }
            ]
        });

        let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &uniform_bind_group_layout,
            bindings: &[
                wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer {
                        buffer: &uniform_buffer,
                        range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
                    }
                },
                wgpu::Binding {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer {
                        buffer: &instance_buffer,
                        range: 0..instance_buffer_size as wgpu::BufferAddress,
                    }
                }
            ],
        });

        let vs_src = include_str!("challenge.vert");
        let fs_src = include_str!("shader.frag");
        let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
        let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
        let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
        let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
        let vs_module = device.create_shader_module(&vs_data);
        let fs_module = device.create_shader_module(&fs_data);

        let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
        });

        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            layout: &render_pipeline_layout,
            vertex_stage: wgpu::ProgrammableStageDescriptor {
                module: &vs_module,
                entry_point: "main",
            },
            fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
                module: &fs_module,
                entry_point: "main",
            }),
            rasterization_state: Some(wgpu::RasterizationStateDescriptor {
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: wgpu::CullMode::None,
                depth_bias: 0,
                depth_bias_slope_scale: 0.0,
                depth_bias_clamp: 0.0,
            }),
            primitive_topology: wgpu::PrimitiveTopology::TriangleList,
            color_states: &[
                wgpu::ColorStateDescriptor {
                    format: sc_desc.format,
                    color_blend: wgpu::BlendDescriptor::REPLACE,
                    alpha_blend: wgpu::BlendDescriptor::REPLACE,
                    write_mask: wgpu::ColorWrite::ALL,
                },
            ],
            depth_stencil_state: None,
            index_format: wgpu::IndexFormat::Uint16,
            vertex_buffers: &[
                Vertex::desc(),
            ],
            sample_count: 1,
            sample_mask: !0,
            alpha_to_coverage_enabled: false,
        });

        let vertex_buffer = device
            .create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
            .fill_from_slice(VERTICES);
        let index_buffer = device
            .create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
            .fill_from_slice(INDICES);
        let num_indices = INDICES.len() as u32;

        Self {
            surface,
            device,
            queue,
            sc_desc,
            swap_chain,
            render_pipeline,
            vertex_buffer,
            index_buffer,
            num_indices,
            diffuse_texture,
            diffuse_texture_view,
            diffuse_sampler,
            diffuse_bind_group,
            camera,
            camera_controller,
            uniform_buffer,
            uniform_bind_group,
            uniforms,
            size,
            instances,
            instance_buffer,
        }
    }

    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        self.size = new_size;
        self.sc_desc.width = new_size.width;
        self.sc_desc.height = new_size.height;
        self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
        self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
    }

    fn input(&mut self, event: &WindowEvent) -> bool {
        self.camera_controller.process_events(event)
    }

    fn update(&mut self) {
        self.camera_controller.update_camera(&mut self.camera);
        self.uniforms.update_view_proj(&self.camera);

        let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            todo: 0,
        });

        let staging_buffer = self.device
            .create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
            .fill_from_slice(&[self.uniforms]);

        encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);

        for instance in &mut self.instances {
            let amount = cgmath::Quaternion::from_angle_y(cgmath::Rad(ROTATION_SPEED));
            let current = instance.rotation;
            instance.rotation = quat_mul(amount, current);
        }

        let instance_data = self.instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
        let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
        let instance_buffer = self.device
            .create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::COPY_SRC)
            .fill_from_slice(&instance_data);

        encoder.copy_buffer_to_buffer(&instance_buffer, 0, &self.instance_buffer, 0, instance_buffer_size as wgpu::BufferAddress);

        self.queue.submit(&[encoder.finish()]);
    }

    fn render(&mut self) {
        let frame = self.swap_chain.get_next_texture();
        let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            todo: 0,
        });

        {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                color_attachments: &[
                    wgpu::RenderPassColorAttachmentDescriptor {
                        attachment: &frame.view,
                        resolve_target: None,
                        load_op: wgpu::LoadOp::Clear,
                        store_op: wgpu::StoreOp::Store,
                        clear_color: wgpu::Color {
                            r: 0.1,
                            g: 0.2,
                            b: 0.3,
                            a: 1.0,
                        },
                    }
                ],
                depth_stencil_attachment: None,
            });

            render_pass.set_pipeline(&self.render_pipeline);
            render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
            render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
            render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
            render_pass.set_index_buffer(&self.index_buffer, 0);

            render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
        }

        self.queue.submit(&[
            encoder.finish()
        ]);
    }
}

fn main() {
    let event_loop = EventLoop::new();
    let window = WindowBuilder::new()
        .build(&event_loop)
        .unwrap();

    let mut state = State::new(&window);

    let mut old_time = std::time::Instant::now();
    const MSPT: std::time::Duration = std::time::Duration::from_millis(20);

    event_loop.run(move |event, _, control_flow| {
        match event {
            Event::WindowEvent {
                ref event,
                window_id,
            } if window_id == window.id() => if state.input(event) {
                ()
            } else {
                match event {
                    WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
                    WindowEvent::KeyboardInput {
                        input,
                        ..
                    } => {
                        match input {
                            KeyboardInput {
                                state: ElementState::Pressed,
                                virtual_keycode: Some(VirtualKeyCode::Escape),
                                ..
                            } => *control_flow = ControlFlow::Exit,
                            _ => (),
                        }
                    }
                    WindowEvent::Resized(physical_size) => {
                        state.resize(*physical_size);
                        ()
                    }
                    WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                        state.resize(**new_inner_size);
                        ()
                    }
                    _ => (),
                }
            }
            Event::MainEventsCleared => {
                state.update();
                state.render();

                let new_time = std::time::Instant::now();
                let delta_time = new_time - old_time;
                *control_flow = if delta_time > MSPT {
                    ControlFlow::Poll
                } else {
                    ControlFlow::WaitUntil(old_time + MSPT)
                };
                old_time = new_time;
            }
            _ => (),
        }
    });
}
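
// A minimal sanity-check sketch for the hand-rolled `quat_mul` above, not part
// of the original example. It assumes the same cgmath version used by the rest
// of this file: cgmath's `Quaternion` already implements the Hamilton product
// via the `*` operator, so `quat_mul(q, r)` should agree with `q * r` up to
// floating-point rounding. Run with `cargo test`.
#[cfg(test)]
mod quat_mul_tests {
    use super::quat_mul;

    #[test]
    fn matches_cgmath_operator() {
        // Arbitrary (non-unit) quaternions; the product formula does not
        // require normalization.
        let q = cgmath::Quaternion::new(0.5f32, 0.1, -0.3, 0.8);
        let r = cgmath::Quaternion::new(-0.2f32, 0.7, 0.4, 0.1);

        let ours = quat_mul(q, r);
        let theirs = q * r; // cgmath's built-in quaternion multiplication

        // Compare component-wise with a small tolerance, since the two
        // implementations sum the terms in a different order.
        assert!((ours.s - theirs.s).abs() < 1e-6);
        assert!((ours.v.x - theirs.v.x).abs() < 1e-6);
        assert!((ours.v.y - theirs.v.y).abs() < 1e-6);
        assert!((ours.v.z - theirs.v.z).abs() < 1e-6);
    }
}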