update wgpu 0.5

pull/26/head
downtime 4 years ago
parent 6d2d839411
commit 764f5405db

Cargo.lock (generated)

@ -2019,25 +2019,30 @@ dependencies = [
name = "tutorial8-depth"
version = "0.1.0"
dependencies = [
"bytemuck 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.23.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.5.0 (git+https://github.com/gfx-rs/wgpu-rs.git)",
"winit 0.22.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tutorial9-models"
version = "0.1.0"
dependencies = [
"bytemuck 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.23.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.5.0 (git+https://github.com/gfx-rs/wgpu-rs.git)",
"winit 0.22.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]

@ -63,7 +63,7 @@ impl State {
}
fn input(&mut self, event: &WindowEvent) -> bool {
false
self.camera_controller.process_events(event)
}
async fn update(&mut self) {

@ -7,14 +7,16 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
image = "0.23"
winit = "0.22"
glsl-to-spirv = "0.1"
cgmath = "0.17"
failure = "0.1"
wgpu = "0.4.0"
# wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
tobj = "1"
bytemuck = "1.2"
futures = "0.3"
# wgpu = "0.5"
wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
# zerocopy = "0.2.8"
[[bin]]
@ -24,4 +26,5 @@ path = "src/main.rs"
[[bin]]
name = "tutorial8-challenge"
path = "src/challenge.rs"
path = "src/challenge.rs"

@ -7,15 +7,16 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
image = "0.23"
winit = "0.22"
glsl-to-spirv = "0.1"
cgmath = "0.17"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"
# wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
tobj = "1"
bytemuck = "1.2"
futures = "0.3"
# wgpu = "0.5"
wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
# zerocopy = "0.2.8"
[[bin]]

@ -1,18 +1,15 @@
use cgmath::prelude::*;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
event_loop::{ControlFlow, EventLoop},
window::Window,
};
use cgmath::prelude::*;
mod texture;
mod model;
mod texture;
use model::{DrawModel, Vertex};
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
@ -55,10 +52,12 @@ impl Uniforms {
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
self.view_proj = camera.build_view_projection_matrix();
// self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
unsafe impl bytemuck::Zeroable for Uniforms {}
unsafe impl bytemuck::Pod for Uniforms {}
struct CameraController {
speed: f32,
is_up_pressed: bool,
@ -85,11 +84,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -158,77 +158,44 @@ impl Instance {
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
obj_model: model::Model,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
depth_texture: wgpu::Texture,
depth_texture_view: wgpu::TextureView,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> wgpu::Texture {
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
device.create_texture(&desc)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
let (device, mut queue) = adapter.request_device(&Default::default());
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
fn new(
sc_desc: &wgpu::SwapChainDescriptor,
device: &wgpu::Device,
) -> (Self, Option<Vec<wgpu::CommandBuffer>>) {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
component_type: wgpu::TextureComponentType::Float,
dimension: wgpu::TextureViewDimension::D2,
},
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false },
},
],
label: None,
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -244,56 +211,67 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
let instances = (0..NUM_INSTANCES_PER_ROW)
.flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
.collect::<Vec<_>>();
let instance_data = instances
.iter()
.map(Instance::to_matrix)
.collect::<Vec<_>>();
let instance_buffer_size =
instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device.create_buffer_with_data(
matrix4f_cast_slice(&[instance_data]),
wgpu::BufferUsage::STORAGE_READ,
);
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer { dynamic: false },
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
}
}
]
});
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
},
},
],
label: None,
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
@ -303,20 +281,25 @@ impl State {
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Buffer {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
}
},
},
],
label: None,
});
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, "code/beginner/tutorial9-models/src/res/cube.obj").unwrap();
queue.submit(&cmds);
let (obj_model, cmds) = model::Model::load(
&device,
&texture_bind_group_layout,
"code/beginner/tutorial9-models/src/res/cube.obj",
)
.unwrap();
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
@ -327,12 +310,13 @@ impl State {
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let depth_texture = create_depth_texture(&device, &sc_desc);
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc).texture;
let depth_texture_view = depth_texture.create_default_view();
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
@ -352,16 +336,14 @@ impl State {
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: DEPTH_FORMAT,
format: texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
@ -369,21 +351,16 @@ impl State {
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[
model::ModelVertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[model::ModelVertex::desc()],
},
});
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
let state = Self {
render_pipeline,
obj_model,
camera,
@ -391,71 +368,75 @@ impl State {
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
depth_texture,
depth_texture_view,
}
};
(state, Some(cmds))
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = create_depth_texture(&self.device, &self.sc_desc);
fn resize(
&mut self,
sc_desc: &wgpu::SwapChainDescriptor,
device: &wgpu::Device,
) -> Option<wgpu::CommandBuffer> {
self.depth_texture = texture::Texture::create_depth_texture(device, sc_desc).texture;
self.depth_texture_view = self.depth_texture.create_default_view();
self.camera.aspect = sc_desc.width as f32 / sc_desc.height as f32;
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
None
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
fn update(&mut self, device: &wgpu::Device) -> Option<wgpu::CommandBuffer> {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
encoder.copy_buffer_to_buffer(
&staging_buffer,
0,
&self.uniform_buffer,
0,
std::mem::size_of::<Uniforms>() as wgpu::BufferAddress,
);
self.queue.submit(&[encoder.finish()]);
Some(encoder.finish())
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
fn render(
&mut self,
frame: &wgpu::SwapChainOutput,
device: &wgpu::Device,
) -> wgpu::CommandBuffer {
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture_view,
depth_load_op: wgpu::LoadOp::Clear,
@ -468,63 +449,123 @@ impl State {
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group);
render_pass.draw_model_instanced(
&self.obj_model,
0..self.instances.len() as u32,
&self.uniform_bind_group,
);
}
self.queue.submit(&[
encoder.finish()
]);
encoder.finish()
}
}
async fn run_async(event_loop: EventLoop<()>, window: Window) {
let size = window.inner_size();
let surface = wgpu::Surface::create(&window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY,
)
.await
.unwrap();
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: wgpu::Limits::default(),
})
.await;
let mut sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: if cfg!(target_arch = "wasm32") {
wgpu::TextureFormat::Bgra8Unorm
} else {
wgpu::TextureFormat::Bgra8UnormSrgb
},
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Mailbox,
};
let mut swap_chain = device.create_swap_chain(&surface, &sc_desc);
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
let (mut state, init_cmds_buf) = State::new(&sc_desc, &device);
if let Some(cmds_buf) = init_cmds_buf {
queue.submit(&cmds_buf);
}
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => window.request_redraw(),
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
} => {
*control_flow = ControlFlow::Exit;
}
_ => {}
},
WindowEvent::Resized(physical_size) => {
sc_desc.width = physical_size.width;
sc_desc.height = physical_size.height;
swap_chain = device.create_swap_chain(&surface, &sc_desc);
let cmd_buf = state.resize(&sc_desc, &device);
if let Some(cmd_buf) = cmd_buf {
queue.submit(&[cmd_buf]);
}
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
sc_desc.width = new_inner_size.width;
sc_desc.height = new_inner_size.height;
swap_chain = device.create_swap_chain(&surface, &sc_desc);
let cmd_buf = state.resize(&sc_desc, &device);
if let Some(cmd_buf) = cmd_buf {
queue.submit(&[cmd_buf]);
}
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
Event::RedrawRequested(_) => {
if let Some(cmd_buf) = state.update(&device) {
queue.submit(&[cmd_buf]);
}
let frame = swap_chain
.get_next_texture()
.expect("Timeout when acquiring next swap chain texture");
let command_buf = state.render(&frame, &device);
queue.submit(&[command_buf]);
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
});
}
fn main() {
let event_loop = EventLoop::new();
let title = "tutorial9-models";
let window = winit::window::WindowBuilder::new()
.with_title(title)
.build(&event_loop)
.unwrap();
futures::executor::block_on(run_async(event_loop, window));
}
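/// Reinterpret the first `Vec<cgmath::Matrix4<f32>>` in `a` as raw bytes so it can be
/// uploaded into the instance storage buffer. Only `a[0]` is read; this relies on
/// `Matrix4<f32>` being a packed array of 16 `f32`s with no padding.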
#[inline]
pub fn matrix4f_cast_slice(a: &[Vec<cgmath::Matrix4<f32>>]) -> &[u8] {
let new_len = a[0].len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
unsafe { core::slice::from_raw_parts(a[0].as_ptr() as *const u8, new_len) }
}

@ -1,5 +1,5 @@
use std::path::Path;
use std::ops::Range;
use std::path::Path;
use crate::texture;
@ -14,6 +14,8 @@ pub struct ModelVertex {
tex_coords: [f32; 2],
normal: [f32; 3],
}
unsafe impl bytemuck::Zeroable for ModelVertex {}
unsafe impl bytemuck::Pod for ModelVertex {}
impl Vertex for ModelVertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
@ -37,7 +39,7 @@ impl Vertex for ModelVertex {
shader_location: 2,
format: wgpu::VertexFormat::Float3,
},
]
],
}
}
}
@ -61,12 +63,15 @@ pub struct Model {
pub materials: Vec<Material>,
}
impl Model {
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, layout: &wgpu::BindGroupLayout, path: P) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref())?;
// We're assuming that the texture files are stored with the obj file
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent().unwrap();
// Our `Texture` struct currently returns a `CommandBuffer` when it's created so we need to collect those and return them.
@ -75,8 +80,9 @@ impl Model {
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let (diffuse_texture, cmds) = texture::Texture::load(&device, containing_folder.join(diffuse_path))?;
let (diffuse_texture, cmds) =
texture::Texture::load(&device, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
bindings: &[
@ -88,7 +94,8 @@ impl Model {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
]
],
label: None,
});
materials.push(Material {
@ -109,10 +116,7 @@ impl Model {
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1],
],
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
@ -121,13 +125,14 @@ impl Model {
});
}
let vertex_buffer = device
.create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&vertices);
let index_buffer = device
.create_buffer_mapped(m.mesh.indices.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(&m.mesh.indices);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&vertices),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&m.mesh.indices),
wgpu::BufferUsage::INDEX,
);
meshes.push(Mesh {
name: m.name,
@ -137,40 +142,68 @@ impl Model {
material: m.mesh.material_id.unwrap_or(0),
});
}
Ok((Self { meshes, materials, }, command_buffers))
Ok((Self { meshes, materials }, command_buffers))
}
}
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup);
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup);
pub trait DrawModel<'a, 'b>
where
'b: 'a,
{
fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, uniforms: &'b wgpu::BindGroup);
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
);
fn draw_model(&mut self, model: &'b Model, uniforms: &'b wgpu::BindGroup);
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup) {
impl<'a, 'b> DrawModel<'a, 'b> for wgpu::RenderPass<'a>
where
'b: 'a,
{
fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, uniforms: &'b wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
) {
self.set_vertex_buffer(0, &mesh.vertex_buffer, 0, 0);
self.set_index_buffer(&mesh.index_buffer, 0, 0);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, &uniforms, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup) {
fn draw_model(&mut self, model: &'b Model, uniforms: &'b wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms);
}
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms);
}
}
}
}

@ -10,5 +10,5 @@ Ke 0.000000 0.000000 0.000000
Ni 1.450000
d 1.000000
illum 2
map_Bump /home/benjamin/Downloads/tuto-14-normal.png
map_Kd /home/benjamin/Downloads/tuto-14-diffuse.jpg
map_Bump cube-normal.png
map_Kd cube-diffuse.jpg

@ -1,6 +1,7 @@
use image::GenericImageView;
use std::path::Path;
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub struct Texture {
pub texture: wgpu::Texture,
@ -9,18 +10,30 @@ pub struct Texture {
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, path: P) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
path: P,
) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::open(path)?;
Self::from_image(device, &img)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
) -> Self {
let desc = wgpu::TextureDescriptor {
format: Self::DEPTH_FORMAT,
label: None,
size: wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
let texture = device.create_texture(&desc);
@ -34,18 +47,28 @@ impl Texture {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::Always,
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn from_bytes(
device: &wgpu::Device,
bytes: &[u8],
) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, &img)
}
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn from_image(
device: &wgpu::Device,
img: &image::DynamicImage,
) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let rgba = img.to_rgba();
let dimensions = img.dimensions();
@ -55,8 +78,8 @@ impl Texture {
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: None,
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
@ -65,8 +88,7 @@ impl Texture {
});
let buffer = device
.create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&rgba);
.create_buffer_with_data(bytemuck::cast_slice(&rgba), wgpu::BufferUsage::COPY_SRC);
let mut encoder = device.create_command_encoder(&Default::default());
@ -74,15 +96,15 @@ impl Texture {
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
bytes_per_row: 4 * dimensions.0,
rows_per_image: dimensions.1,
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
},
size,
);
@ -98,9 +120,16 @@ impl Texture {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::Always,
});
Ok((Self { texture, view, sampler }, cmd_buffer))
Ok((
Self {
texture,
view,
sampler,
},
cmd_buffer,
))
}
}
}

@ -665,7 +665,7 @@ async fn run_async(event_loop: EventLoop<()>, window: Window) {
}
}
}
Event::MainEventsCleared => {
Event::RedrawRequested(_) => {
if let Some(cmd_buf) = state.update(&device) {
queue.submit(&[cmd_buf]);
}
@ -675,13 +675,6 @@ async fn run_async(event_loop: EventLoop<()>, window: Window) {
let command_buf = state.render(&frame, &device);
queue.submit(&[command_buf]);
}
Event::RedrawRequested(_) => {
let frame = swap_chain
.get_next_texture()
.expect("Timeout when acquiring next swap chain texture");
let command_buf = state.render(&frame, &device);
queue.submit(&[command_buf]);
}
_ => {}
}
});

@ -65,9 +65,10 @@ This is basically the same as the original `VertexBufferDescriptor`, but we adde
```rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
// ...
vertex_buffers: &[
model::ModelVertex::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[model::ModelVertex::desc()],
},
// ...
});
```
@ -133,10 +134,14 @@ With all that out of the way, we can get to loading our model.
```rust
impl Model {
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, path: P) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref())?;
// We're assuming that the texture files are stored with the obj file
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent().unwrap();
// Our `Texture` struct currently returns a `CommandBuffer` when it's created so we need to collect those and return them.
@ -145,7 +150,24 @@ impl Model {
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let (diffuse_texture, cmds) = texture::Texture::load(&device, containing_folder.join(diffuse_path))?;
let (diffuse_texture, cmds) =
texture::Texture::load(&device, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(Material {
name: mat.name,
diffuse_texture,
@ -163,10 +185,7 @@ impl Model {
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1],
],
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
@ -175,13 +194,14 @@ impl Model {
});
}
let vertex_buffer = device
.create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&vertices);
let index_buffer = device
.create_buffer_mapped(m.mesh.indices.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(&m.mesh.indices);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&vertices),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&m.mesh.indices),
wgpu::BufferUsage::INDEX,
);
meshes.push(Mesh {
name: m.name,
@ -191,8 +211,8 @@ impl Model {
material: m.mesh.material_id.unwrap_or(0),
});
}
Ok((Self { meshes, materials, }, command_buffers))
Ok((Self { meshes, materials }, command_buffers))
}
}
```
@ -202,8 +222,10 @@ Make sure that you change the `IndexFormat` that the `RenderPipeline` uses from
```rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
// ...
index_format: wgpu::IndexFormat::Uint32,
// ...
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[model::ModelVertex::desc()],
}, // ...
});
```
@ -212,19 +234,32 @@ let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescrip
Before we can draw the model, we need to be able to draw an individual mesh. Let's create a trait called `DrawModel`, and implement it for `RenderPass`.
```rust
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, instances: Range<u32>);
pub trait DrawModel<'a, 'b>
where
'b: 'a,
{
fn draw_mesh(&mut self, mesh: &'b Mesh);
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
instances: Range<u32>,
);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh) {
impl<'a, 'b> DrawModel<'a, 'b> for wgpu::RenderPass<'a>
where
'b: 'a,
{
fn draw_mesh(&mut self, mesh: &'b Mesh) {
self.draw_mesh_instanced(mesh, 0..1);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, instances: Range<u32>) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
instances: Range<u32>,
){
self.set_vertex_buffer(0, &mesh.vertex_buffer, 0, 0);
self.set_index_buffer(&mesh.index_buffer, 0, 0);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
}
@ -246,7 +281,6 @@ Before that though we need to actually load the model and save it to `State`. Pu
```rust
let (obj_model, cmds) = model::Model::load(&device, "code/beginner/tutorial9-models/src/res/cube.obj").unwrap();
queue.submit(&cmds);
```
The path to the obj will be different for you, so keep that in mind.
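As a minimal sketch (assuming this commit's three-argument `Model::load(device, layout, path)` signature and an obj stored under the crate's `src/res/`), you could build that path from `CARGO_MANIFEST_DIR` instead of hard-coding a repository-relative string:
```rust
// Sketch only: CARGO_MANIFEST_DIR is the directory containing this crate's Cargo.toml, so the
// resulting path works regardless of the working directory the binary is launched from.
// "src/res/cube.obj" is an assumed location; adjust it to wherever your model actually lives.
let res_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("src/res/cube.obj");
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, res_path).unwrap();
// The texture uploads recorded during loading still need to be submitted (or, as in this
// commit's `State::new`, handed back to the caller to submit).
queue.submit(&cmds);
```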
@ -299,22 +333,39 @@ pub struct Material {
We're going to add a material parameter to `DrawModel`.
```rust
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup);
pub trait DrawModel<'a, 'b>
where
'b: 'a,
{
fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, uniforms: &'b wgpu::BindGroup);
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup) {
impl<'a, 'b> DrawModel<'a, 'b> for wgpu::RenderPass<'a>
where
'b: 'a,
{
fn draw_mesh(&mut self, mesh: &'b Mesh, material: &'b Material, uniforms: &'b wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
) {
self.set_vertex_buffer(0, &mesh.vertex_buffer, 0, 0);
self.set_index_buffer(&mesh.index_buffer, 0, 0);
self.set_bind_group(0, &material.bind_group, &[]);
// Due to a bug in 0.4, we need to pass in the uniforms and bind them here.
// This will be fixed in 0.5
self.set_bind_group(1, &uniforms, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
@ -340,21 +391,34 @@ With all that in place we should get the following.
Right now we are specifying the mesh and the material directly. This is useful if we want to draw a mesh with a different material. We're also not rendering other parts of the model (if we had some). Let's create a method for `DrawModel` that will draw all the parts of the model with their respective materials.
```rust
pub trait DrawModel {
pub trait DrawModel<'a, 'b>
where
'b: 'a,
{
// ...
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup);
fn draw_model(&mut self, model: &'b Model, uniforms: &'b wgpu::BindGroup);
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
impl<'a, 'b> DrawModel<'a, 'b> for wgpu::RenderPass<'a>
where
'b: 'a, {
// ...
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup) {
fn draw_model(&mut self, model: &'b Model, uniforms: &'b wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms);
}
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
uniforms: &'b wgpu::BindGroup,
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms);
