migrated tutorial8

pull/29/head
Ben Hansen 4 years ago
parent baf4377930
commit 2b8e97b6fa

@ -9,6 +9,10 @@ layout(set = 0, binding = 0) uniform texture2D t_depth;
layout(set = 0, binding = 1) uniform samplerShadow s_depth;
void main() {
float near = 0.1;
float far = 100.0;
float depth = texture(sampler2DShadow(t_depth, s_depth), vec3(v_tex_coords, 1));
f_color = vec4(depth, 0, 0, 1);
float r = (2.0 * near * far) / (far + near - depth * (far - near));
f_color = vec4(vec3(r), 1);
}

@ -14,6 +14,9 @@ struct Vertex {
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
@ -51,10 +54,10 @@ const INDICES: &[u16] = &[
];
const DEPTH_VERTICES: &[Vertex] = &[
Vertex { position: [0.0, 0.0, 0.0], tex_coords: [1.0, 1.0]},
Vertex { position: [1.0, 0.0, 0.0], tex_coords: [0.0, 1.0]},
Vertex { position: [1.0, -1.0, 0.0], tex_coords: [0.0, 0.0]},
Vertex { position: [0.0, -1.0, 0.0], tex_coords: [1.0, 0.0]},
Vertex { position: [0.0, 0.0, 0.0], tex_coords: [0.0, 1.0]},
Vertex { position: [1.0, 0.0, 0.0], tex_coords: [1.0, 1.0]},
Vertex { position: [1.0, 1.0, 0.0], tex_coords: [1.0, 0.0]},
Vertex { position: [0.0, 1.0, 0.0], tex_coords: [0.0, 0.0]},
];
const DEPTH_INDICES: &[u16] = &[
@ -65,12 +68,13 @@ const DEPTH_INDICES: &[u16] = &[
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
#[allow(dead_code)]
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
@ -99,6 +103,9 @@ struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
@ -204,11 +211,22 @@ struct Instance {
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
struct DepthPass {
texture: texture::Texture,
bind_group: wgpu::BindGroup,
@ -221,7 +239,7 @@ struct DepthPass {
impl DepthPass {
fn new(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) -> Self {
let texture = texture::Texture::create_depth_texture(device, sc_desc);
let texture = texture::Texture::create_depth_texture(device, sc_desc, "depth_texture");
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
@ -235,14 +253,17 @@ impl DepthPass {
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some("depth_pass.bind_group"),
});
let vertex_buffer = device
.create_buffer_mapped(DEPTH_VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(DEPTH_VERTICES);
let index_buffer = device
.create_buffer_mapped(DEPTH_INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(DEPTH_INDICES);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(DEPTH_VERTICES),
wgpu::BufferUsage::VERTEX
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(DEPTH_INDICES),
wgpu::BufferUsage::INDEX,
);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[texture_bind_group_layout],
@ -284,10 +305,12 @@ impl DepthPass {
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
@ -303,8 +326,7 @@ impl DepthPass {
}
fn resize(&mut self, device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) {
self.texture = texture::Texture::create_depth_texture(device, sc_desc);
self.texture = texture::Texture::create_depth_texture(device, sc_desc, "depth_texture");
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
@ -317,6 +339,7 @@ impl DepthPass {
resource: wgpu::BindingResource::Sampler(&self.texture.sampler),
}
],
label: Some("depth_pass.bind_group"),
});
}
}
@ -334,9 +357,7 @@ struct State {
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
texture_bind_group_layout: wgpu::BindGroupLayout,
@ -354,114 +375,60 @@ struct State {
depth_pass: DepthPass,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> wgpu::Texture {
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
..sc_desc.to_texture_desc()
};
device.create_texture(&desc)
}
impl State {
fn new(window: &Window) -> Self {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, mut queue) = adapter.request_device(&Default::default());
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let (diffuse_texture, cmds) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmds]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
ty: wgpu::BindingType::Sampler {
comparison: true,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -469,13 +436,14 @@ impl State {
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
@ -492,9 +460,10 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
@ -514,22 +483,23 @@ impl State {
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::STORAGE_READ,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
@ -537,7 +507,8 @@ impl State {
readonly: true,
}
}
]
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -558,6 +529,7 @@ impl State {
}
}
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("shader.vert");
@ -600,7 +572,7 @@ impl State {
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: DEPTH_FORMAT,
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
@ -608,21 +580,25 @@ impl State {
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
let depth_pass = DepthPass::new(&device, &sc_desc, &texture_bind_group_layout);
@ -638,8 +614,6 @@ impl State {
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
texture_bind_group_layout,
camera,
@ -655,7 +629,7 @@ impl State {
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
@ -673,28 +647,30 @@ impl State {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("update encoder"),
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("Render Encoder"),
});
{
@ -727,8 +703,8 @@ impl State {
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
@ -747,8 +723,8 @@ impl State {
});
render_pass.set_pipeline(&self.depth_pass.render_pipeline);
render_pass.set_bind_group(0, &self.depth_pass.bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.depth_pass.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.depth_pass.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.depth_pass.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.depth_pass.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.depth_pass.num_depth_indices, 0, 0..1);
}
@ -759,6 +735,7 @@ impl State {
size: buffer_size,
usage: wgpu::BufferUsage::COPY_DST
| wgpu::BufferUsage::MAP_READ,
label: Some("screenshot_buffer"),
};
let buffer = self.device.create_buffer(&buffer_desc);
@ -773,8 +750,8 @@ impl State {
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: U32_SIZE * self.sc_desc.width,
image_height: self.sc_desc.height,
bytes_per_row: U32_SIZE * self.sc_desc.width,
rows_per_image: self.sc_desc.height,
},
wgpu::Extent3d {
width: self.sc_desc.width,
@ -800,10 +777,14 @@ impl State {
let near = self.camera.znear;
let far = self.camera.zfar;
buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[f32]>| {
let mapping = result.unwrap();
let data = mapping.data;
std::thread::spawn(move || {
use futures::executor::block_on;
let mapping_future = buffer.map_read(0, buffer_size);
let mapping = block_on(mapping_future).unwrap();
let data = mapping.as_slice();
let pixels: &[f32] = bytemuck::try_cast_slice(data).unwrap();
use image::{ImageBuffer, Rgba, Pixel};
let mut buffer = ImageBuffer::<Rgba<u8>, _>::new(
width,
@ -812,22 +793,22 @@ impl State {
let mut x = 0;
let mut y = 0;
for pixel in data {
for pixel in pixels {
let z = pixel * 2.0 - 1.0;
let r = (2.0 * near * far) / (far + near - z * (far - near));
let p = (r.floor() * 255.0 / far) as u8;
buffer.put_pixel(x, y, Pixel::from_channels(
p, p, p, 255,
));
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
buffer.save("image.png").unwrap();
});
}
@ -840,16 +821,17 @@ fn main() {
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
@ -862,26 +844,29 @@ fn main() {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
// new_inner_size is &mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
});
}

@ -14,6 +14,9 @@ struct Vertex {
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
@ -59,6 +62,7 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
#[allow(dead_code)]
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
@ -87,6 +91,9 @@ struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
@ -192,11 +199,22 @@ struct Instance {
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
}
}
}
#[derive(Copy, Clone)]
struct InstanceRaw {
#[allow(dead_code)]
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
@ -210,6 +228,7 @@ struct State {
index_buffer: wgpu::Buffer,
num_indices: u32,
#[allow(dead_code)]
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
@ -222,50 +241,66 @@ struct State {
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
#[allow(dead_code)]
instance_buffer: wgpu::Buffer,
depth_texture: texture::Texture,
}
impl State {
fn new(window: &Window) -> Self {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, mut queue) = adapter.request_device(&Default::default());
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmds) = texture::Texture::from_bytes(&device, diffuse_bytes).unwrap();
let (diffuse_texture, cmds) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmds]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -280,6 +315,7 @@ impl State {
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
@ -296,9 +332,10 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
@ -318,22 +355,23 @@ impl State {
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::STORAGE_READ,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
@ -341,7 +379,8 @@ impl State {
readonly: true,
}
}
]
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -362,6 +401,7 @@ impl State {
}
}
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("shader.vert");
@ -373,7 +413,7 @@ impl State {
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc);
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
@ -414,21 +454,25 @@ impl State {
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
Self {
@ -456,13 +500,13 @@ impl State {
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
@ -471,28 +515,30 @@ impl State {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("update encoder"),
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("Render Encoder"),
});
{
@ -525,8 +571,8 @@ impl State {
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
@ -542,16 +588,17 @@ fn main() {
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
@ -564,26 +611,29 @@ fn main() {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
// new_inner_size is &mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
});
}

@ -10,11 +10,23 @@ pub struct Texture {
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let desc = wgpu::TextureDescriptor {
label: Some(label),
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
};
let texture = device.create_texture(&desc);
@ -24,22 +36,22 @@ impl Texture {
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::LessEqual,
});
Self { texture, view, sampler }
}
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8], label: &str) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, &img)
Self::from_image(device, &img, Some(label))
}
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage, label: Option<&str>) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
@ -49,6 +61,7 @@ impl Texture {
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
array_layer_count: 1,
mip_level_count: 1,
@ -58,18 +71,21 @@ impl Texture {
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let buffer = device
.create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&rgba);
let buffer = device.create_buffer_with_data(
&rgba,
wgpu::BufferUsage::COPY_SRC,
);
let mut encoder = device.create_command_encoder(&Default::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("texture_buffer_copy_encoder"),
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
bytes_per_row: 4 * dimensions.0,
rows_per_image: dimensions.1,
},
wgpu::TextureCopyView {
texture: &texture,
@ -92,7 +108,7 @@ impl Texture {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::Always,
});
Ok((Self { texture, view, sampler }, cmd_buffer))

@ -16,7 +16,7 @@ A simple way of doing this is to sort all the objects by their distance to the c
If we want to do this properly, we need pixel-level precision. That's where a *depth buffer* comes in.
## The depth of pixels
## A pixel's depth
A depth buffer is a black and white texture that stores the z-coordinate of rendered pixels. Wgpu can use this when drawing new pixels to determine whether to replace the data or keep it. This technique is called depth testing. This will fix our draw order problem without needing us to sort our objects!
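Conceptually, the test boils down to a per-pixel comparison against whatever depth is already stored. The sketch below is plain Rust pseudocode of that decision (illustrative only, not wgpu's actual internals; all names are made up):

```rust
// Roughly what depth testing with CompareFunction::Less does for each fragment.
// `depth_buffer` stands in for the depth texture, indexed by screen position.
fn depth_test(depth_buffer: &mut [f32], width: usize, x: usize, y: usize, new_depth: f32) -> bool {
    let stored = depth_buffer[y * width + x];
    if new_depth < stored {
        // The new fragment is closer to the camera: keep it and remember its depth.
        depth_buffer[y * width + x] = new_depth;
        true // write the color
    } else {
        false // discard the fragment; something closer was already drawn
    }
}
```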
@ -26,11 +26,23 @@ Let's make a function to create the depth texture in `texture.rs`.
impl Texture {
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; // 1.
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
let size = wgpu::Extent3d { // 2.
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT, // 2.
..sc_desc.to_texture_desc() // 3.
label: Some(label),
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT // 3.
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
};
let texture = device.create_texture(&desc);
@ -44,7 +56,7 @@ impl Texture {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::LessEqual, // 5.
});
Self { texture, view, sampler }
@ -53,14 +65,15 @@ impl Texture {
```
1. We need the `DEPTH_FORMAT` both when we create the depth stage of the `render_pipeline` and when we create the depth texture itself.
2. Since we are rendering to this texture, we need to add the `OUTPUT_ATTACHMENT` flag to it.
3. Our depth texture needs to be the same size as our screen if we want things to render correctly. We can use our `sc_desc` to make sure that our depth texture is the same size as our swap chain images.
2. Our depth texture needs to be the same size as our screen if we want things to render correctly. We can use our `sc_desc` to make sure that our depth texture is the same size as our swap chain images.
3. Since we are rendering to this texture, we need to add the `OUTPUT_ATTACHMENT` flag to it.
4. We technically don't *need* a sampler for a depth texture, but our `Texture` struct requires it, and we need one if we ever want to render it.
5. If we do decide to render our depth texture, we need to use `CompareFunction::LessEqual`. This is due to how `samplerShadow` and `sampler2DShadow()` interact with the `texture()` function in GLSL (a minimal sketch of the pairing follows this list).
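A minimal sketch of that pairing, assuming the wgpu 0.5 API used in this commit and a `device: wgpu::Device` already in scope (both pieces appear in the `texture.rs` and `main.rs` diffs above):

```rust
// The depth texture's sampler is created as a comparison sampler...
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
    address_mode_u: wgpu::AddressMode::ClampToEdge,
    address_mode_v: wgpu::AddressMode::ClampToEdge,
    address_mode_w: wgpu::AddressMode::ClampToEdge,
    mag_filter: wgpu::FilterMode::Linear,
    min_filter: wgpu::FilterMode::Linear,
    mipmap_filter: wgpu::FilterMode::Nearest,
    lod_min_clamp: -100.0,
    lod_max_clamp: 100.0,
    compare: wgpu::CompareFunction::LessEqual,
});

// ...so the bind group layout entry that exposes it to the fragment shader has to
// declare `comparison: true`, or the GLSL `samplerShadow` binding won't match.
let sampler_entry = wgpu::BindGroupLayoutEntry {
    binding: 1,
    visibility: wgpu::ShaderStage::FRAGMENT,
    ty: wgpu::BindingType::Sampler { comparison: true },
};
```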
We create our `depth_texture` in `State::new()`.
```rust
let depth_texture = create_depth_texture(&device, &sc_desc);
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
```
We need to modify our `render_pipeline` to allow depth testing.
@ -81,20 +94,22 @@ let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescrip
});
```
1. The compare function tells us when to discard a new pixel. Using `LESS` means pixels will be drawn front to back. Here are all the values you can use.
1. The `depth_compare` function tells us when to discard a new pixel. Using `Less` means pixels will be drawn front to back. Here are all the values you can use.
```rust
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum CompareFunction {
Never = 0,
Less = 1,
Equal = 2,
LessEqual = 3,
Greater = 4,
NotEqual = 5,
GreaterEqual = 6,
Always = 7,
Undefined = 0,
Never = 1,
Less = 2,
Equal = 3,
LessEqual = 4,
Greater = 5,
NotEqual = 6,
GreaterEqual = 7,
Always = 8,
}
```
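For reference, the complete depth/stencil descriptor this commit plugs into the pipeline (taken from the `main.rs` diff above; only the surrounding pipeline fields are omitted):

```rust
// Fragments pass only when their depth is strictly less than the stored value;
// the stencil half of the state is left inert.
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
    format: texture::Texture::DEPTH_FORMAT,
    depth_write_enabled: true,
    depth_compare: wgpu::CompareFunction::Less, // 1.
    stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
    stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
    stencil_read_mask: 0,
    stencil_write_mask: 0,
}),
```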
@ -115,7 +130,7 @@ We need to remember to change the `resize()` method to create a new `depth_texture`.
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
// ...
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
// ...
}
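The recreated texture only matters once its view is attached to the render pass. Here is a sketch of that attachment, assuming the wgpu 0.5 render-pass API (the field values are the conventional clear-to-1.0 setup, not copied from this diff):

```rust
// Hook the depth texture's view into the render pass so depth testing actually runs.
// A depth of 1.0 is "far away", so clearing to 1.0 lets any drawn fragment win.
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
    attachment: &self.depth_texture.view,
    depth_load_op: wgpu::LoadOp::Clear,
    depth_store_op: wgpu::StoreOp::Store,
    clear_depth: 1.0,
    stencil_load_op: wgpu::LoadOp::Clear,
    stencil_store_op: wgpu::StoreOp::Store,
    clear_stencil: 0,
}),
```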
