updated lighting tutorial to use model.rs

pull/15/head
Ben Hansen 4 years ago
parent 3d5740c6b1
commit a805bd8fa3

Cargo.lock (generated)

@@ -1428,18 +1428,20 @@ dependencies = [
]
[[package]]
name = "tutorial2-swapchain"
name = "tutorial10-lighting"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tutorial3-pipeline"
name = "tutorial2-swapchain"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1450,7 +1452,7 @@ dependencies = [
]
[[package]]
name = "tutorial4-buffer"
name = "tutorial3-pipeline"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1461,11 +1463,10 @@ dependencies = [
]
[[package]]
name = "tutorial5-textures"
name = "tutorial4-buffer"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1473,7 +1474,7 @@ dependencies = [
]
[[package]]
name = "tutorial6-uniforms"
name = "tutorial5-textures"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1485,7 +1486,7 @@ dependencies = [
]
[[package]]
name = "tutorial7-instancing"
name = "tutorial6-uniforms"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1497,7 +1498,7 @@ dependencies = [
]
[[package]]
name = "tutorial8-depth"
name = "tutorial7-instancing"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1509,10 +1510,11 @@ dependencies = [
]
[[package]]
name = "tutorial9-lighting"
name = "tutorial8-depth"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -23,6 +23,6 @@ name = "tutorial9-models"
path = "src/main.rs"
[[bin]]
name = "tutorial9-challenge"
path = "src/challenge.rs"
# [[bin]]
# name = "tutorial9-challenge"
# path = "src/challenge.rs"

@@ -1,14 +0,0 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_color;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_depth;
layout(set = 0, binding = 1) uniform samplerShadow s_depth;
void main() {
float depth = texture(sampler2DShadow(t_depth, s_depth), vec3(v_tex_coords, 1));
f_color = vec4(depth, 0, 0, 1);
}

@@ -1,900 +0,0 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const DEPTH_VERTICES: &[Vertex] = &[
Vertex { position: [0.0, 0.0, 0.0], tex_coords: [1.0, 1.0]},
Vertex { position: [1.0, 0.0, 0.0], tex_coords: [0.0, 1.0]},
Vertex { position: [1.0, -1.0, 0.0], tex_coords: [0.0, 0.0]},
Vertex { position: [0.0, -1.0, 0.0], tex_coords: [1.0, 0.0]},
];
const DEPTH_INDICES: &[u16] = &[
0, 1, 2,
0, 2, 3,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
}
}
struct DepthPass {
texture: wgpu::Texture,
view: wgpu::TextureView,
sampler: wgpu::Sampler,
bind_group: wgpu::BindGroup,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_depth_indices: u32,
render_pipeline: wgpu::RenderPipeline,
has_saved_to_file: bool,
}
impl DepthPass {
fn new(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) -> Self {
let texture = create_depth_texture(device, sc_desc);
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::LessEqual,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
}
],
});
let vertex_buffer = device
.create_buffer_mapped(DEPTH_VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(DEPTH_VERTICES);
let index_buffer = device
.create_buffer_mapped(DEPTH_INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(DEPTH_INDICES);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[texture_bind_group_layout],
});
let vs_src = include_str!("challenge.vert");
let fs_src = include_str!("challenge.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Self {
texture, view, sampler, bind_group,
vertex_buffer, index_buffer,
num_depth_indices: DEPTH_INDICES.len() as u32,
render_pipeline,
has_saved_to_file: false,
}
}
fn resize(&mut self, device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) {
self.texture = create_depth_texture(device, sc_desc);
self.view = self.texture.create_default_view();
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&self.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.sampler),
}
],
});
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
texture_bind_group_layout: wgpu::BindGroupLayout,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
depth_pass: DepthPass,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> wgpu::Texture {
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
..sc_desc.to_texture_desc()
};
device.create_texture(&desc)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
let (device, mut queue) = adapter.request_device(&Default::default());
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
}
}
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Buffer {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
}
],
});
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
let depth_pass = DepthPass::new(&device, &sc_desc, &texture_bind_group_layout);
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
texture_bind_group_layout,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
depth_pass,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_pass.resize(&self.device, &self.sc_desc, &self.texture_bind_group_layout);
// self.depth_pass.texture = create_depth_texture(&self.device, &self.sc_desc);
// self.depth_pass.view = self.depth_pass.texture.create_default_view();
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_pass.view,
depth_load_op: wgpu::LoadOp::Clear,
depth_store_op: wgpu::StoreOp::Store,
clear_depth: 1.0,
stencil_load_op: wgpu::LoadOp::Clear,
stencil_store_op: wgpu::StoreOp::Store,
clear_stencil: 0,
}),
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Load,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color::BLACK,
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.depth_pass.render_pipeline);
render_pass.set_bind_group(0, &self.depth_pass.bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.depth_pass.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.depth_pass.index_buffer, 0);
render_pass.draw_indexed(0..self.depth_pass.num_depth_indices, 0, 0..1);
}
let buffer = if !self.depth_pass.has_saved_to_file {
const U32_SIZE: u32 = std::mem::size_of::<u32>() as u32;
let buffer_size = (U32_SIZE * self.sc_desc.width * self.sc_desc.height) as wgpu::BufferAddress;
let buffer_desc = wgpu::BufferDescriptor {
size: buffer_size,
usage: wgpu::BufferUsage::COPY_DST
| wgpu::BufferUsage::MAP_READ,
};
let buffer = self.device.create_buffer(&buffer_desc);
encoder.copy_texture_to_buffer(
wgpu::TextureCopyView {
texture: &self.depth_pass.texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: U32_SIZE * self.sc_desc.width,
image_height: self.sc_desc.height,
},
wgpu::Extent3d {
width: self.sc_desc.width,
height: self.sc_desc.height,
depth: 1,
},
);
self.depth_pass.has_saved_to_file = true;
Some((buffer, buffer_size))
} else {
None
};
self.queue.submit(&[
encoder.finish()
]);
if let Some((buffer, buffer_size)) = buffer {
let width = self.sc_desc.width;
let height = self.sc_desc.height;
let near = self.camera.znear;
let far = self.camera.zfar;
buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[f32]>| {
let mapping = result.unwrap();
let data = mapping.data;
use image::{ImageBuffer, Rgba, Pixel};
let mut buffer = ImageBuffer::<Rgba<u8>, _>::new(
width,
height,
);
let mut x = 0;
let mut y = 0;
for pixel in data {
let z = pixel * 2.0 - 1.0;
let r = (2.0 * near * far) / (far + near - z * (far - near));
let p = (r.floor() * 255.0 / far) as u8;
buffer.put_pixel(x, y, Pixel::from_channels(
p, p, p, 255,
));
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
buffer.save("image.png").unwrap();
});
}
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
});
}

@@ -1,11 +0,0 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=0) out vec2 v_tex_coords;
void main() {
v_tex_coords = a_tex_coords;
gl_Position = vec4(a_position, 1.0);
}

@@ -1,5 +1,5 @@
[package]
name = "tutorial9-lighting"
name = "tutorial10-lighting"
version = "0.1.0"
authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
@@ -11,16 +11,18 @@ image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"
# wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
# zerocopy = "0.2.8"
[[bin]]
name = "tutorial9-lighting"
name = "tutorial10-lighting"
path = "src/main.rs"
[[bin]]
name = "tutorial9-challenge"
path = "src/challenge.rs"
# [[bin]]
# name = "tutorial10-challenge"
# path = "src/challenge.rs"

@@ -9,7 +9,8 @@ layout(location=1) out vec3 v_normal;
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
mat4 u_view;
mat4 u_proj;
};
layout(set=1, binding=1)
@@ -22,5 +23,5 @@ void main() {
mat4 model = s_models[gl_InstanceIndex];
v_normal = transpose(inverse(mat3(model))) * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
gl_Position = u_proj * u_view * model * vec4(a_position, 1.0);
}

@@ -5,60 +5,13 @@ use winit::{
};
use cgmath::prelude::*;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
mod texture;
mod model;
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const DEPTH_VERTICES: &[Vertex] = &[
Vertex { position: [0.0, 0.0, 0.0], tex_coords: [1.0, 1.0]},
Vertex { position: [1.0, 0.0, 0.0], tex_coords: [0.0, 1.0]},
Vertex { position: [1.0, -1.0, 0.0], tex_coords: [0.0, 0.0]},
Vertex { position: [0.0, -1.0, 0.0], tex_coords: [1.0, 0.0]},
];
const DEPTH_INDICES: &[u16] = &[
0, 1, 2,
0, 2, 3,
];
use model::{DrawModel, Vertex};
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@@ -69,9 +22,6 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
@@ -84,28 +34,35 @@ struct Camera {
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
fn build_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
return proj;
}
fn build_view_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
return view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
view: cgmath::Matrix4<f32>,
proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
view: cgmath::Matrix4::identity(),
proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
self.proj = OPENGL_TO_WGPU_MATRIX * camera.build_projection_matrix();
self.view = camera.build_view_matrix();
}
}
@@ -207,131 +164,11 @@ impl Instance {
}
}
struct DepthPass {
texture: wgpu::Texture,
view: wgpu::TextureView,
sampler: wgpu::Sampler,
bind_group: wgpu::BindGroup,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_depth_indices: u32,
render_pipeline: wgpu::RenderPipeline,
has_saved_to_file: bool,
}
impl DepthPass {
fn new(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) -> Self {
let texture = create_depth_texture(device, sc_desc);
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::LessEqual,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
}
],
});
let vertex_buffer = device
.create_buffer_mapped(DEPTH_VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(DEPTH_VERTICES);
let index_buffer = device
.create_buffer_mapped(DEPTH_INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(DEPTH_INDICES);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[texture_bind_group_layout],
});
let vs_src = include_str!("challenge.vert");
let fs_src = include_str!("challenge.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Self {
texture, view, sampler, bind_group,
vertex_buffer, index_buffer,
num_depth_indices: DEPTH_INDICES.len() as u32,
render_pipeline,
has_saved_to_file: false,
}
}
fn resize(&mut self, device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) {
self.texture = create_depth_texture(device, sc_desc);
self.view = self.texture.create_default_view();
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&self.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.sampler),
}
],
});
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
}
struct State {
@@ -343,15 +180,7 @@ struct State {
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
texture_bind_group_layout: wgpu::BindGroupLayout,
obj_model: model::Model,
camera: Camera,
camera_controller: CameraController,
@@ -364,21 +193,24 @@ struct State {
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
depth_pass: DepthPass,
depth_texture: wgpu::Texture,
depth_texture_view: wgpu::TextureView,
light: Light,
light_buffer: wgpu::Buffer,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> wgpu::Texture {
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
device.create_texture(&desc)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
@@ -398,67 +230,6 @@ impl State {
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
@@ -477,20 +248,6 @@ impl State {
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
@@ -509,13 +266,16 @@ impl State {
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
@@ -533,6 +293,13 @@ impl State {
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let light = Light {
direction: (-1.0, 0.4, -0.9).into(),
};
let light_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM)
.fill_from_slice(&[light]);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
@@ -549,7 +316,14 @@ impl State {
dynamic: false,
readonly: true,
}
}
},
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
});
@@ -569,12 +343,22 @@ impl State {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
}
},
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
},
],
});
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, "code/beginner/tutorial9-models/src/res/cube.obj").unwrap();
queue.submit(&cmds);
let vs_src = include_str!("gouraud.vert");
let fs_src = include_str!("gouraud.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
@@ -582,6 +366,9 @@ impl State {
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let depth_texture = create_depth_texture(&device, &sc_desc);
let depth_texture_view = depth_texture.create_default_view();
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
@@ -621,25 +408,15 @@ impl State {
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint16,
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[
Vertex::desc(),
model::ModelVertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
let depth_pass = DepthPass::new(&device, &sc_desc, &texture_bind_group_layout);
Self {
surface,
device,
@@ -647,14 +424,7 @@ impl State {
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
texture_bind_group_layout,
obj_model,
camera,
camera_controller,
uniform_buffer,
@@ -663,7 +433,10 @@ impl State {
size,
instances,
instance_buffer,
depth_pass,
depth_texture,
depth_texture_view,
light,
light_buffer,
}
}
@@ -674,10 +447,8 @@ impl State {
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_pass.resize(&self.device, &self.sc_desc, &self.texture_bind_group_layout);
// self.depth_pass.texture = create_depth_texture(&self.device, &self.sc_desc);
// self.depth_pass.view = self.depth_pass.texture.create_default_view();
self.depth_texture = create_depth_texture(&self.device, &self.sc_desc);
self.depth_texture_view = self.depth_texture.create_default_view();
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
@@ -727,7 +498,7 @@ impl State {
}
],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_pass.view,
attachment: &self.depth_texture_view,
depth_load_op: wgpu::LoadOp::Clear,
depth_store_op: wgpu::StoreOp::Store,
clear_depth: 1.0,
@@ -738,112 +509,12 @@ impl State {
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Load,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color::BLACK,
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.depth_pass.render_pipeline);
render_pass.set_bind_group(0, &self.depth_pass.bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.depth_pass.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.depth_pass.index_buffer, 0);
render_pass.draw_indexed(0..self.depth_pass.num_depth_indices, 0, 0..1);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group);
}
let buffer = if !self.depth_pass.has_saved_to_file {
const U32_SIZE: u32 = std::mem::size_of::<u32>() as u32;
let buffer_size = (U32_SIZE * self.sc_desc.width * self.sc_desc.height) as wgpu::BufferAddress;
let buffer_desc = wgpu::BufferDescriptor {
size: buffer_size,
usage: wgpu::BufferUsage::COPY_DST
| wgpu::BufferUsage::MAP_READ,
};
let buffer = self.device.create_buffer(&buffer_desc);
encoder.copy_texture_to_buffer(
wgpu::TextureCopyView {
texture: &self.depth_pass.texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: U32_SIZE * self.sc_desc.width,
image_height: self.sc_desc.height,
},
wgpu::Extent3d {
width: self.sc_desc.width,
height: self.sc_desc.height,
depth: 1,
},
);
self.depth_pass.has_saved_to_file = true;
Some((buffer, buffer_size))
} else {
None
};
self.queue.submit(&[
encoder.finish()
]);
if let Some((buffer, buffer_size)) = buffer {
let width = self.sc_desc.width;
let height = self.sc_desc.height;
let near = self.camera.znear;
let far = self.camera.zfar;
buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[f32]>| {
let mapping = result.unwrap();
let data = mapping.data;
use image::{ImageBuffer, Rgba, Pixel};
let mut buffer = ImageBuffer::<Rgba<u8>, _>::new(
width,
height,
);
let mut x = 0;
let mut y = 0;
for pixel in data {
let z = pixel * 2.0 - 1.0;
let r = (2.0 * near * far) / (far + near - z * (far - near));
let p = (r.floor() * 255.0 / far) as u8;
buffer.put_pixel(x, y, Pixel::from_channels(
p, p, p, 255,
));
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
buffer.save("image.png").unwrap();
});
}
}
}

@@ -0,0 +1,176 @@
use std::path::Path;
use std::ops::Range;
use crate::texture;
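// Anything that can describe its vertex buffer layout to a render pipeline.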
pub trait Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a>;
}
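// The vertex format for geometry loaded from OBJ files: position, texture coordinates, and normal (shader locations 0, 1, and 2).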
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ModelVertex {
position: [f32; 3],
tex_coords: [f32; 2],
normal: [f32; 3],
}
impl Vertex for ModelVertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float3,
},
]
}
}
}
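// A Material pairs a loaded diffuse texture with the bind group used to bind it while drawing.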
pub struct Material {
pub name: String,
pub diffuse_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
pub struct Mesh {
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
impl Model {
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, layout: &wgpu::BindGroupLayout, path: P) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref())?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent().unwrap();
// Our `Texture` struct currently returns a `CommandBuffer` when it's created, so we need to collect those and return them.
let mut command_buffers = Vec::new();
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let (diffuse_texture, cmds) = texture::Texture::load(&device, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
]
});
materials.push(Material {
name: mat.name,
diffuse_texture,
bind_group,
});
command_buffers.push(cmds);
}
let mut meshes = Vec::new();
for m in obj_models {
let mut vertices = Vec::new();
for i in 0..m.mesh.positions.len() / 3 {
vertices.push(ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1],
],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
});
}
let vertex_buffer = device
.create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&vertices);
let index_buffer = device
.create_buffer_mapped(m.mesh.indices.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(&m.mesh.indices);
meshes.push(Mesh {
name: m.name,
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
});
}
Ok((Self { meshes, materials, }, command_buffers))
}
}
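// Drawing helpers for Mesh and Model, implemented on wgpu::RenderPass below so a pass can call render_pass.draw_model_instanced(..) directly.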
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup);
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, &uniforms, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms);
}
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms);
}
}
}
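For context, a minimal sketch of how these methods are meant to be consumed; the obj_model, instances, and uniform_bind_group names mirror the State fields used in the updated main.rs above, and the surrounding State::render() setup is assumed:
// Sketch only (not part of this commit): drawing the loaded model inside the main render pass.
// draw_model_instanced binds each mesh's material (group 0) and the uniforms (group 1) itself.
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(
    &self.obj_model,
    0..self.instances.len() as u32,
    &self.uniform_bind_group,
);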

@@ -0,0 +1,106 @@
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, path: P) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::open(path)?;
Self::from_image(device, &img)
}
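// Builds the depth buffer for the swap chain; with OUTPUT_ATTACHMENT-only usage it is written by the depth test rather than sampled in a shader.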
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
let desc = wgpu::TextureDescriptor {
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
let texture = device.create_texture(&desc);
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
Self { texture, view, sampler }
}
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, &img)
}
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let rgba = img.to_rgba();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let buffer = device
.create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&rgba);
let mut encoder = device.create_command_encoder(&Default::default());
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size,
);
let cmd_buffer = encoder.finish();
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
Ok((Self { texture, view, sampler }, cmd_buffer))
}
}
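Texture::load hands the upload CommandBuffer back along with the texture, leaving it to the caller to submit it; model.rs collects one per material and main.rs submits them with queue.submit(&cmds). A minimal sketch of that pattern, with a placeholder path:
// Sketch only: load an image file and submit its upload commands.
let (diffuse_texture, cmd_buffer) = texture::Texture::load(&device, "some/diffuse.png")?;
queue.submit(&[cmd_buffer]);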

@@ -1,14 +0,0 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_color;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_depth;
layout(set = 0, binding = 1) uniform samplerShadow s_depth;
void main() {
float depth = texture(sampler2DShadow(t_depth, s_depth), vec3(v_tex_coords, 1));
f_color = vec4(depth, 0, 0, 1);
}

@@ -1,11 +0,0 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=0) out vec2 v_tex_coords;
void main() {
v_tex_coords = a_tex_coords;
gl_Position = vec4(a_position, 1.0);
}

@@ -36,7 +36,7 @@ module.exports = {
title: 'Intermediate',
collapsable: false,
children: [
'/intermediate/tutorial9-lighting/',
'/intermediate/tutorial10-lighting/',
'/intermediate/windowless/',
],
},
