finished lighting rewrite

pull/32/head
Ben Hansen 4 years ago
parent f66dfa61c0
commit 3b22ab9da9

@ -0,0 +1,8 @@
#version 450
layout(location=0) in vec3 v_color;
layout(location=0) out vec4 f_color;
void main() {
f_color = vec4(v_color, 1.0);
}

@ -0,0 +1,27 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=0) out vec3 v_color;
layout(set=0, binding=0)
uniform Uniforms {
vec3 u_view_position; // unused
mat4 u_view_proj;
};
layout(set=1, binding=0)
uniform Light {
vec3 u_position;
vec3 u_color;
};
// Let's keep our light smaller than our other objects
float scale = 0.25;
void main() {
vec3 v_position = a_position * scale + u_position;
gl_Position = u_view_proj * vec4(v_position, 1);
v_color = u_color;
}

@ -9,7 +9,7 @@ mod texture;
mod model;
use model::{DrawModel, Vertex};
use model::{DrawModel, DrawLight, Vertex};
#[cfg_attr(rustfmt, rustfmt_skip)]
@ -43,17 +43,24 @@ impl Camera {
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_position: cgmath::Vector4<f32>,
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_position: Zero::zero(),
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
// We don't specifically need homogeneous coordinates since we're just using
// a vec3 in the shader. We're using Point3 for the camera.eye, and this is
// the easiest way to convert to Vector4. We're using Vector4 because of
// the uniform's 16-byte spacing requirement
self.view_position = camera.eye.to_homogeneous();
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
@ -156,11 +163,13 @@ impl Instance {
}
}
#[repr(C)]
#[derive(Copy, Clone)]
#[derive(Debug, Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
position: cgmath::Vector3<f32>,
// Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here
_padding: u32,
color: cgmath::Vector3<f32>,
}
struct State {
@ -190,6 +199,8 @@ struct State {
light: Light,
light_buffer: wgpu::Buffer,
light_bind_group: wgpu::BindGroup,
light_render_pipeline: wgpu::RenderPipeline,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
@ -202,6 +213,66 @@ fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescript
device.create_texture(&desc)
}
fn create_render_pipeline(
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
depth_format: Option<wgpu::TextureFormat>,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: &str,
fs_src: &str,
) -> wgpu::RenderPipeline {
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: color_format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: depth_format.map(|format| {
wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}
}),
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: vertex_descs,
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
})
}
impl State {
fn new(window: &Window) -> Self {
@ -285,18 +356,11 @@ impl State {
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let light = Light {
direction: (-1.0, 0.4, -0.9).into(),
};
let light_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM)
.fill_from_slice(&[light]);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
@ -308,14 +372,7 @@ impl State {
dynamic: false,
readonly: true,
}
},
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
}
]
});
@ -335,80 +392,98 @@ impl State {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
},
}
],
});
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, "code/beginner/tutorial9-models/src/res/cube.obj").unwrap();
queue.submit(&cmds);
let light = Light {
position: (2.0, 2.0, 2.0).into(),
_padding: 0,
color: (1.0, 1.0, 1.0).into(),
};
let light_buffer = device
// We'll want to update our light's position, so we use COPY_DST
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[light]);
let light_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false
},
}
],
});
let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &light_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 2,
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
},
}
],
});
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, "code/intermediate/tutorial10-lighting/src/res/cube.obj").unwrap();
queue.submit(&cmds);
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let depth_texture = create_depth_texture(&device, &sc_desc);
let depth_texture_view = depth_texture.create_default_view();
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[
model::ModelVertex::desc(),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
&light_bind_group_layout,
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let render_pipeline = {
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
create_render_pipeline(
&device,
&render_pipeline_layout,
sc_desc.format,
Some(DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src
)
};
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout,
]
});
let vs_src = include_str!("light.vert");
let fs_src = include_str!("light.frag");
create_render_pipeline(
&device,
&layout,
sc_desc.format,
Some(DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src,
)
};
Self {
surface,
device,
@ -429,6 +504,8 @@ impl State {
depth_texture_view,
light,
light_buffer,
light_bind_group,
light_render_pipeline,
}
}
@ -463,6 +540,15 @@ impl State {
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
// Update the light
let old_position = self.light.position;
self.light.position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) * old_position;
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.light]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.light_buffer, 0, std::mem::size_of::<Light>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
@ -500,8 +586,11 @@ impl State {
}),
});
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model(&self.obj_model, &self.uniform_bind_group, &self.light_bind_group);
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group, &self.light_bind_group);
}
self.queue.submit(&[
@ -521,7 +610,6 @@ fn main() {
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => { window.request_redraw(); }
Event::WindowEvent {
ref event,
window_id,
@ -537,7 +625,9 @@ fn main() {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
} => {
*control_flow = ControlFlow::Exit;
}
_ => {}
}
}
@ -547,10 +637,10 @@ fn main() {
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
}
_ => {}
_ => {},
}
}
Event::RedrawRequested(_) => {
Event::MainEventsCleared => {
state.update();
state.render();
}

@ -143,34 +143,67 @@ impl Model {
}
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup);
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup);
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms);
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms, light);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, &uniforms, &[]);
self.set_bind_group(2, &light, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms);
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms, light);
}
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms);
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms, light);
}
}
}
pub trait DrawLight {
fn draw_light_mesh(&mut self, mesh: &Mesh, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_light_mesh_instanced(&mut self, mesh: &Mesh, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_light_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_light_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
}
impl<'a> DrawLight for wgpu::RenderPass<'a> {
fn draw_light_mesh(&mut self, mesh: &Mesh, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_light_mesh_instanced(mesh, 0..1, uniforms, light);
}
fn draw_light_mesh_instanced(&mut self, mesh: &Mesh, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
self.set_bind_group(0, uniforms, &[]);
self.set_bind_group(1, light, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_light_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_light_model_instanced(model, 0..1, uniforms, light);
}
fn draw_light_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
for mesh in &model.meshes {
self.draw_light_mesh_instanced(mesh, instances.clone(), uniforms, light);
}
}
}

@ -5,7 +5,7 @@ layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=1) out vec3 v_brightness;
layout(set=1, binding=0)
uniform Uniforms {

@ -5,108 +5,12 @@ use winit::{
};
use cgmath::prelude::*;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
normal: [f32; 3],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float3,
}
]
}
}
}
use std::f32::consts::PI;
fn make_sphere(radius: f32, latitudes: u16, longitudes: u16) -> (Vec<Vertex>, Vec<u16>) {
assert!(latitudes > 0 && longitudes > 0);
let mut vertices = Vec::new();
let mut indices = Vec::new();
let M = latitudes as f32;
let N = longitudes as f32;
for lat in 0..latitudes {
for lon in 0..longitudes {
let m = lat as f32;
let n = lon as f32;
let normal = cgmath::Vector3 {
x: (PI * m / M).sin() * (2.0 * PI * n / N).sin(),
y: (PI * m / M).sin() * (2.0 * PI * n / N).cos(),
z: (PI * m / M).cos(),
};
let position = normal * radius;
let tex_coords = cgmath::Vector2 {
x: (2.0 - normal.x) * 0.5,
y: (2.0 - normal.z) * 0.5,
};
vertices.push(Vertex {
position: position.into(),
tex_coords: tex_coords.into(),
normal: normal.into(),
});
}
}
for lat in 0..latitudes {
for lon in 0..longitudes {
indices.push(lat * longitudes + lon + 1);
indices.push((lat + 1) * longitudes + lon);
indices.push(lat * longitudes + lon);
indices.push((lat + 1) * longitudes + lon + 1);
indices.push((lat + 1) * longitudes + lon);
indices.push(lat * longitudes + lon + 1);
}
}
(vertices, indices)
}
mod texture;
mod model;
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], normal: [0.0, 0.0, -1.0]}, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], normal: [0.0, 0.0, -1.0]}, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], normal: [0.0, 0.0, -1.0]}, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], normal: [0.0, 0.0, -1.0]}, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], normal: [0.0, 0.0, -1.0]}, // E
];
use model::{DrawModel, Vertex};
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -116,11 +20,7 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
0.0, 0.0, 0.5, 1.0,
);
// const NUM_INSTANCES_PER_ROW: u32 = 1;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
@ -133,35 +33,28 @@ struct Camera {
}
impl Camera {
fn build_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj;
}
fn build_view_matrix(&self) -> cgmath::Matrix4<f32> {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
return view;
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view: cgmath::Matrix4<f32>,
proj: cgmath::Matrix4<f32>,
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view: cgmath::Matrix4::identity(),
proj: cgmath::Matrix4::identity(),
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.proj = OPENGL_TO_WGPU_MATRIX * camera.build_projection_matrix();
self.view = camera.build_view_matrix();
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
@ -279,14 +172,7 @@ struct State {
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
obj_model: model::Model,
camera: Camera,
camera_controller: CameraController,
@ -316,6 +202,7 @@ fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescript
device.create_texture(&desc)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
@ -335,67 +222,6 @@ impl State {
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
@ -414,20 +240,6 @@ impl State {
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
@ -446,13 +258,16 @@ impl State {
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
@ -531,6 +346,9 @@ impl State {
],
});
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, "code/intermediate/tutorial10-lighting/src/res/cube.obj").unwrap();
queue.submit(&cmds);
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
@ -582,29 +400,15 @@ impl State {
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint16,
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[
Vertex::desc(),
model::ModelVertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let (vertices, indices) = make_sphere(0.5, 10, 10);
let vertex_buffer = device
.create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&vertices);
// .create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
// .fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(indices.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(&indices);
// .create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
// .fill_from_slice(INDICES);
let num_indices = indices.len() as u32;
// let num_indices = INDICES.len() as u32;
Self {
surface,
device,
@ -612,13 +416,7 @@ impl State {
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
obj_model,
camera,
camera_controller,
uniform_buffer,
@ -703,11 +501,7 @@ impl State {
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group);
}
self.queue.submit(&[
@ -725,13 +519,13 @@ fn main() {
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => { window.request_redraw(); }
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
@ -744,26 +538,23 @@ fn main() {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
Event::MainEventsCleared => {
Event::RedrawRequested(_) => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
});
}

@ -0,0 +1,35 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);
const float shininess = 32;
void main() {
vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);
vec3 camera_dir = normalize(-v_position);
// This is an approximation of the actual reflection vector, aka what
// angle you have to look at the object to be blinded by the light
vec3 half_direction = normalize(normalize(u_light) + camera_dir);
float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);
f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
}

@ -0,0 +1,34 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model = s_models[gl_InstanceIndex];
// Rotate the normals with respect to the model, ignoring scaling
mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
v_normal = normal_matrix * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
// Get the position relative to the view for the lighting calc
v_position = gl_Position.xyz / gl_Position.w;
}

@ -0,0 +1,47 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view_proj; // unused
};
layout(set = 2, binding = 0) uniform Light {
vec3 light_position;
vec3 light_color;
};
void main() {
vec4 object_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
// We don't need (or want) much ambient light, so 0.1 is fine
float ambient_strength = 0.1;
vec3 ambient_color = light_color * ambient_strength;
vec3 normal = normalize(v_normal);
vec3 light_dir = normalize(light_position - v_position);
float diffuse_strength = max(dot(normal, light_dir), 0.0);
vec3 diffuse_color = light_color * diffuse_strength;
vec3 view_dir = normalize(u_view_position - v_position);
vec3 reflect_dir = reflect(-light_dir, normal);
float specular_strength = pow(max(dot(view_dir, reflect_dir), 0.0), 32);
vec3 specular_color = specular_strength * light_color;
vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
// Since lights don't typically (afaik) cast transparency, we use
// the object's alpha here at the end.
f_color = vec4(result, object_color.a);
}

@ -9,27 +9,39 @@ layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view_proj; // unused
};
const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);
const float shininess = 32;
layout(set = 2, binding = 0) uniform Light {
vec3 light_position;
vec3 light_color;
};
void main() {
vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);
vec4 object_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
vec3 camera_dir = normalize(-v_position);
// We don't need (or want) much ambient light, so 0.1 is fine
float ambient_strength = 0.1;
vec3 ambient_color = light_color * ambient_strength;
// This is an approximation of the actual reflection vector, aka what
// angle you have to look at the object to be blinded by the light
vec3 half_direction = normalize(normalize(u_light) + camera_dir);
float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);
f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
vec3 normal = normalize(v_normal);
vec3 light_dir = normalize(light_position - v_position);
float diffuse_strength = max(dot(normal, light_dir), 0.0);
vec3 diffuse_color = light_color * diffuse_strength;
vec3 view_dir = normalize(u_view_position - v_position);
vec3 half_dir = normalize(view_dir + light_dir);
float specular_strength = pow(max(dot(normal, half_dir), 0.0), 32);
vec3 specular_color = specular_strength * light_color;
vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
// Since lights don't typically (afaik) cast transparency, we use
// the object's alpha here at the end.
f_color = vec4(result, object_color.a);
}

@ -10,6 +10,7 @@ layout(location=2) out vec3 v_position;
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position; // unused
mat4 u_view_proj;
};
@ -21,14 +22,12 @@ buffer Instances {
void main() {
v_tex_coords = a_tex_coords;
mat4 model = s_models[gl_InstanceIndex];
// Rotate the normals with respect to the model, ignoring scaling
mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
mat4 model_matrix = s_models[gl_InstanceIndex];
mat3 normal_matrix = mat3(transpose(inverse(model_matrix)));
v_normal = normal_matrix * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
vec4 model_space = model_matrix * vec4(a_position, 1.0);
v_position = model_space.xyz;
// Get the position relative to the view for the lighting calc
v_position = gl_Position.xyz / gl_Position.w;
gl_Position = u_view_proj * model_space;
}

@ -1 +1 @@
Subproject commit 50afb738454fd9a4d69e42bea313f79386300908
Subproject commit 66be0839e394339ce3fe1cb645f7986846d476e6

@ -0,0 +1,232 @@
# Working with Lights
While we can tell that our scene is 3D because of our camera, it still feels very flat. That's because our model stays the same color regardless of how it's oriented. If we want to change that, we need to add lighting to our scene.
In the real world, a light source emits photons which bounce around until they enter our eyes. The color we see is the light's original color minus whatever energy it lost while it was bouncing around.
In the computer graphics world, modeling individual photons would be hilariously computationally expensive. A single 100-watt light bulb emits about 3.27 x 10^20 photons *per second*. Just imagine that for the sun! To get around this, we're gonna use math to cheat.
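As a rough sanity check on that number (my own back-of-envelope, assuming green light at about 550 nm and all 100 watts emitted as visible light):

$$
E_{photon} = \frac{hc}{\lambda} \approx \frac{(6.63 \times 10^{-34}\,\text{J·s})(3.0 \times 10^{8}\,\text{m/s})}{550 \times 10^{-9}\,\text{m}} \approx 3.6 \times 10^{-19}\,\text{J},
\qquad
\frac{100\,\text{W}}{E_{photon}} \approx 2.8 \times 10^{20}\ \text{photons/s}
$$

Same ballpark as the figure above.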
Let's discuss a few options.
## Ray/Path Tracing
This is an *advanced* topic, and we won't be covering it in depth here. It's the closest model to the way light really works so I felt I had to mention it. Check out the [ray tracing tutorial](../../todo/) if you want to learn more.
## Gouraud Shading
Named after [Henri Gouraud](https://en.wikipedia.org/wiki/Gouraud_shading), Gouraud shading uses a surface normal vector per vertex to determine which direction the surface is facing, then compares that normal to the light's direction to calculate how bright that part of the model should be.
![normals.png](./normals.png)
Fortunately for us, our cube already has normals we can use, so we can get straight to changing our vertex shader to use them.
```glsl
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal; // NEW
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal; // NEW
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
// UPDATED
mat4 model = s_models[gl_InstanceIndex];
v_normal = transpose(inverse(mat3(model))) * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
}
```
We pull out the model matrix that we use to transform our model, because we're going to need it to transform our normals as well. Because a normal is just a direction, not a position, we only want the rotational part of the `model` matrix. That's why we convert it to `mat3`. I'm not sure why the `transpose` and `inverse` bits are needed, but they are.
The fragment shader will take that normal, and a new `u_light` uniform, and perform the calculation.
```glsl
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
void main() {
vec4 diffuse = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float brightness = dot(normalize(v_normal), normalize(u_light)); // 1.
vec4 ambient = vec4(0.0, 0.0, 0.0, 1.0); // 2.
f_color = mix(ambient, diffuse, brightness); // 3.
}
```
1. The dot product gives us the cosine of the angle between the two vectors multiplied by the magnitude of each vector. Normalizing the vectors gives them a magnitude of one, so we get just the cosine of the angle between the two. We can use this value to determine how "similar" they are. A value of 1.0 means that the vectors are the same. A value of -1.0 means that they point in opposite directions. (There's a small sketch of this right after the list.)
2. The ambient value is the color the object would be in the dark.
3. We get the final color by mixing the ambient and diffuse colors using our brightness value.
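To make step 1 concrete, here's a small CPU-side sketch of the same brightness value using `cgmath` (illustration only; the real calculation happens in the fragment shader):

```rust
use cgmath::{InnerSpace, Vector3};

// The dot product of two unit vectors is the cosine of the angle between them.
fn brightness(normal: Vector3<f32>, light: Vector3<f32>) -> f32 {
    normal.normalize().dot(light.normalize())
}

fn main() {
    let n = Vector3::unit_y();
    assert_eq!(brightness(n, Vector3::unit_y()), 1.0); // facing the light
    assert_eq!(brightness(n, -Vector3::unit_y()), -1.0); // facing away
    assert_eq!(brightness(n, Vector3::unit_x()), 0.0); // perpendicular
}
```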
Before we can see the results, we need to create the uniform buffer to hold the light data. We're going to create a new buffer to make it easier to store multiple lights.
```rust
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
}
let light = Light {
direction: (-1.0, 0.4, -0.9).into(),
};
let light_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM)
.fill_from_slice(&[light]);
```
We need to update the uniform bind group as well.
```rust
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
// ...
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
// ...
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
},
],
});
```
With all that you should get something that looks like this.
![gouraud.png](./gouraud.png)
You can see the cubes now have a light side and a dark side.
## Blinn-Phong Shading
Gouraud shading works, but it's not super accurate. It's missing specular reflection.
Specular reflection is light that's reflected off a surface without being scattered the way diffuse reflection is. It's the bright spots you see on a shiny surface such as an apple.
Fortunately we only have to change the shader code to get this new effect.
```glsl
// shader.vert
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model = s_models[gl_InstanceIndex];
// Rotate the normals with respect to the model, ignoring scaling
mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
v_normal = normal_matrix * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
// Get the position relative to the view for the lighting calc
v_position = gl_Position.xyz / gl_Position.w;
}
```
```glsl
// shader.frag
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);
const float shininess = 32;
void main() {
vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);
vec3 camera_dir = normalize(-v_position);
// This is an approximation of the actual reflection vector, aka what
// angle you have to look at the object to be blinded by the light
vec3 half_direction = normalize(normalize(u_light) + camera_dir);
float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);
f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
}
```
With that we should get something like this.
![./blinn-phong.png](./blinn-phong.png)
This is a bit bright for a brick texture though. You can modify the `shininess` value if you want to reduce the brightness. I'm going to leave it as is though. The lighting calculations will change as we get into [Normal Mapping](../tutorial11-normals).
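For some intuition on `shininess` (my own numbers, not from the tutorial): the specular term is the cosine of the half-vector angle raised to the `shininess` power, so a larger exponent makes the highlight smaller and sharper. At $\theta = 30^\circ$ ($\cos\theta \approx 0.87$):

$$
0.87^{8} \approx 0.33, \qquad 0.87^{32} \approx 0.012
$$

so with `shininess = 32` the highlight has already fallen off sharply 30 degrees away from the reflection direction.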
<AutoGithubLink/>

@ -1,4 +1,6 @@
# Working with Lights
# Working with Lights **UPDATED!**
* The old lighting tutorial was not well made (and plain wrong in places), so I've redone it!
While we can tell that our scene is 3D because of our camera, it still feels very flat. That's because our model stays the same color regardless of how it's oriented. If we want to change that, we need to add lighting to our scene.
@ -12,219 +14,584 @@ Let's discuss a few options.
This is an *advanced* topic, and we won't be covering it in depth here. It's the closest model to the way light really works so I felt I had to mention it. Check out the [ray tracing tutorial](../../todo/) if you want to learn more.
## Gouraud Shading
## The Blinn-Phong Model
Ray/path tracing is often too computationally expensive for most realtime applications (though that is starting to change), so a more efficient, if less accurate, method based on the [Phong reflection model](https://en.wikipedia.org/wiki/Phong_shading) is often used. It splits the lighting calculation into three parts: ambient lighting, diffuse lighting, and specular lighting. We're going to be learning the [Blinn-Phong model](https://en.wikipedia.org/wiki/Blinn%E2%80%93Phong_reflection_model), which cheats a bit at the specular calculation to speed things up.
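In equation form, the model we're building toward is the standard Blinn-Phong sum (with $\hat{n}$ the surface normal, $\hat{l}$ the direction to the light, $\hat{v}$ the direction to the camera, and $s$ the shininess):

$$
\begin{aligned}
\text{ambient} &= k_{a}\,C_{light} \\
\text{diffuse} &= \max(\hat{n} \cdot \hat{l},\ 0)\,C_{light} \\
\text{specular} &= \max(\hat{n} \cdot \hat{h},\ 0)^{s}\,C_{light}, \quad \hat{h} = \mathrm{normalize}(\hat{l} + \hat{v}) \\
\text{result} &= (\text{ambient} + \text{diffuse} + \text{specular}) \cdot C_{object}
\end{aligned}
$$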
Before we can get into that though, we need to add a light to our scene.
```rust
// main.rs
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct Light {
position: cgmath::Vector3<f32>,
// Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here
_padding: u32,
color: cgmath::Vector3<f32>,
}
```
Our `Light` represents a colored point in space. We're just going to use pure white light, but it's good to allow different colors of light.
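As a quick, hypothetical sanity check on that `_padding` field (this test is mine, not part of the tutorial code): without the padding, `color` would start at byte 12 on the CPU side, while the shader's uniform layout expects a `vec3` to start on a 16-byte boundary.

```rust
#[test]
fn light_layout_matches_shader() {
    use std::mem::size_of;
    // 12 (position) + 4 (_padding) + 12 (color) = 28 bytes,
    // with `color` landing at byte offset 16.
    assert_eq!(size_of::<cgmath::Vector3<f32>>(), 12);
    assert_eq!(size_of::<Light>(), 28);
}
```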
We're going to create another buffer to store our light in.
```rust
let light = Light {
position: (2.0, 2.0, 2.0).into(),
_padding: 0,
color: (1.0, 1.0, 1.0).into(),
};
let light_buffer = device
// We'll want to update our light's position, so we use COPY_DST
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[light]);
```
Don't forget to add the `light` and `light_buffer` to `State`. After that we need to create a bind group layout and bind group for our light.
```rust
let light_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false
},
}
],
});
let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &light_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
}
],
});
```
Add those to `State`, and also update the `render_pipeline_layout`.
```rust
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
&light_bind_group_layout,
],
});
```
Let's also update the light's position in the `update()` method, so we can see what our objects look like from different angles.
```rust
// Update the light
let old_position = self.light.position;
self.light.position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0)) * old_position;
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.light]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.light_buffer, 0, std::mem::size_of::<Light>() as wgpu::BufferAddress);
```
This will have the light rotate around the origin one degree every frame.
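Note that a fixed rotation per frame ties the light's speed to the frame rate. A hypothetical frame-rate-independent variant (not what we do here) would scale the angle by the frame's delta time:

```rust
// Hypothetical: `dt` is the time in seconds since the last frame.
let angle = cgmath::Deg(60.0 * dt); // 60 degrees per second
self.light.position =
    cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_y(), angle) * old_position;
```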
## Seeing the light
For debugging purposes, it would be nice if we could see where the light is to make sure that the scene looks correct. We could adapt our existing render pipeline to draw the light, but it will likely get in the way. Instead we are going to extract our render pipeline creation code into a new function called `create_render_pipeline()`.
```rust
fn create_render_pipeline(
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
depth_format: Option<wgpu::TextureFormat>,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: &str,
fs_src: &str,
) -> wgpu::RenderPipeline {
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: color_format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: depth_format.map(|format| {
wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}
}),
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: vertex_descs,
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
})
}
```
We also need to change `State::new()` to use this function.
```rust
let render_pipeline = {
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
create_render_pipeline(
&device,
&render_pipeline_layout,
sc_desc.format,
Some(DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src
)
};
```
Named after [Henri Gouraud](https://en.wikipedia.org/wiki/Gouraud_shading), Gouraud shading uses a surface normal vector per vertex to determine which direction the surface is facing, then compares that normal to the light's direction to calculate how bright that part of the model should be.
We're going to need to modify `model::DrawModel` to use our `light_bind_group`.
![normals.png](./normals.png)
```rust
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
Fortunately for us, our cube already has normals we can use, so we can get straight to changing our vertex shader to use them.
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms, light);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, &uniforms, &[]);
self.set_bind_group(2, &light, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms, light);
}
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms, light);
}
}
}
```
With that done we can create another render pipeline for our light.
```rust
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout,
]
});
let vs_src = include_str!("light.vert");
let fs_src = include_str!("light.frag");
create_render_pipeline(
&device,
&layout,
sc_desc.format,
Some(DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src,
)
};
```
I chose to create a separate layout for the `light_render_pipeline`, as it doesn't need all the resources that the regular `render_pipeline` needs (mainly just the textures).
With that in place we need to write the actual shaders.
```glsl
// light.vert
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal; // NEW
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal; // NEW
layout(location=0) out vec3 v_color;
layout(set=1, binding=0)
layout(set=0, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
layout(set=1, binding=0)
uniform Light {
vec3 u_position;
vec3 u_color;
};
// Let's keep our light smaller than our other objects
float scale = 0.25;
void main() {
v_tex_coords = a_tex_coords;
vec3 v_position = a_position * scale + u_position;
gl_Position = u_view_proj * vec4(v_position, 1);
// UPDATED
mat4 model = s_models[gl_InstanceIndex];
v_normal = transpose(inverse(mat3(model))) * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
v_color = u_color;
}
```
We pull out the model matrix that we use to transform our model, because we're going to need it to transform our normals as well. Because a normal is just a direction, not a position, we only want the rotational part of the `model` matrix. That's why we convert it to `mat3`. I'm not sure why the `transpose` and `inverse` bits are needed, but they are.
The fragment shader will take that normal, and a new `u_light` uniform, and perform the calculation.
```glsl
// light.frag
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=0) in vec3 v_color;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
void main() {
f_color = vec4(v_color, 1.0);
}
```
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
Now we could manually implement the draw code for the light in `render()`, but to keep with the pattern we developed, let's create a new trait called `DrawLight`.
```rust
pub trait DrawLight {
fn draw_light_mesh(&mut self, mesh: &Mesh, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_light_mesh_instanced(&mut self, mesh: &Mesh, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_light_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
fn draw_light_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup);
}
impl<'a> DrawLight for wgpu::RenderPass<'a> {
fn draw_light_mesh(&mut self, mesh: &Mesh, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_light_mesh_instanced(mesh, 0..1, uniforms, light);
}
fn draw_light_mesh_instanced(&mut self, mesh: &Mesh, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
self.set_bind_group(0, uniforms, &[]);
self.set_bind_group(1, light, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_light_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
self.draw_light_model_instanced(model, 0..1, uniforms, light);
}
fn draw_light_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup, light: &wgpu::BindGroup) {
for mesh in &model.meshes {
self.draw_light_mesh_instanced(mesh, instances.clone(), uniforms, light);
}
}
}
```
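In `render()`, we then set the light pipeline and draw the light before drawing our models with the updated `draw_model_instanced()` signature.

```rust
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model(&self.obj_model, &self.uniform_bind_group, &self.light_bind_group);

render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group, &self.light_bind_group);
```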
With all that we'll end up with something like this.
![./light-in-scene.png](./light-in-scene.png)
## Ambient Lighting
Light has a tendency to bounce around before entering our eyes. That's why you can see in areas that are in shadow. Actually modeling this interaction is computationally expensive, so we cheat. We define an ambient lighting value that stands in for the light bouncing off other parts of the scene to light our objects.
The ambient part is based on the light color as well as the object color. We've already added our `light_bind_group`, so we just need to use it in our shader. In `shader.frag`, add the following below the texture uniforms.
```glsl
layout(set = 2, binding = 0) uniform Light {
vec3 u_position;
vec3 u_color;
};
```
Then we need to update our main shader code to calculate and use the ambient color value.
```glsl
void main() {
vec4 diffuse = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float brightness = dot(normalize(v_normal), normalize(u_light)); // 1.
vec4 ambient = vec4(0.0, 0.0, 0.0, 1.0); // 2.
f_color = mix(ambient, diffuse, brightness); // 3.
vec4 object_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
// We don't need (or want) much ambient light, so 0.1 is fine
float ambient_strength = 0.1;
vec3 ambient_color = light_color * ambient_strength;
vec3 result = ambient_color * object_color.xyz;
// Since lights don't typically (afaik) cast transparency, we use
// the object's alpha here at the end.
f_color = vec4(result, object_color.a);
}
```
1. The dot product gives us the cosine of the angle between the two vectors multiplied by the magnitude of each vector. Normalizing the vectors gives them a magnitude of one, so we get just the cosine of the angle between the two. We can use this value to determine how "similar" they are. A value of 1.0 means that the vectors are the same. A value of -1.0 means that they point in opposite directions.
2. The ambient value is the color the object would be in the dark.
3. We get the final color by mixing the ambient and diffuse colors using our brightness value.
With that we should get something like this.
Before we can see the results, we need to create the uniform buffer to hold the light data. We're going to create a new buffer to make it easier to store multiple lights.
![./ambient_lighting.png](./ambient_lighting.png)
```rust
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
## Diffuse Lighting
Remember the normal vectors that were included with our model? We're finally going to use them. Normals represent the direction a surface is facing. By comparing the normal of a fragment with a vector pointing to a light source, we get a value for how light or dark that fragment should be. We compare the vectors using the dot product to get the cosine of the angle between them.
![./normal_diagram.png](./normal_diagram.png)
If the dot product of the normal and light vector is 1.0, that means the current fragment is directly in line with the light source and will receive the light's full intensity. A value of 0.0 or lower means that the surface is perpendicular to or facing away from the light, and will therefore be dark.
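Here's a small CPU-side sketch of that diffuse term with `cgmath`, just to pin down the math (the real version lives in the fragment shader below):

```rust
use cgmath::{InnerSpace, Vector3};

// max(dot(n, l), 0.0): full intensity facing the light, clamped to zero
// for surfaces perpendicular to or facing away from it.
fn diffuse_strength(normal: Vector3<f32>, position: Vector3<f32>, light_position: Vector3<f32>) -> f32 {
    let light_dir = (light_position - position).normalize();
    normal.normalize().dot(light_dir).max(0.0)
}
```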
We need to pull the normal vector into our `shader.vert`.
```glsl
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal; // NEW!
```
We're also going to want to pass that value, as well as the vertex's position, to the fragment shader.
```glsl
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;
```
For now let's just pass the normal directly as is. This is wrong, but we'll fix it later.
```glsl
void main() {
    v_tex_coords = a_tex_coords;
    v_normal = a_normal; // NEW!
    vec4 model_space = s_models[gl_InstanceIndex] * vec4(a_position, 1.0); // NEW!
    v_position = model_space.xyz; // NEW!
    gl_Position = u_view_proj * model_space; // UPDATED!
}
```
Now in `shader.frag` we'll take in the vertex's normal and position.
```glsl
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal; // NEW!
layout(location=2) in vec3 v_position; // NEW!
```
With that we can do the actual calculation. Below the `ambient_color` calculation, but above `result`, add the following.
```glsl
vec3 normal = normalize(v_normal);
vec3 light_dir = normalize(u_position - v_position);

float diffuse_strength = max(dot(normal, light_dir), 0.0);
vec3 diffuse_color = u_color * diffuse_strength;
```
Now we can include the `diffuse_color` in the `result`.
```glsl
vec3 result = (ambient_color + diffuse_color) * object_color.xyz;
```
With that we get something like this.
![./ambient_diffuse_wrong.png](./ambient_diffuse_wrong.png)
## The normal matrix
Remember when I said passing the vertex normal directly to the fragment shader was wrong? Let's explore that by removing all the cubes from the scene except one that will be rotated 180 degrees on the y-axis.
```rust
const NUM_INSTANCES_PER_ROW: u32 = 1;
// In the loop where we create the instances
let rotation = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(180.0));
```
We'll also remove the `ambient_color` from our lighting `result`.
```glsl
vec3 result = (diffuse_color) * object_color.xyz;
```
That should give us something that looks like this.
![./diffuse_wrong.png](./diffuse_wrong.png)
This is clearly wrong as the light is illuminating the wrong side of the cube. This is because we aren't rotating our normals with our object, so no matter what direction the object faces, the normals will always face the same way.
![./normal_not_rotated.png](./normal_not_rotated.png)
We need to use the model matrix to transform the normals so they point in the right direction. We only want the rotation data though: a normal represents a direction, and should stay a unit vector throughout the calculation. We can get our normals pointing the right way using what is called a normal matrix, which we can calculate with the following.
```glsl
// shader.vert
mat4 model_matrix = s_models[gl_InstanceIndex];
mat3 normal_matrix = mat3(transpose(inverse(model_matrix)));
v_normal = normal_matrix * a_normal;
```
This takes the `model_matrix` from our `instance_buffer`, inverts it, transposes it, and then pulls out the top-left 3x3 to get just the rotation data. This is all necessary because normals are technically not vectors, they're bivectors. The full explanation is beyond me, but I do know it means we have to treat them differently.
* Note: I'm currently doing things in [world space](https://gamedev.stackexchange.com/questions/65783/what-are-world-space-and-eye-space-in-game-development). Doing things in view-space, also known as eye-space, is more standard, as objects can have lighting issues when they are far from the origin. If we wanted to use view-space, we would use something along the lines of `mat3(transpose(inverse(view_matrix * model_matrix)))`. Currently we are combining the view matrix and projection matrix before we draw, so we'd have to pass those in separately. We'd also have to transform our light's position using something like `view_matrix * model_matrix * light_position` to keep the calculation from getting messed up when the camera moves.
* Another note: I'm currently calculating the `normal_matrix` in the vertex shader. This is rather expensive, so it's often suggested that you compute the `normal_matrix` on the CPU and pass it in with the other uniforms.
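If you went the CPU route, a minimal sketch with cgmath might look like this (an aside, not part of the tutorial code; `model` is whatever per-instance matrix you're already uploading):

```rust
use cgmath::{Matrix, Matrix3, Matrix4, SquareMatrix};

// Inverse-transpose of the model matrix's upper-left 3x3, computed on
// the CPU; you'd pass the result in alongside the other uniforms.
fn normal_matrix(model: &Matrix4<f32>) -> Matrix3<f32> {
    // Pull out the rotation/scale part of the model matrix...
    let m = Matrix3::new(
        model.x.x, model.x.y, model.x.z,
        model.y.x, model.y.y, model.y.z,
        model.z.x, model.z.y, model.z.z,
    );
    // ...then invert and transpose it. A non-invertible matrix shouldn't
    // happen with sane scales, but fall back to the identity just in case.
    m.invert().unwrap_or_else(Matrix3::identity).transpose()
}
```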
With that change our lighting now looks correct.
![./diffuse_right.png](./diffuse_right.png)
Bringing back our other objects and adding the ambient lighting gives us this.

![./ambient_diffuse_lighting.png](./ambient_diffuse_lighting.png)
## Specular Lighting
Specular lighting describes the highlights that appear on objects when viewed from certain angles. If you've ever looked at a car, the highlights are the super bright parts. Basically, some of the light reflects off the surface like a mirror. The location of the highlight shifts depending on the angle you view it from.
![./specular_diagram.png](./specular_diagram.png)
Because this is relative to the view angle, we need to pass the camera's position into the fragment shader.
```glsl
// shader.frag
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view_proj; // unused
};
```
We're going to need to update the `Uniforms` struct as well.
```rust
// main.rs
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_position: cgmath::Vector4<f32>,
view_proj: cgmath::Matrix4<f32>,
}

impl Uniforms {
fn new() -> Self {
Self {
view_position: Zero::zero(),
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
        // We don't specifically need homogeneous coordinates here, since
        // we only use a vec3 in the shader. We're using Point3 for
        // camera.eye, and this is the easiest way to convert it to a
        // Vector4. We use Vector4 because of the uniform's 16 byte
        // spacing requirement.
self.view_position = camera.eye.to_homogeneous();
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
```
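As a quick standalone check of what that conversion produces (not tutorial code), the homogeneous form of a point appends `w = 1.0`, which is what fills out the 16 byte slot:

```rust
use cgmath::{Point3, Vector4};

fn main() {
    let eye = Point3::new(0.0, 1.0, 2.0f32);
    // A point's homogeneous form appends w = 1.0, giving us the
    // four floats the uniform block expects.
    assert_eq!(eye.to_homogeneous(), Vector4::new(0.0, 1.0, 2.0, 1.0));
}
```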
Since we want to use our uniforms in the fragment shader now, we need to change its visibility.
```rust
// main.rs
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, // Updated!
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
// ...
]
});
```
We're going to get the direction from the fragment's position to the camera, and use that with the normal to calculate the `reflect_dir`.
```glsl
vec3 view_dir = normalize(u_view_position - v_position);
vec3 reflect_dir = reflect(-light_dir, normal);
```
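For intuition, GLSL's built-in `reflect` is just a little vector algebra. A cgmath rendition (a sketch for reference, not code to add) would be:

```rust
use cgmath::{InnerSpace, Vector3};

/// Mirror of GLSL's `reflect(i, n)`: reflects the incident vector `i`
/// about the unit-length normal `n`.
fn reflect(i: Vector3<f32>, n: Vector3<f32>) -> Vector3<f32> {
    i - n * (2.0 * i.dot(n))
}
```

Note that we feed it `-light_dir`: `reflect` expects a vector pointing *into* the surface, while `light_dir` points away from it.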
Then we use the dot product to calculate the `specular_strength` and use that to compute the `specular_color`.
```glsl
float specular_strength = pow(max(dot(view_dir, reflect_dir), 0.0), 32);
vec3 specular_color = specular_strength * u_color;
```
Finally we add that to the result.
```glsl
vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
```
With that you should have something like this.
![./ambient_diffuse_specular_lighting.png](./ambient_diffuse_specular_lighting.png)
If we just look at the `specular_color` on its own we get this.
![./specular_lighting.png](./specular_lighting.png)
## The half direction
Up to this point we've actually only implemented the Phong part of Blinn-Phong. The Phong reflection model works well, but it can break down under [certain circumstances](https://learnopengl.com/Advanced-Lighting/Advanced-Lighting). The Blinn part of Blinn-Phong comes from the realization that if you add `view_dir` and `light_dir` together, normalize the result, and take the dot product of that and the `normal`, you get roughly the same results without the issues that using `reflect_dir` had.
```glsl
vec3 view_dir = normalize(u_view_position - v_position);
vec3 half_dir = normalize(view_dir + light_dir);
float specular_strength = pow(max(dot(normal, half_dir), 0.0), 32);
```
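The difference is mostly in how the highlight falls off. Here's a standalone cgmath sketch (not tutorial code) evaluating both terms at one sample point away from the highlight's center:

```rust
use cgmath::{InnerSpace, Vector3};

fn main() {
    let normal = Vector3::unit_y();
    let light_dir = Vector3::new(1.0, 1.0, 0.0f32).normalize();
    let view_dir = Vector3::unit_y(); // looking straight down the normal

    // Phong: reflect the light about the normal, compare with the view.
    let reflect_dir = normal * (2.0 * normal.dot(light_dir)) - light_dir;
    let phong = view_dir.dot(reflect_dir).max(0.0).powi(32);

    // Blinn-Phong: compare the normal with the half vector instead.
    let half_dir = (view_dir + light_dir).normalize();
    let blinn = normal.dot(half_dir).max(0.0).powi(32);

    // Prints something like: phong: 0.0000, blinn-phong: 0.0794
    println!("phong: {:.4}, blinn-phong: {:.4}", phong, blinn);
}
```

The half vector term falls off more gently, which gives Blinn-Phong its slightly wider, softer highlights.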
It's hard to tell the difference, but here are the results.

![./half_dir.png](./half_dir.png)

This is a bit bright for a brick texture though. You can lower the exponent in the `pow()` call if you want to reduce the brightness. I'm going to leave it as is though. The lighting calculations will change as we get into [Normal Mapping](../tutorial11-normals).
<AutoGithubLink/>
@ -1,4 +1,8 @@
# News **UPDATED!**
## Reworked lighting tutorial
The [lighting tutorial](/intermediate/tutorial10-lighting/) was not up to par, so I redid it.
## Added GIF showcase
@ -8,19 +8,18 @@
},
"scripts": {
    "test": "echo \"Error: no test specified\" && exit 1",
    "dev": "vuepress dev docs",
    "build": "vuepress build docs",
    "deploy": "sh deploy.sh"
},
"author": "",
"license": "ISC",
"devDependencies": {
    "@vuepress/plugin-back-to-top": "^1.4.0",
    "vuepress": "^1.4.0",
    "vuepress-plugin-code-copy": "^1.0.6",
    "vuepress-plugin-seo": "^0.1.2",
    "vuepress-theme-thindark": "^1.0.1"
},
"dependencies": {}
}