finished normal mapping tutorial

pull/33/head
Ben Hansen 4 years ago
parent a118435ba7
commit 387e17ba5f

@@ -7,8 +7,7 @@ layout(location=0) out vec4 v_color;
layout(set=0, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view;
mat4 u_proj;
mat4 u_view_proj;
};
layout(set=1, binding=0)
@@ -22,7 +21,7 @@ float scale = 0.25;
void main() {
vec3 v_position = a_position * scale + u_position;
gl_Position = u_proj * u_view * vec4(v_position, 1);
gl_Position = u_view_proj * vec4(v_position, 1);
v_color = vec4(u_color, 0);
}

@@ -18,7 +18,7 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 1;
const NUM_INSTANCES_PER_ROW: u32 = 10;
struct Camera {
eye: cgmath::Point3<f32>,
@@ -31,10 +31,10 @@ struct Camera {
}
impl Camera {
fn build_matrices(&self) -> (cgmath::Matrix4<f32>, cgmath::Matrix4<f32>) {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return (view, proj);
return proj * view;
}
}
@@ -43,26 +43,20 @@ impl Camera {
#[derive(Copy, Clone)]
struct Uniforms {
view_position: cgmath::Vector4<f32>,
view: cgmath::Matrix4<f32>,
proj: cgmath::Matrix4<f32>,
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_position: Zero::zero(),
view: cgmath::Matrix4::identity(),
proj: cgmath::Matrix4::identity(),
// view_3x3: cgmath::Matrix3::identity(),
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_position = camera.eye.to_homogeneous();
let matrices = camera.build_matrices();
self.view = matrices.0;
self.proj = OPENGL_TO_WGPU_MATRIX * matrices.1;
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
@@ -397,7 +391,7 @@ impl State {
instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::STORAGE_READ,
wgpu::BufferUsage::STORAGE_READ | wgpu::BufferUsage::COPY_DST,
);
let uniform_bind_group_layout =
@@ -525,8 +519,8 @@ impl State {
};
let debug_material = {
let diffuse_bytes = include_bytes!("res/alt-diffuse.png");
let normal_bytes = include_bytes!("res/alt-normal.jpg");
let diffuse_bytes = include_bytes!("res/cobble-diffuse.png");
let normal_bytes = include_bytes!("res/cobble-normal.png");
let mut command_buffers = vec![];
let (diffuse_texture, cmds) = texture::Texture::from_bytes(&device, diffuse_bytes, "res/alt-diffuse.png").unwrap();
@@ -597,23 +591,44 @@ impl State {
std::mem::size_of::<Uniforms>() as wgpu::BufferAddress,
);
// Update the light
let old_position = self.light.position;
self.light.position =
cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
* old_position;
// for instance in &mut self.instances {
// instance.rotation = instance.rotation * cgmath::Quaternion::from_axis_angle(
// cgmath::Vector3::unit_y(),
// cgmath::Deg(2.0),
// )
// }
// let instance_data = self.instances.iter()
// .map(Instance::to_raw)
// .collect::<Vec<_>>();
// let staging_buffer = self.device.create_buffer_with_data(
// bytemuck::cast_slice(&instance_data),
// wgpu::BufferUsage::COPY_SRC,
// );
// encoder.copy_buffer_to_buffer(
// &staging_buffer,
// 0,
// &self.instance_buffer,
// 0,
// (std::mem::size_of::<InstanceRaw>() * instance_data.len()) as wgpu::BufferAddress,
// );
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.light]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(
&staging_buffer,
0,
&self.light_buffer,
0,
std::mem::size_of::<Light>() as wgpu::BufferAddress,
);
// Update the light
// let old_position = self.light.position;
// self.light.position =
// cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
// * old_position;
// let staging_buffer = self.device.create_buffer_with_data(
// bytemuck::cast_slice(&[self.light]),
// wgpu::BufferUsage::COPY_SRC,
// );
// encoder.copy_buffer_to_buffer(
// &staging_buffer,
// 0,
// &self.light_buffer,
// 0,
// std::mem::size_of::<Light>() as wgpu::BufferAddress,
// );
self.queue.submit(&[encoder.finish()]);
}
@@ -664,13 +679,6 @@ impl State {
&self.uniform_bind_group,
&self.light_bind_group,
);
// render_pass.draw_model_instanced_with_material(
// &self.obj_model,
// &self.debug_material,
// 0..self.instances.len() as u32,
// &self.uniform_bind_group,
// &self.light_bind_group,
// );
}
self.queue.submit(&[encoder.finish()]);
}

@@ -181,7 +181,9 @@ impl Model {
let indices = &m.mesh.indices;
// Calculate tangents and bitangents
// Calculate tangents and bitangents. We're going to
// use the triangles, so we need to loop through the
// indices in chunks of 3
for c in indices.chunks(3) {
let v0 = vertices[c[0] as usize];
let v1 = vertices[c[1] as usize];

Binary file not shown (before: 1.7 MiB).

Binary file not shown (before: 66 KiB).

Binary file not shown (after: 1.1 MiB).

Binary file not shown (after: 1.8 MiB).

@@ -0,0 +1,41 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_model_position_tangent_space;
layout(location=2) in vec3 v_light_position_tangent_space;
layout(location=3) in vec3 v_view_position_tangent_space;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set = 0, binding = 2) uniform texture2D t_normal;
layout(set = 0, binding = 3) uniform sampler s_normal;
layout(set = 2, binding = 0) uniform Light {
vec3 light_position;
vec3 light_color;
};
void main() {
vec4 object_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
vec4 object_normal = texture(sampler2D(t_normal, s_normal), v_tex_coords);
float ambient_strength = 0.1;
vec3 ambient_color = light_color * ambient_strength;
vec3 normal = normalize(object_normal.rgb);
vec3 light_dir = normalize(v_light_position_tangent_space - v_model_position_tangent_space);
float diffuse_strength = max(dot(normal, light_dir), 0.0);
vec3 diffuse_color = light_color * diffuse_strength;
vec3 view_dir = normalize(v_view_position_tangent_space - v_model_position_tangent_space);
vec3 half_dir = normalize(view_dir + light_dir);
float specular_strength = pow(max(dot(normal, half_dir), 0.0), 32);
vec3 specular_color = specular_strength * light_color;
vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
f_color = vec4(result, object_color.a);
}

@@ -0,0 +1,54 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=3) in vec3 a_tangent;
layout(location=4) in vec3 a_bitangent;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_model_position_tangent_space;
layout(location=2) out vec3 v_light_position_tangent_space;
layout(location=3) out vec3 v_view_position_tangent_space;
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
layout(set = 2, binding = 0) uniform Light {
vec3 light_position;
vec3 light_color; // used in fragment shader
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model_matrix = s_models[gl_InstanceIndex];
vec4 position_world_space = model_matrix * vec4(a_position, 1.0);
mat3 normal_matrix = transpose(inverse(mat3(model_matrix)));
vec3 normal = normalize(normal_matrix * a_normal);
vec3 tangent = normalize(normal_matrix * a_tangent);
vec3 bitangent = normalize(normal_matrix * a_bitangent);
mat3 TBN = transpose(mat3(
tangent,
bitangent,
normal
));
// Transform the lighting values
v_light_position_tangent_space = TBN * light_position;
v_model_position_tangent_space = TBN * position_world_space.xyz;
v_view_position_tangent_space = TBN * u_view_position;
gl_Position = u_view_proj * position_world_space;
}

@@ -1,10 +1,9 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;
layout(location=3) in vec3 v_light_position;
layout(location=4) in vec3 v_view_position;
layout(location=1) in vec3 v_position; // UPDATED!
layout(location=2) in vec3 v_light_position; // NEW!
layout(location=3) in vec3 v_view_position; // NEW!
layout(location=0) out vec4 f_color;
@@ -22,25 +21,20 @@ void main() {
vec4 object_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
vec4 object_normal = texture(sampler2D(t_normal, s_normal), v_tex_coords);
float ambient_strength = 0.01;
float ambient_strength = 0.1;
vec3 ambient_color = light_color * ambient_strength;
// We can't use the normal data directly; we have to
// transform it from the range [0, 1] to [-1, 1].
vec3 normal = normalize(object_normal.rgb * 2.0 - 1.0);
vec3 light_dir = normalize(v_light_position - v_position);
vec3 normal = normalize(object_normal.rgb); // UPDATED!
vec3 light_dir = normalize(v_light_position - v_position); // UPDATED!
float diffuse_strength = max(dot(normal, light_dir), 0.0);
vec3 diffuse_color = light_color * diffuse_strength;
vec3 view_dir = normalize(v_view_position - v_position);
vec3 view_dir = normalize(v_view_position - v_position); // UPDATED!
vec3 half_dir = normalize(view_dir + light_dir);
float specular_strength = pow(max(dot(normal, half_dir), 0.0), 32);
vec3 specular_color = specular_strength * light_color;
// vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
vec3 result = (ambient_color + diffuse_color) * object_color.xyz;
vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
f_color = vec4(result, object_color.a);
// f_color = vec4(normal, object_color.a);
}

@@ -7,16 +7,14 @@ layout(location=3) in vec3 a_tangent;
layout(location=4) in vec3 a_bitangent;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position_model_tangent_space;
layout(location=3) out vec3 v_light_position_tangent_space;
layout(location=4) out vec3 v_view_position_tangent_space;
layout(location=1) out vec3 v_position; // UPDATED!
layout(location=2) out vec3 v_light_position; // NEW!
layout(location=3) out vec3 v_view_position; // NEW!
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view;
mat4 u_proj;
vec3 u_view_position;
mat4 u_view_proj;
};
layout(set=1, binding=1)
@@ -24,37 +22,36 @@ buffer Instances {
mat4 s_models[];
};
layout(set = 2, binding = 0) uniform Light {
// NEW!
layout(set=2, binding=0) uniform Light {
vec3 light_position;
vec3 light_color; // used in fragment shader
vec3 light_color;
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model_matrix = s_models[gl_InstanceIndex];
mat4 model_view_matrix = u_view * model_matrix;
// vec4 light_position_view_space = u_view * vec4(light_position, 1.0);
// vec4 position_model_view_space = model_view_matrix * vec4(a_position, 1.0);
vec4 position_world_space = model_matrix * vec4(a_position, 1.0);
mat3 normal_matrix = transpose(inverse(mat3(model_matrix)));
vec3 normal = normal_matrix * normalize(a_normal);
vec3 tangent = normal_matrix * normalize(a_tangent);
vec3 bitangent = normal_matrix * normalize(a_bitangent);
mat3 normal_matrix = mat3(transpose(inverse(model_matrix)));
vec3 normal = normalize(normal_matrix * a_normal);
vec3 tangent = normalize(normal_matrix * a_tangent);
vec3 bitangent = normalize(normal_matrix * a_bitangent);
mat3 TBN = transpose(mat3(
// UPDATED!
mat3 tangent_matrix = transpose(mat3(
tangent,
bitangent,
normal
));
// Transform the lighting values
v_light_position_tangent_space = TBN * light_position;
v_position_model_tangent_space = TBN * position_world_space.xyz;
v_view_position_tangent_space = TBN * u_view_position;
vec4 model_space = model_matrix * vec4(a_position, 1.0);
v_position = model_space.xyz;
// NEW!
v_position = tangent_matrix * model_space.xyz;
v_light_position = tangent_matrix * light_position;
v_view_position = tangent_matrix * u_view_position;
gl_Position = u_proj * u_view * position_world_space;
gl_Position = u_view_proj * model_space;
}

@@ -0,0 +1,45 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_position; // UPDATED!
layout(location=2) in mat3 v_tangent_matrix; // NEW!
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set = 0, binding = 2) uniform texture2D t_normal;
layout(set = 0, binding = 3) uniform sampler s_normal;
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view_proj; // unused
};
layout(set = 2, binding = 0) uniform Light {
vec3 light_position;
vec3 light_color;
};
void main() {
vec4 object_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
vec4 object_normal = texture(sampler2D(t_normal, s_normal), v_tex_coords);
float ambient_strength = 0.1;
vec3 ambient_color = light_color * ambient_strength;
vec3 normal = normalize(v_tangent_matrix * object_normal.rgb);
vec3 light_dir = normalize(light_position - v_position);
float diffuse_strength = max(dot(normal, light_dir), 0.0);
vec3 diffuse_color = light_color * diffuse_strength;
vec3 view_dir = normalize(u_view_position - v_position);
vec3 half_dir = normalize(view_dir + light_dir);
float specular_strength = pow(max(dot(normal, half_dir), 0.0), 32);
vec3 specular_color = specular_strength * light_color;
vec3 result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
f_color = vec4(result, object_color.a);
}

@@ -0,0 +1,44 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
// NEW!
layout(location=3) in vec3 a_tangent;
layout(location=4) in vec3 a_bitangent;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_position; // UPDATED!
layout(location=2) out mat3 v_tangent_matrix; // NEW!
layout(set=1, binding=0)
uniform Uniforms {
vec3 u_view_position;
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model_matrix = s_models[gl_InstanceIndex];
mat3 normal_matrix = mat3(transpose(inverse(model_matrix)));
vec3 normal = normalize(normal_matrix * a_normal);
vec3 tangent = normalize(normal_matrix * a_tangent);
vec3 bitangent = normalize(normal_matrix * a_bitangent);
v_tangent_matrix = mat3(
tangent,
bitangent,
normal
);
vec4 model_space = model_matrix * vec4(a_position, 1.0);
v_position = model_space.xyz;
gl_Position = u_view_proj * model_space;
}

@@ -95,7 +95,7 @@ materials.push(Material {
});
```
Now we can use the texture in the fragment shader. Color values are by default in the range `[0, 1]`, so we'll have to convert the normal values to `[-1, 1]`.
Now we can use the texture in the fragment shader.
```glsl
// shader.frag
@@ -112,12 +112,282 @@ void main() {
// ...
vec3 normal = normalize(object_normal.rgb * 2.0 - 1.0); // UPDATED!
vec3 normal = normalize(object_normal.rgb); // UPDATED!
// ...
}
```
## View Space
Now if you've used normal mapping in OpenGL, you may be wondering why we didn't adjust the normal value. Normally you'd do something like the following.
I mentioned briefly in the [lighting tutorial](/intermediate/tutorial10-lighting/#the-normal-matrix) that we were doing our lighting calculation in "world space". This meant that the entire scene was oriented with respect to the *world's* coordinate system.
```glsl
vec3 normal = normalize(object_normal.rgb * 2.0 - 1.0);
```
We don't have to do this because we are using `wgpu::TextureFormat::Rgba8UnormSrgb`, so our shader gets the texture data in the range `[-1.0, 1.0]`.
If we run the code now, we'll notice things don't look quite right. Let's compare our results with the last tutorial.
![](./normal_mapping_wrong.png)
![](./ambient_diffuse_specular_lighting.png)
Parts of the scene are dark when they should be lit up, and vice versa.
## Tangent Space to World Space
I mentioned briefly in the [lighting tutorial](/intermediate/tutorial10-lighting/#the-normal-matrix) that we were doing our lighting calculation in "world space". This meant that the entire scene was oriented with respect to the *world's* coordinate system. When we pull the normal data from our normal texture, all the normals are pointing roughly in the positive z direction, regardless of which way the surface actually faces. That means that our lighting calculation thinks all of the surfaces of our models are facing in roughly the same direction. The coordinate space the normals are stored in is referred to as `tangent space`.
If we remember from the [lighting tutorial](/intermediate/tutorial10-lighting/#), we used the vertex normal to indicate the direction of the surface. It turns out we can use that to transform our normals from `tangent space` into `world space`. In order to do that we need to draw from the depths of linear algebra.
We can create a matrix that represents a coordinate system using 3 vectors that are perpendicular to each other (orthonormal, if they're also unit length). Basically we define the x, y, and z axes of our coordinate system.
```glsl
mat3 coordinate_system = mat3(
vec3(1, 0, 0), // x axis (right)
vec3(0, 1, 0), // y axis (up)
vec3(0, 0, 1) // z axis (forward)
);
```
We're going to create a matrix that will represent the coordinate space relative to our vertex normals. We're then going to use that to transform our normal map data to be in world space.
## The tangent and the bitangent
We have one of the 3 vectors we need: the normal. What about the others? These are the tangent and bitangent vectors. A tangent represents any vector that is parallel with a surface (i.e. it doesn't intersect it). The tangent is always perpendicular to the normal vector. The bitangent is a tangent vector that is perpendicular to the other tangent vector. Together the tangent, bitangent, and normal represent the x, y, and z axes respectively.
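If it helps to see where we're headed, here's a minimal sketch of the matrix we'll build from those three axes (the variable names here are just placeholders for now):

```glsl
// The columns of the matrix are the x (tangent), y (bitangent),
// and z (normal) axes of the surface's local coordinate system.
mat3 tangent_to_world = mat3(
    tangent,
    bitangent,
    normal
);
```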
Some model formats include the tangent and bitangent (sometimes called the binormal) in the vertex data, but OBJ does not. We'll have to calculate them manually. Luckily we can derive our tangent and bitangent from our existing vertex data. Take a look at the following diagram.
![](./tangent_space.png)
Basically we can use the edges of our triangles and our normal to calculate the tangent and bitangent. But first, we need to update our `ModelVertex` struct in `model.rs`.
```rust
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ModelVertex {
position: cgmath::Vector3<f32>,
tex_coords: cgmath::Vector2<f32>,
normal: cgmath::Vector3<f32>,
// NEW!
tangent: cgmath::Vector3<f32>,
bitangent: cgmath::Vector3<f32>,
}
```
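One thing to keep an eye on: the vertex data gets cast to bytes with `bytemuck`, so if your `ModelVertex` is set up that way you'll likely need the `Pod`/`Zeroable` impls for the new layout as well. A minimal sketch, assuming the `bytemuck` setup from the earlier tutorials:

```rust
// Assumption: vertices are uploaded with bytemuck::cast_slice elsewhere in model.rs.
// ModelVertex is #[repr(C)], Copy, and contains only f32 fields, so these are sound.
unsafe impl bytemuck::Zeroable for ModelVertex {}
unsafe impl bytemuck::Pod for ModelVertex {}
```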
We'll need to upgrade our `VertexBufferDescriptor` as well.
```rust
impl Vertex for ModelVertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
// ...
// Tangent and bitangent
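// The offsets: position (3 floats) + tex_coords (2) + normal (3) = 8 floats
// before the tangent, and 8 + 3 = 11 floats before the bitangent.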
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 3,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 11]>() as wgpu::BufferAddress,
shader_location: 4,
format: wgpu::VertexFormat::Float3,
},
],
}
}
}
```
Now we can calculate the new tangent and bitangent vectors.
```rust
impl Model {
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
// ...
for m in obj_models {
let mut vertices = Vec::new();
for i in 0..m.mesh.positions.len() / 3 {
// ...
}
let indices = &m.mesh.indices;
// Calculate tangents and bitangents. We're going to
// use the triangles, so we need to loop through the
// indices in chunks of 3
for c in indices.chunks(3) {
let v0 = vertices[c[0] as usize];
let v1 = vertices[c[1] as usize];
let v2 = vertices[c[2] as usize];
let pos0 = v0.position;
let pos1 = v1.position;
let pos2 = v2.position;
let uv0 = v0.tex_coords;
let uv1 = v1.tex_coords;
let uv2 = v2.tex_coords;
// Calculate the edges of the triangle
let delta_pos1 = pos1 - pos0;
let delta_pos2 = pos2 - pos0;
// This will give us a direction to calculate the
// tangent and bitangent
let delta_uv1 = uv1 - uv0;
let delta_uv2 = uv2 - uv0;
// Solving the following system of equations will
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// the solution!
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
vertices[c[2] as usize].tangent = tangent;
vertices[c[0] as usize].bitangent = bitangent;
vertices[c[1] as usize].bitangent = bitangent;
vertices[c[2] as usize].bitangent = bitangent;
}
// ...
}
Ok((Self { meshes, materials }, command_buffers))
}
}
```
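For the curious, those last few lines are just the solution of a 2×2 linear system. Writing the triangle edges in terms of the (unknown) tangent `T` and bitangent `B`, as in the comments above:

```latex
\Delta p_1 = \Delta u_{1,x}\,T + \Delta u_{1,y}\,B \\
\Delta p_2 = \Delta u_{2,x}\,T + \Delta u_{2,y}\,B \\
r = \frac{1}{\Delta u_{1,x}\,\Delta u_{2,y} - \Delta u_{1,y}\,\Delta u_{2,x}} \\
T = r\,(\Delta p_1\,\Delta u_{2,y} - \Delta p_2\,\Delta u_{1,y}) \\
B = r\,(\Delta p_2\,\Delta u_{1,x} - \Delta p_1\,\Delta u_{2,x})
```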
## Shader time!
The vertex shader needs to be updated to include our tangent and bitangent.
```glsl
// shader.vert
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
// NEW!
layout(location=3) in vec3 a_tangent;
layout(location=4) in vec3 a_bitangent;
```
We're going to change up the output variables as well. We'll calculate a `tangent_matrix` to pass to the fragment shader, and we'll remove `v_normal` since we'll be using the normal map data instead.
```glsl
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_position; // UPDATED!
layout(location=2) out mat3 v_tangent_matrix; // NEW!
```
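For context, here's how the full `main()` looks at this stage (this matches the intermediate vertex shader added in this commit): the normal, tangent, and bitangent get transformed by the normal matrix and packed into `v_tangent_matrix`.

```glsl
void main() {
    v_tex_coords = a_tex_coords;
    mat4 model_matrix = s_models[gl_InstanceIndex];
    mat3 normal_matrix = mat3(transpose(inverse(model_matrix)));
    vec3 normal = normalize(normal_matrix * a_normal);
    vec3 tangent = normalize(normal_matrix * a_tangent);
    vec3 bitangent = normalize(normal_matrix * a_bitangent);
    // The tangent, bitangent, and normal become the axes of the matrix
    v_tangent_matrix = mat3(
        tangent,
        bitangent,
        normal
    );
    vec4 model_space = model_matrix * vec4(a_position, 1.0);
    v_position = model_space.xyz;
    gl_Position = u_view_proj * model_space;
}
```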
We need to reflect these updates in the fragment shader as well. We'll also transform the normal into `world space`.
```glsl
// shader.frag
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_position; // UPDATED!
layout(location=2) in mat3 v_tangent_matrix; // NEW!
// ...
void main() {
// ...
vec3 normal = normalize(v_tangent_matrix * object_normal.rgb);
// ...
}
```
With that we get the following.
![](./normal_mapping_correct.png)
## Eww, matrix multiplication in the fragment shader...
Currently we are transforming the normal in the fragment shader. The fragment shader gets run for **every pixel**. To say this is inefficient is an understatement. Even so, we can't do the transformation in the vertex shader since we need to sample the normal map in the fragment shader. If we want to get the `tangent_matrix` out of the fragment shader, we're going to have to think outside the box.
## World Space to Tangent Space
The variables we're using in the lighting calculation are `v_position`, `light_position`, and `u_view_position`. These are in `world space` while our normals are in `tangent space`. We can convert from `world space` to `tangent space` by multiplying by the inverse of the `tangent_matrix`. The inverse operation is normally a little expensive, but because our `tangent_matrix` is made up of unit vectors that are perpendicular to each other (i.e. it's orthonormal), its inverse is just its transpose, so we can use the `transpose()` function instead!
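As a quick illustration of that last point (the names here are just for the example, not from any particular shader file):

```glsl
// transpose(M) == inverse(M) when M's columns are orthonormal,
// so this maps world-space positions/directions into tangent space cheaply.
mat3 world_to_tangent = transpose(mat3(tangent, bitangent, normal));
vec3 light_position_tangent_space = world_to_tangent * light_position;
```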
But first, we need to change up our output variables, and import the `Light` uniforms.
```glsl
// ...
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_position; // UPDATED!
layout(location=2) out vec3 v_light_position; // NEW!
layout(location=3) out vec3 v_view_position; // NEW!
// ...
// NEW!
layout(set=2, binding=0) uniform Light {
vec3 light_position;
vec3 light_color;
};
```
Now we'll convert the other lighting values as follows.
```glsl
void main() {
// ...
// UPDATED!
mat3 tangent_matrix = transpose(mat3(
tangent,
bitangent,
normal
));
vec4 model_space = model_matrix * vec4(a_position, 1.0);
v_position = model_space.xyz;
// NEW!
v_position = tangent_matrix * model_space.xyz;
v_light_position = tangent_matrix * light_position;
v_view_position = tangent_matrix * u_view_position;
// ...
}
```
Finally we'll update `shader.frag` to import and use the transformed lighting values.
```glsl
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_position; // UPDATED!
layout(location=2) in vec3 v_light_position; // NEW!
layout(location=3) in vec3 v_view_position; // NEW!
// ...
void main() {
// ...
vec3 normal = normalize(object_normal.rgb); // UPDATED!
vec3 light_dir = normalize(v_light_position - v_position); // UPDATED!
// ...
vec3 view_dir = normalize(v_view_position - v_position); // UPDATED!
// ...
}
```
The resulting image isn't noticeably different, so I won't show it here, but the calculation is definitely more efficient.
<AutoGithubLink/>

Binary file not shown (after: 705 KiB).

Binary file not shown (after: 722 KiB).

Binary file not shown (after: 731 KiB).

Binary file not shown (after: 3.1 KiB).
