Misc typo and code fixes.

009: impl Vertex for ModelVertex was using the vertex_attr_array! macro.
009: RenderPipelineDescriptor has a `vertex` member, not `vertex_state`.
009: to_rgba() should be to_rgba8(); to_rgba is set to be deprecated.

010: BindGroupDescriptor has an `entries` member, not `bindings`.
010: Remove re-declaration of mat4 model_matrix;
010: Prefer hard-coded [#.#; #] instead of `Foo::fn().into()`.
010: Clarify which shader frag/vert file changes are in.

011: Change Model::load return type to Result<Self>.
011: TextureDescriptor does not have an `array_layer_count` member.

012: Add .into() to calls in Uniforms::update_view_proj().
012: Dereference delta variable in State::input().

013: Add more changed lines to Model::load to avoid compiler yelling.
pull/152/head
Elijah C. Voigt 3 years ago
parent 80ecaade61
commit bcc79b5e41

@ -34,13 +34,26 @@ Let's define our `VertexBufferLayout`.
```rust
impl Vertex for ModelVertex {
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &wgpu::vertex_attr_array![
0 => Float3,
1 => Float2,
2 => Float3
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float3,
},
],
}
}
@ -52,7 +65,7 @@ This is basically the same as the original `VertexBufferLayout`, but we added a
```rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
// ...
vertex_state: wgpu::VertexState {
vertex: wgpu::VertexState {
// ...
buffers: &[model::ModelVertex::desc(), InstanceRaw::desc()],
},
@ -145,10 +158,10 @@ While we're at it let's import `texture.rs` in `model.rs`.
use crate::texture;
```
We also need to make a subtle change on `from_image()` method in `texture.rs`. PNGs work fine with `as_rgba8()`, as they have an alpha channel. But, JPEGs don't have an alpha channel, and the code would panic if we try to call `as_rgba8()` on the JPEG texture image we are going to use. Instead, we can use `to_rgba()` to handle such an image.
We also need to make a subtle change to the `from_image()` method in `texture.rs`. PNGs work fine with `as_rgba8()`, as they have an alpha channel. But JPEGs don't have an alpha channel, and the code would panic if we tried to call `as_rgba8()` on the JPEG texture image we are going to use. Instead, we can use `to_rgba8()` to handle such an image.
```rust
let rgba = img.to_rgba();
let rgba = img.to_rgba8();
```
`Mesh` holds a vertex buffer, an index buffer, and the number of indices in the mesh. We're using a `usize` for the material. This `usize` will be used to index the `materials` list when it comes time to draw.
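For reference, here is a minimal sketch of what that `Mesh` struct plausibly looks like; the exact field names are assumptions based on the description above, not something this diff shows.
```rust
pub struct Mesh {
    pub name: String,
    pub vertex_buffer: wgpu::Buffer,
    pub index_buffer: wgpu::Buffer,
    pub num_elements: u32, // number of indices to draw
    pub material: usize,   // index into the `materials` list on `Model`
}
```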
@ -284,6 +297,7 @@ We could have put these methods in `impl Model`, but I felt it made more sense to
```rust
// main.rs
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
@ -401,6 +415,8 @@ where
We need to change the render code to reflect this.
```rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.render_pipeline);
let mesh = &self.obj_model.meshes[0];
@ -456,6 +472,7 @@ where
The code in `main.rs` will change accordingly.
```rust
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group);
```

@ -36,9 +36,9 @@ We're going to create another buffer to store our light in.
```rust
let light = Light {
position: (2.0, 2.0, 2.0).into(),
position: [2.0, 2.0, 2.0],
_padding: 0,
color: (1.0, 1.0, 1.0).into(),
color: [1.0, 1.0, 1.0],
};
// We'll want to update our light's position, so we use COPY_DST
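// For reference, a sketch of the `Light` struct this initializer assumes; the
// field names and derives are an assumption on my part, not part of this diff.
// The `_padding` field keeps `color` at a 16-byte offset, since uniform buffer
// layout rules align vec3 members to 16 bytes.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Light {
    position: [f32; 3],
    _padding: u32,
    color: [f32; 3],
}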
@ -57,11 +57,11 @@ Don't forget to add the `light` and `light_buffer` to `State`. After that we nee
let light_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
@ -71,12 +71,9 @@ let light_bind_group_layout =
let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &light_bind_group_layout,
bindings: &[wgpu::Binding {
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
},
resource: light_buffer.as_entire_binding(),
}],
label: None,
});
@ -429,6 +426,33 @@ where
}
```
Finally we want to add Light rendering to our render passes.
```rust
impl State {
// ...
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
// ...
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
use crate::model::DrawLight; // NEW!
render_pass.set_pipeline(&self.light_render_pipeline); // NEW!
render_pass.draw_light_model(
&self.obj_model,
&self.uniform_bind_group,
&self.light_bind_group,
); // NEW!
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(
&self.obj_model,
0..self.instances.len() as u32,
&self.uniform_bind_group,
&self.light_bind_group,
);
}
```
With all that we'll end up with something like this.
![./light-in-scene.png](./light-in-scene.png)
@ -470,7 +494,7 @@ With that we should get something like this.
## Diffuse Lighting
Remember the normal vectors that were included with our model? We're finally going to use them. Normals represent the direction a surface is facing. By comparing the normal of a fragment with a vector pointing to a light source, we get a value of how light/dark that fragment should be. We compare the vector be using the dot product to get the cosine of the angle between them.
Remember the normal vectors that were included with our model? We're finally going to use them. Normals represent the direction a surface is facing. By comparing the normal of a fragment with a vector pointing to a light source, we get a value of how light/dark that fragment should be. We compare the vectors using the dot product to get the cosine of the angle between them.
![./normal_diagram.png](./normal_diagram.png)
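As a rough CPU-side sketch of that idea, here is a hypothetical helper (not from the tutorial) written with the `cgmath` crate the project already uses: the dot product of two unit vectors is the cosine of the angle between them, and clamping it at zero keeps surfaces that face away from the light dark.
```rust
use cgmath::{InnerSpace, Vector3};

// Hypothetical helper: how strongly a surface with `normal` is lit from `light_dir`.
fn diffuse_strength(normal: Vector3<f32>, light_dir: Vector3<f32>) -> f32 {
    // cos(angle) between the unit vectors, clamped so back-facing surfaces stay at 0
    normal.normalize().dot(light_dir.normalize()).max(0.0)
}
```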
@ -495,7 +519,8 @@ For now let's just pass the normal directly as is. This is wrong, but we'll fix
```glsl
void main() {
v_tex_coords = a_tex_coords; v_normal = a_normal; // NEW!
v_tex_coords = a_tex_coords;
v_normal = a_normal; // NEW!
vec4 model_space = model_matrix * vec4(a_position, 1.0); // NEW!
v_position = model_space.xyz; // NEW!
gl_Position = u_view_proj * model_space; // UPDATED!
@ -559,7 +584,6 @@ We need to use the model matrix to transform the normals to be in the right dire
```glsl
// shader.vert
mat4 model_matrix = model_matrix;
mat3 normal_matrix = mat3(transpose(inverse(model_matrix)));
v_normal = normal_matrix * a_normal;
```
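For intuition, here is roughly the same computation done on the CPU with `cgmath` (a hypothetical helper, not part of the tutorial): the inverse-transpose of the model matrix's upper-left 3x3 keeps normals perpendicular to the surface even when the model matrix contains non-uniform scaling.
```rust
use cgmath::{Matrix, Matrix3, Matrix4, SquareMatrix};

// Hypothetical helper mirroring `mat3(transpose(inverse(model_matrix)))` from the shader.
fn normal_matrix(model: Matrix4<f32>) -> Matrix3<f32> {
    // Keep only the rotation/scale part of the model matrix...
    let m = Matrix3::from_cols(model.x.truncate(), model.y.truncate(), model.z.truncate());
    // ...then invert and transpose it so non-uniform scaling doesn't skew the normals.
    m.invert().expect("model matrix should be invertible").transpose()
}
```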
@ -618,8 +642,8 @@ struct Uniforms {
impl Uniforms {
fn new() -> Self {
Self {
view_position: Zero::zero(),
view_proj: cgmath::Matrix4::identity(),
view_position: [0.0; 4],
view_proj: cgmath::Matrix4::identity().into(),
}
}
@ -639,14 +663,11 @@ Since we want to use our uniforms in the fragment shader now, we need to change
```rust
// main.rs
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
entries: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
// ...
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, // Updated!
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
},
// ...
},
// ...
],
@ -657,6 +678,7 @@ let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroup
We're going to get the direction from the fragment's position to the camera, and use that with the normal to calculate the `reflect_dir`.
```glsl
// shader.frag
vec3 view_dir = normalize(u_view_position - v_position);
vec3 reflect_dir = reflect(-light_dir, normal);
```
@ -687,6 +709,7 @@ If we just look at the `specular_color` on its own we get this.
Up to this point we've actually only implemented the Phong part of Blinn-Phong. The Phong reflection model works well, but it can break down under [certain circumstances](https://learnopengl.com/Advanced-Lighting/Advanced-Lighting). The Blinn part of Blinn-Phong comes from the realization that if you add the `view_dir` and `light_dir` together, normalize the result, and use the dot product of that and the `normal`, you get roughly the same results without the issues that using `reflect_dir` had.
```glsl
// shader.frag
vec3 view_dir = normalize(u_view_position - v_position);
vec3 half_dir = normalize(view_dir + light_dir);

@ -1,6 +1,6 @@
# Normal Mapping
With just lighting, our scene is already looking pretty good. Still, our models are still overly smooth. This is understandable because we are using a very simple model. If we were using a texture that was supposed to be smooth, this wouldn't be a problem, but our brick texture is supposed to be rougher. We could solve this by adding more geometry, but that would slow our scene down, and it would hard to know where to add new polygons. This is were normal mapping comes in.
With just lighting, our scene is already looking pretty good. Still, our models are overly smooth. This is understandable because we are using a very simple model. If we were using a texture that was supposed to be smooth, this wouldn't be a problem, but our brick texture is supposed to be rougher. We could solve this by adding more geometry, but that would slow our scene down, and it would be hard to know where to add new polygons. This is where normal mapping comes in.
Remember in [the instancing tutorial](/beginner/tutorial7-instancing/#a-different-way-textures), we experimented with storing instance data in a texture? A normal map is doing just that with normal data! We'll use the normals in the normal map in our lighting calculation in addition to the vertex normal.
@ -52,7 +52,7 @@ let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroup
});
```
We'll need to actually the normal map itself. We'll do this in the loop we create the materials in.
We'll need to actually load the normal map. We'll do this in the loop where we create the materials.
```rust
let diffuse_path = mat.diffuse_texture;
@ -63,7 +63,7 @@ We'll need to actually the normal map itself. We'll do this in the loop we creat
```
* Note: I duplicated and moved teh `command_buffers.push(cmds);` line. This means we can reuse the `cmds` variable for both the normal map and diffuse/color map.
* Note: I duplicated and moved the `command_buffers.push(cmds);` line. This means we can reuse the `cmds` variable for both the normal map and diffuse/color map.
Our `Material`'s `bind_group` will have to change as well.
@ -208,7 +208,7 @@ impl Model {
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
) -> Result<Self> {
// ...
for m in obj_models {
let mut vertices = Vec::new();
@ -281,7 +281,7 @@ impl Model {
// ...
}
Ok((Self { meshes, materials }, command_buffers))
Ok(Self { meshes, materials })
}
}
```
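For the single-parameter `Result<Self>` to compile, `model.rs` presumably has a result alias in scope, such as the one from `anyhow`; this import is an assumption on my part and isn't shown in the diff.
```rust
use anyhow::Result; // assumed alias, so `Result<Self>` means `Result<Self, anyhow::Error>`
```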
@ -436,7 +436,6 @@ pub fn from_image(
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,

@ -109,7 +109,7 @@ You can tell the difference between a right-handed coordinate system and a left-
# The Camera Controller
As our camera is different, so we'll need a new camera controller. Add the following to `camera.rs`.
Our camera is different, so we'll need a new camera controller. Add the following to `camera.rs`.
```rust
#[derive(Debug)]
@ -253,8 +253,8 @@ impl Uniforms {
// UPDATED!
fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
self.view_position = camera.position.to_homogeneous();
self.view_proj = projection.calc_matrix() * camera.calc_matrix()
self.view_position = camera.position.to_homogeneous().into();
self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
}
}
```
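These `.into()` calls assume `Uniforms` stores plain arrays rather than `cgmath` types, roughly like the sketch below (the derives shown are an assumption), so the struct can be cast to bytes for the uniform buffer.
```rust
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Uniforms {
    view_position: [f32; 4],  // camera position, padded out to a vec4
    view_proj: [[f32; 4]; 4], // projection * view matrix
}
```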
@ -289,14 +289,17 @@ impl State {
// ...
uniforms.update_view_proj(&camera, &projection); // UPDATED!
// ...
Self {
// ...
camera,
projection,
projection, // NEW!
camera_controller,
// ...
// NEW!
mouse_pressed: false,
mouse_pressed: false, // NEW!
}
}
}
@ -326,7 +329,7 @@ fn input(&mut self, event: &DeviceEvent) -> bool {
}
) => self.camera_controller.process_keyboard(*key, *state),
DeviceEvent::MouseWheel { delta, .. } => {
self.camera_controller.process_scroll(delta);
self.camera_controller.process_scroll(*delta);
true
}
DeviceEvent::Button {

@ -167,7 +167,19 @@ impl Model {
}
}).collect::<Vec<_>>();
// ...
}
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", m.name)), // UPDATED!
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
// ...
// UPDATED!
Ok(Mesh {
// ...
})
}).collect::<Result<Vec<_>>>()?;
// ...
}
// ...
