Merge pull request #15 from sotrh/lighting

Lighting
sotrh committed 5 years ago via GitHub
commit bd055ff867

13 changed files
Cargo.lock (generated)

@@ -1427,6 +1427,19 @@ dependencies = [
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tutorial10-lighting"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tutorial2-swapchain"
version = "0.1.0"

@@ -23,6 +23,6 @@ name = "tutorial9-models"
path = "src/main.rs"
[[bin]]
name = "tutorial9-challenge"
path = "src/challenge.rs"
# [[bin]]
# name = "tutorial9-challenge"
# path = "src/challenge.rs"

@@ -1,14 +0,0 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_color;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_depth;
layout(set = 0, binding = 1) uniform samplerShadow s_depth;
void main() {
float depth = texture(sampler2DShadow(t_depth, s_depth), vec3(v_tex_coords, 1));
f_color = vec4(depth, 0, 0, 1);
}

@@ -1,11 +0,0 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=0) out vec2 v_tex_coords;
void main() {
v_tex_coords = a_tex_coords;
gl_Position = vec4(a_position, 1.0);
}

@@ -0,0 +1,28 @@
[package]
name = "tutorial10-lighting"
version = "0.1.0"
authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"
# wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
# zerocopy = "0.2.8"
[[bin]]
name = "tutorial10-lighting"
path = "src/main.rs"
# [[bin]]
# name = "tutorial10-challenge"
# path = "src/challenge.rs"

@@ -0,0 +1,21 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
void main() {
vec4 diffuse = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float brightness = dot(normalize(v_normal), normalize(u_light));
vec4 ambient = vec4(0.0, 0.0, 0.0, 1.0);
f_color = mix(ambient, diffuse, brightness);
}

@@ -0,0 +1,26 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model = s_models[gl_InstanceIndex];
v_normal = transpose(inverse(mat3(model))) * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
}

(Binary image file added, 28 KiB; not shown)

@@ -10,6 +10,7 @@ use cgmath::prelude::*;
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
normal: [f32; 3],
}
impl Vertex {
@@ -29,17 +30,76 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float3,
}
]
}
}
}
use std::f32::consts::PI;
fn make_sphere(radius: f32, latitudes: u16, longitudes: u16) -> (Vec<Vertex>, Vec<u16>) {
assert!(latitudes > 0 && longitudes > 0);
let mut vertices = Vec::new();
let mut indices = Vec::new();
let lat_max = latitudes as f32;
let lon_max = longitudes as f32;
// Generate latitudes + 1 rings (including both poles) of longitudes vertices
// each, so the index math below never reads past the vertex list.
for lat in 0..=latitudes {
for lon in 0..longitudes {
let m = lat as f32;
let n = lon as f32;
// On a unit sphere, the position doubles as the surface normal.
let normal = cgmath::Vector3 {
x: (PI * m / lat_max).sin() * (2.0 * PI * n / lon_max).sin(),
y: (PI * m / lat_max).sin() * (2.0 * PI * n / lon_max).cos(),
z: (PI * m / lat_max).cos(),
};
let position = normal * radius;
// Map the normal's components from [-1, 1] into [0, 1] for texturing.
let tex_coords = cgmath::Vector2 {
x: (1.0 - normal.x) * 0.5,
y: (1.0 - normal.z) * 0.5,
};
vertices.push(Vertex {
position: position.into(),
tex_coords: tex_coords.into(),
normal: normal.into(),
});
}
}
// Two triangles per quad, wrapping around the seam with a modulo on lon + 1.
for lat in 0..latitudes {
for lon in 0..longitudes {
let next_lon = (lon + 1) % longitudes;
indices.push(lat * longitudes + next_lon);
indices.push((lat + 1) * longitudes + lon);
indices.push(lat * longitudes + lon);
indices.push((lat + 1) * longitudes + next_lon);
indices.push((lat + 1) * longitudes + lon);
indices.push(lat * longitudes + next_lon);
}
}
(vertices, indices)
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], normal: [0.0, 0.0, -1.0]}, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], normal: [0.0, 0.0, -1.0]}, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], normal: [0.0, 0.0, -1.0]}, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], normal: [0.0, 0.0, -1.0]}, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], normal: [0.0, 0.0, -1.0]}, // E
];
const INDICES: &[u16] = &[
@@ -48,18 +108,6 @@ const INDICES: &[u16] = &[
2, 3, 4,
];
const DEPTH_VERTICES: &[Vertex] = &[
Vertex { position: [0.0, 0.0, 0.0], tex_coords: [1.0, 1.0]},
Vertex { position: [1.0, 0.0, 0.0], tex_coords: [0.0, 1.0]},
Vertex { position: [1.0, -1.0, 0.0], tex_coords: [0.0, 0.0]},
Vertex { position: [0.0, -1.0, 0.0], tex_coords: [1.0, 0.0]},
];
const DEPTH_INDICES: &[u16] = &[
0, 1, 2,
0, 2, 3,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
@@ -68,6 +116,7 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
0.0, 0.0, 0.5, 1.0,
);
// const NUM_INSTANCES_PER_ROW: u32 = 1;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
@@ -84,28 +133,35 @@ struct Camera {
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
fn build_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
return proj;
}
fn build_view_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
return view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
view: cgmath::Matrix4<f32>,
proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
view: cgmath::Matrix4::identity(),
proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
self.proj = OPENGL_TO_WGPU_MATRIX * camera.build_projection_matrix();
self.view = camera.build_view_matrix();
}
}
@@ -207,131 +263,11 @@ impl Instance {
}
}
struct DepthPass {
texture: wgpu::Texture,
view: wgpu::TextureView,
sampler: wgpu::Sampler,
bind_group: wgpu::BindGroup,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_depth_indices: u32,
render_pipeline: wgpu::RenderPipeline,
has_saved_to_file: bool,
}
impl DepthPass {
fn new(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) -> Self {
let texture = create_depth_texture(device, sc_desc);
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::LessEqual,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
}
],
});
let vertex_buffer = device
.create_buffer_mapped(DEPTH_VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(DEPTH_VERTICES);
let index_buffer = device
.create_buffer_mapped(DEPTH_INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(DEPTH_INDICES);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[texture_bind_group_layout],
});
let vs_src = include_str!("challenge.vert");
let fs_src = include_str!("challenge.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Self {
texture, view, sampler, bind_group,
vertex_buffer, index_buffer,
num_depth_indices: DEPTH_INDICES.len() as u32,
render_pipeline,
has_saved_to_file: false,
}
}
fn resize(&mut self, device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, texture_bind_group_layout: &wgpu::BindGroupLayout) {
self.texture = create_depth_texture(device, sc_desc);
self.view = self.texture.create_default_view();
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&self.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.sampler),
}
],
});
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
}
struct State {
@@ -351,7 +287,6 @@ struct State {
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
texture_bind_group_layout: wgpu::BindGroupLayout,
camera: Camera,
camera_controller: CameraController,
@@ -364,16 +299,18 @@ struct State {
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
depth_pass: DepthPass,
depth_texture: wgpu::Texture,
depth_texture_view: wgpu::TextureView,
light: Light,
light_buffer: wgpu::Buffer,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> wgpu::Texture {
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
device.create_texture(&desc)
@@ -533,6 +470,13 @@ impl State {
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let light = Light {
direction: (-1.0, 0.4, -0.9).into(),
};
let light_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM)
.fill_from_slice(&[light]);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
@@ -549,7 +493,14 @@ impl State {
dynamic: false,
readonly: true,
}
}
},
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
});
@@ -569,7 +520,14 @@ impl State {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
}
},
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
},
],
});
@@ -582,6 +540,9 @@ impl State {
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let depth_texture = create_depth_texture(&device, &sc_desc);
let depth_texture_view = depth_texture.create_default_view();
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
@@ -630,15 +591,19 @@ impl State {
alpha_to_coverage_enabled: false,
});
let (vertices, indices) = make_sphere(0.5, 10, 10);
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
.create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&vertices);
// .create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
// .fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
let depth_pass = DepthPass::new(&device, &sc_desc, &texture_bind_group_layout);
.create_buffer_mapped(indices.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(&indices);
// .create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
// .fill_from_slice(INDICES);
let num_indices = indices.len() as u32;
// let num_indices = INDICES.len() as u32;
Self {
surface,
@@ -654,7 +619,6 @@ impl State {
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
texture_bind_group_layout,
camera,
camera_controller,
uniform_buffer,
@@ -663,7 +627,10 @@ impl State {
size,
instances,
instance_buffer,
depth_pass,
depth_texture,
depth_texture_view,
light,
light_buffer,
}
}
@@ -674,10 +641,8 @@ impl State {
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_pass.resize(&self.device, &self.sc_desc, &self.texture_bind_group_layout);
// self.depth_pass.texture = create_depth_texture(&self.device, &self.sc_desc);
// self.depth_pass.view = self.depth_pass.texture.create_default_view();
self.depth_texture = create_depth_texture(&self.device, &self.sc_desc);
self.depth_texture_view = self.depth_texture.create_default_view();
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
@@ -727,7 +692,7 @@ impl State {
}
],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_pass.view,
attachment: &self.depth_texture_view,
depth_load_op: wgpu::LoadOp::Clear,
depth_store_op: wgpu::StoreOp::Store,
clear_depth: 1.0,
@@ -745,105 +710,9 @@ impl State {
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Load,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color::BLACK,
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.depth_pass.render_pipeline);
render_pass.set_bind_group(0, &self.depth_pass.bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.depth_pass.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.depth_pass.index_buffer, 0);
render_pass.draw_indexed(0..self.depth_pass.num_depth_indices, 0, 0..1);
}
let buffer = if !self.depth_pass.has_saved_to_file {
const U32_SIZE: u32 = std::mem::size_of::<u32>() as u32;
let buffer_size = (U32_SIZE * self.sc_desc.width * self.sc_desc.height) as wgpu::BufferAddress;
let buffer_desc = wgpu::BufferDescriptor {
size: buffer_size,
usage: wgpu::BufferUsage::COPY_DST
| wgpu::BufferUsage::MAP_READ,
};
let buffer = self.device.create_buffer(&buffer_desc);
encoder.copy_texture_to_buffer(
wgpu::TextureCopyView {
texture: &self.depth_pass.texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: U32_SIZE * self.sc_desc.width,
image_height: self.sc_desc.height,
},
wgpu::Extent3d {
width: self.sc_desc.width,
height: self.sc_desc.height,
depth: 1,
},
);
self.depth_pass.has_saved_to_file = true;
Some((buffer, buffer_size))
} else {
None
};
self.queue.submit(&[
encoder.finish()
]);
if let Some((buffer, buffer_size)) = buffer {
let width = self.sc_desc.width;
let height = self.sc_desc.height;
let near = self.camera.znear;
let far = self.camera.zfar;
buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[f32]>| {
let mapping = result.unwrap();
let data = mapping.data;
use image::{ImageBuffer, Rgba, Pixel};
let mut buffer = ImageBuffer::<Rgba<u8>, _>::new(
width,
height,
);
let mut x = 0;
let mut y = 0;
for pixel in data {
let z = pixel * 2.0 - 1.0;
let r = (2.0 * near * far) / (far + near - z * (far - near));
let p = (r.floor() * 255.0 / far) as u8;
buffer.put_pixel(x, y, Pixel::from_channels(
p, p, p, 255,
));
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
buffer.save("image.png").unwrap();
});
}
}
}

@@ -0,0 +1,560 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
mod texture;
mod model;
use model::{DrawModel, Vertex};
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
obj_model: model::Model,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
depth_texture: wgpu::Texture,
depth_texture_view: wgpu::TextureView,
light: Light,
light_buffer: wgpu::Buffer,
}
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> wgpu::Texture {
let desc = wgpu::TextureDescriptor {
format: DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
device.create_texture(&desc)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
let (device, mut queue) = adapter.request_device(&Default::default());
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let light = Light {
direction: (-1.0, 0.4, -0.9).into(),
};
let light_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM)
.fill_from_slice(&[light]);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
}
},
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Buffer {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
},
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
},
],
});
let (obj_model, cmds) = model::Model::load(&device, &texture_bind_group_layout, "code/beginner/tutorial9-models/src/res/cube.obj").unwrap();
queue.submit(&cmds);
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let depth_texture = create_depth_texture(&device, &sc_desc);
let depth_texture_view = depth_texture.create_default_view();
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}),
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[
model::ModelVertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
obj_model,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
depth_texture,
depth_texture_view,
light,
light_buffer,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = create_depth_texture(&self.device, &self.sc_desc);
self.depth_texture_view = self.depth_texture.create_default_view();
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture_view,
depth_load_op: wgpu::LoadOp::Clear,
depth_store_op: wgpu::StoreOp::Store,
clear_depth: 1.0,
stencil_load_op: wgpu::LoadOp::Clear,
stencil_store_op: wgpu::StoreOp::Store,
clear_stencil: 0,
}),
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(&self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => { window.request_redraw(); }
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
state.update();
state.render();
}
_ => {}
}
});
}

@@ -0,0 +1,176 @@
use std::path::Path;
use std::ops::Range;
use crate::texture;
pub trait Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ModelVertex {
position: [f32; 3],
tex_coords: [f32; 2],
normal: [f32; 3],
}
impl Vertex for ModelVertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float3,
},
]
}
}
}
pub struct Material {
pub name: String,
pub diffuse_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
pub struct Mesh {
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
impl Model {
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, layout: &wgpu::BindGroupLayout, path: P) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref())?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent().unwrap();
// Our `Texure` struct currently returns a `CommandBuffer` when it's created so we need to collect those and return them.
let mut command_buffers = Vec::new();
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let (diffuse_texture, cmds) = texture::Texture::load(&device, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
]
});
materials.push(Material {
name: mat.name,
diffuse_texture,
bind_group,
});
command_buffers.push(cmds);
}
let mut meshes = Vec::new();
for m in obj_models {
let mut vertices = Vec::new();
for i in 0..m.mesh.positions.len() / 3 {
vertices.push(ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1],
],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
});
}
let vertex_buffer = device
.create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&vertices);
let index_buffer = device
.create_buffer_mapped(m.mesh.indices.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(&m.mesh.indices);
meshes.push(Mesh {
name: m.name,
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
});
}
Ok((Self { meshes, materials, }, command_buffers))
}
}
pub trait DrawModel {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup);
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup);
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup);
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup) {
self.draw_mesh_instanced(mesh, material, 0..1, uniforms);
}
fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
self.set_index_buffer(&mesh.index_buffer, 0);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, &uniforms, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, uniforms);
}
fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms);
}
}
}

@@ -0,0 +1,35 @@
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);
const float shininess = 32;
void main() {
vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);
vec3 camera_dir = normalize(-v_position);
// This is an approximation of the actual reflection vector, aka what
// angle you have to look at the object to be blinded by the light
vec3 half_direction = normalize(normalize(u_light) + camera_dir);
float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);
f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
}

@@ -0,0 +1,34 @@
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model = s_models[gl_InstanceIndex];
// Rotate the normals with respect to the model, ignoring scaling
mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
v_normal = normal_matrix * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
// Get the position relative to the view for the lighting calc
v_position = gl_Position.xyz / gl_Position.w;
}

@@ -0,0 +1,129 @@
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn load<P: AsRef<Path>>(device: &wgpu::Device, path: P) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::open(path)?;
Self::from_image(device, &img)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
let desc = wgpu::TextureDescriptor {
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
..sc_desc.to_texture_desc()
};
let texture = device.create_texture(&desc);
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
Self { texture, view, sampler }
}
pub fn create_render_target(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
let desc = wgpu::TextureDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
..sc_desc.to_texture_desc()
};
let texture = device.create_texture(&desc);
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
Self { texture, view, sampler }
}
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, &img)
}
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let rgba = img.to_rgba();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let buffer = device
.create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&rgba);
let mut encoder = device.create_command_encoder(&Default::default());
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size,
);
let cmd_buffer = encoder.finish();
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
Ok((Self { texture, view, sampler }, cmd_buffer))
}
}

@@ -36,6 +36,7 @@ module.exports = {
title: 'Intermediate',
collapsable: false,
children: [
'/intermediate/tutorial10-lighting/',
'/intermediate/windowless/',
],
},

@@ -1 +1 @@
Subproject commit 12026adcb2e4a7caf4cf8cf3c12a1950f1f86cde
Subproject commit f80cc9d0e4187921c44c87840091db70a637612a

@@ -0,0 +1,230 @@
# Working with Lights
While we can tell that our scene is 3D because of our camera, it still feels very flat. That's because our model stays the same color regardless of how it's oriented. If we want to change that, we need to add lighting to our scene.
In the real world, a light source emits photons that bounce around until they enter our eyes. The color we see is the light's original color minus whatever energy it lost while it was bouncing around.
In the computer graphics world, modeling individual photons would be hilariously computationally expensive. A single 100 Watt light bulb emits about 3.27 x 10^20 photons *per second*. Just imagine that for the sun! To get around this, we're gonna use math to cheat.
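To sanity-check that number (a rough back-of-the-envelope estimate, assuming all 100 watts come out as visible light at about 650 nm):
```latex
E_\text{photon} = \frac{hc}{\lambda}
  \approx \frac{(6.63\times10^{-34}\,\text{J·s})(3.0\times10^{8}\,\text{m/s})}{650\times10^{-9}\,\text{m}}
  \approx 3.1\times10^{-19}\,\text{J},
\qquad
\frac{100\,\text{W}}{3.1\times10^{-19}\,\text{J}} \approx 3.3\times10^{20}\ \text{photons/s}
```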
Let's discuss a few options.
## Ray/Path Tracing
This is an *advanced* topic, and we won't be covering it in depth here. It's the closest model to how light really works, so I felt I had to mention it. Check out the [ray tracing tutorial](../../todo/) if you want to learn more.
## Gouraud Shading
Named after [Henri Gouraud](https://en.wikipedia.org/wiki/Gouraud_shading), Gouraud shading uses a surface normal per vertex to determine which direction the surface is facing, then compares that normal to the light's direction to calculate how bright that part of the model should be.
![normals.png](./normals.png)
Fortunately for us, our cube already has normals that we can use, so we can get straight to changing our vertex shader to use them.
```glsl
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal; // NEW
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal; // NEW
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
// UPDATED
mat4 model = s_models[gl_InstanceIndex];
v_normal = transpose(inverse(mat3(model))) * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
}
```
We pull out the model matrix that we use to transform our instance, because we're going to need it to transform our normals too. A normal is just a direction, not a position, so we only want the rotational part of the `model` matrix. That's why we convert it to `mat3`. The `transpose(inverse(...))` combination is the *normal matrix*: if the model matrix contains any non-uniform scaling, transforming a normal by the model matrix directly would leave it no longer perpendicular to the surface, and the inverse transpose undoes that distortion.
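To see why the inverse transpose works (a quick derivation, not something the code needs): a normal is defined by being perpendicular to every tangent on the surface, and tangents transform by the model matrix M. Transforming normals by (M⁻¹)ᵀ preserves that perpendicularity:
```latex
n^\top t = 0,\qquad t' = M t,\qquad n' = (M^{-1})^\top n
\;\Longrightarrow\;
(n')^\top t' = n^\top M^{-1} M\, t = n^\top t = 0
```
For a pure rotation M is orthogonal, so (M⁻¹)ᵀ = M and the correction is a no-op; it only matters once scaling enters the model matrix.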
The fragment shader will take that normal, and a new `u_light` uniform, and perform the calculation.
```glsl
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
void main() {
vec4 diffuse = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float brightness = dot(normalize(v_normal), normalize(u_light)); // 1.
vec4 ambient = vec4(0.0, 0.0, 0.0, 1.0); // 2.
f_color = mix(ambient, diffuse, brightness); // 3.
}
```
1. The dot product gives us the cosine of the angle between the two vectors multiplied by the magnitude of each vector. Normalizing the vectors gives them a magnitude of one, so we get just the cosine of the angle between the two. We can use this value to determine how "similar" they are. A value of 1.0 means that the vectors are the same. A value of -1.0 means that they point in opposite directions.
2. The ambient value is the color the object would be in the dark.
3. We get the final color by mixing the ambient and diffuse colors using our brightness value.
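To make the math concrete, here's the same brightness-and-mix logic written out on the CPU with `cgmath` (a standalone sketch; the `gouraud_color` helper is hypothetical, not part of the tutorial code):
```rust
use cgmath::{InnerSpace, Vector3, Vector4};

// Hypothetical CPU-side mirror of the fragment shader's logic.
fn gouraud_color(normal: Vector3<f32>, light: Vector3<f32>, diffuse: Vector4<f32>) -> Vector4<f32> {
    let ambient = Vector4::new(0.0, 0.0, 0.0, 1.0);
    // Dot product of two unit vectors = cosine of the angle between them.
    let brightness = normal.normalize().dot(light.normalize());
    // GLSL's mix(a, b, t) is just a + (b - a) * t.
    ambient + (diffuse - ambient) * brightness
}
```
Note that a normal facing away from the light makes `brightness` negative, so `mix` extrapolates past `ambient` rather than clamping; the result simply gets clamped to black when it's written to the framebuffer.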
Before we can see the results, we need to create a uniform buffer to hold the light data. We're giving the light its own buffer to make it easier to store multiple lights later.
```rust
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
direction: cgmath::Vector3<f32>,
}
let light = Light {
direction: (-1.0, 0.4, -0.9).into(),
};
let light_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM)
.fill_from_slice(&[light]);
```
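A quick aside on layout (a potential gotcha, not something the code above necessarily runs into on every backend): GLSL's std140 rules round a `vec3` up to 16 bytes, while `cgmath::Vector3<f32>` is only 12. If you ever hit buffer-size mismatches with a uniform like this, padding the struct out to 16 bytes is the usual fix:
```rust
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
    direction: cgmath::Vector3<f32>,
    _padding: f32, // rounds the vec3 up to std140's 16-byte size
}
```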
We need to update the uniform bind group as well.
```rust
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
// ...
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
// ...
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &light_buffer,
range: 0..std::mem::size_of_val(&light) as wgpu::BufferAddress,
}
},
],
});
```
With all that, you should get something that looks like this.
![gouraud.png](./gouraud.png)
You can see the cubes now have a light side and a dark side.
## Blinn-Phong Shading
Gouraud shading works, but it's not super accurate. It's missing specular reflection.
Specular reflection is light that's reflected off a surface without being scattered the way diffuse reflection is. It's responsible for the bright spots you see on a shiny surface, such as an apple.
Fortunately we only have to change the shader code to get this new effect.
```glsl
// shader.vert
#version 450
layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;
layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;
layout(set=1, binding=0)
uniform Uniforms {
mat4 u_view_proj;
};
layout(set=1, binding=1)
buffer Instances {
mat4 s_models[];
};
void main() {
v_tex_coords = a_tex_coords;
mat4 model = s_models[gl_InstanceIndex];
// Rotate the normals with respect to the model, ignoring scaling
mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
v_normal = normal_matrix * a_normal;
gl_Position = u_view_proj * model * vec4(a_position, 1.0);
// Get the position relative to the view for the lighting calc
v_position = gl_Position.xyz / gl_Position.w;
}
```
```glsl
// shader.frag
#version 450
layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;
layout(location=0) out vec4 f_color;
layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;
layout(set=1, binding=2)
uniform Lights {
vec3 u_light;
};
const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);
const float shininess = 32;
void main() {
vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);
vec3 camera_dir = normalize(-v_position);
// This is an approximation of the actual reflection vector, aka what
// angle you have to look at the object to be blinded by the light
vec3 half_direction = normalize(normalize(u_light) + camera_dir);
float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);
f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
}
```
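The specular term here is the Blinn-Phong half-vector formulation: rather than reflecting the light direction about the normal (classic Phong), we compare the normal with the direction halfway between the light and the camera, which is cheaper and behaves better at grazing angles:
```latex
h = \frac{l + v}{\lVert l + v \rVert},
\qquad \text{specular} = \max(n \cdot h,\; 0)^{\text{shininess}}
```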
With that we should get something like this.
![./blinn-phong.png](./blinn-phong.png)
This is a bit bright for a brick texture, though. You can modify the `shininess` value if you want to tone it down; I'm going to leave it as is. The lighting calculations will change as we get into [Normal Mapping](../tutorial11-normals).
<AutoGithubLink/>

(Binary image file added, 541 KiB; not shown)

(Binary image file added, 566 KiB; not shown)

(Binary image file added, 15 KiB; not shown)
