pull/95/head
Ben Hansen 4 years ago
parent 786ce2410c
commit 73aed6c6bd

Cargo.lock generated

@ -1209,16 +1209,15 @@ dependencies = [
"anyhow 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"bytemuck 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"framework 0.1.0",
"env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"fs_extra 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.23.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
"shaderc 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.22.2 (registry+https://github.com/rust-lang/crates.io-index)",
]

@ -1,39 +1,30 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{WindowBuilder},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
} => *control_flow = ControlFlow::Exit,
_ => {}
}
},
_ => {}
}
},
_ => {}
});
}
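
Because removed and added lines are interleaved above, the final shape of this window-only example is easier to read in one piece. A minimal sketch of the resulting file, assuming winit 0.22 and env_logger 0.7 as listed in the Cargo.lock hunk, with the match folded straight into the event_loop.run closure:

use winit::{
    event::*,
    event_loop::{ControlFlow, EventLoop},
    window::WindowBuilder,
};

fn main() {
    env_logger::init();
    let event_loop = EventLoop::new();
    let window = WindowBuilder::new().build(&event_loop).unwrap();

    // rustfmt folds the match directly into the closure body.
    event_loop.run(move |event, _, control_flow| match event {
        Event::WindowEvent {
            ref event,
            window_id,
        } if window_id == window.id() => match event {
            WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
            WindowEvent::KeyboardInput { input, .. } => match input {
                KeyboardInput {
                    state: ElementState::Pressed,
                    virtual_keycode: Some(VirtualKeyCode::Escape),
                    ..
                } => *control_flow = ControlFlow::Exit,
                _ => {}
            },
            _ => {}
        },
        _ => {}
    });
}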

@ -2,7 +2,7 @@ use std::iter;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
@ -24,21 +24,25 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -64,7 +68,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -74,10 +77,7 @@ impl State {
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved {
position,
..
} => {
WindowEvent::CursorMoved { position, .. } => {
self.clear_color = wgpu::Color {
r: position.x as f64 / self.size.width as f64,
g: position.y as f64 / self.size.height as f64,
@ -90,31 +90,31 @@ impl State {
}
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.clear_color),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.clear_color),
store: true,
},
}],
depth_stencil_attachment: None,
});
}
@ -126,9 +126,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -140,30 +138,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
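
The hunks above also carry the wgpu 0.5 to 0.6 changes hidden under the reformatting: request_adapter and request_device are now chained with .await, wgpu::DeviceDescriptor gained features, limits, and shader_validation, and request_device takes an optional trace path. A condensed sketch of just that setup path, assuming wgpu 0.6 and a winit Window (the helper name init_wgpu is illustrative):

async fn init_wgpu(window: &winit::window::Window) -> (wgpu::Surface, wgpu::Device, wgpu::Queue) {
    // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
    let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
    let surface = unsafe { instance.create_surface(window) };

    // Pick an adapter that can present to the window's surface.
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::Default,
            compatible_surface: Some(&surface),
        })
        .await
        .expect("No suitable GPU adapter");

    // wgpu 0.6: DeviceDescriptor carries features/limits/shader_validation,
    // and the second argument is an optional API trace path.
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                features: wgpu::Features::empty(),
                limits: wgpu::Limits::default(),
                shader_validation: true,
            },
            None, // Trace path
        )
        .await
        .expect("Failed to create device");

    (surface, device, queue)
}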

@ -2,7 +2,7 @@ use std::iter;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
@ -23,21 +23,25 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -58,7 +62,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -71,36 +74,36 @@ impl State {
false
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
}
@ -113,9 +116,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -127,30 +128,28 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) { // UPDATED!
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
// UPDATED!
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
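
The render() hunks above are formatting-only, but they lean on the wgpu 0.6 frame API: get_current_frame() returns a Result and the texture lives in its output field. A free-function sketch of the clear-to-a-color flow, assuming wgpu 0.6 (the tutorial keeps these handles on a State struct; plain parameters are used here to keep the sketch self-contained):

use std::iter;

fn clear_frame(swap_chain: &mut wgpu::SwapChain, device: &wgpu::Device, queue: &wgpu::Queue) {
    // wgpu 0.6: get_current_frame() -> Result<SwapChainFrame, _>; .output is the texture.
    let frame = swap_chain
        .get_current_frame()
        .expect("Timeout getting texture")
        .output;

    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
        label: Some("Render Encoder"),
    });

    {
        // The render pass borrows the encoder, so it gets its own scope.
        let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
                attachment: &frame.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.1,
                        g: 0.2,
                        b: 0.3,
                        a: 1.0,
                    }),
                    store: true,
                },
            }],
            depth_stencil_attachment: None,
        });
    }

    queue.submit(iter::once(encoder.finish()));
}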

@ -1,7 +1,7 @@
use glob::glob;
use anyhow::*;
use glob::glob;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -12,7 +12,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -26,33 +27,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -61,14 +65,14 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
Ok(())
}
}
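
This build script globs every .vert/.frag/.comp under src/, compiles each to a sibling .spv with shaderc, and reruns on change. On the consuming side the main files above embed those artifacts at compile time; a tiny sketch, assuming the .spv files sit next to the calling source file (the helper name load_shaders is illustrative):

// include_spirv! embeds the SPIR-V produced by build.rs into the binary and
// yields a ShaderModuleSource that the device consumes directly (wgpu 0.6).
fn load_shaders(device: &wgpu::Device) -> (wgpu::ShaderModule, wgpu::ShaderModule) {
    let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
    let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
    (vs_module, fs_module)
}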

@ -2,7 +2,7 @@ use std::iter;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
@ -26,21 +26,25 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -54,61 +58,54 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vs_module = device.create_shader_module(wgpu::include_spirv!("challenge.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("challenge.frag.spv"));
let challenge_render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
let challenge_render_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
@ -119,25 +116,21 @@ impl State {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
@ -146,8 +139,7 @@ impl State {
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
});
let use_color = true;
@ -164,7 +156,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -175,13 +166,12 @@ impl State {
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(
VirtualKeyCode::Space
),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
},
..
} => {
self.use_color = *state == ElementState::Released;
@ -191,38 +181,36 @@ impl State {
}
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -241,9 +229,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -255,30 +241,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
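
The challenge variant above toggles between two pipelines with the space bar; rustfmt only re-indents the nested input: sub-pattern. Isolated, the toggle reads roughly like this (a sketch with a plain &mut bool instead of the use_color field the file keeps on its state struct):

use winit::event::{ElementState, KeyboardInput, VirtualKeyCode, WindowEvent};

// Returns true when the event was handled, mirroring the input() method above.
fn input(use_color: &mut bool, event: &WindowEvent) -> bool {
    match event {
        WindowEvent::KeyboardInput {
            input:
                KeyboardInput {
                    state,
                    virtual_keycode: Some(VirtualKeyCode::Space),
                    ..
                },
            ..
        } => {
            // The alternate pipeline is used while space is held down.
            *use_color = *state == ElementState::Released;
            true
        }
        _ => false,
    }
}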

@ -2,7 +2,7 @@ use std::iter;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
@ -25,21 +25,25 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -53,55 +57,48 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
}
);
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Self {
surface,
@ -114,7 +111,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -127,38 +123,36 @@ impl State {
false
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -173,9 +167,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -187,30 +179,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
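
Most of the churn above is create_render_pipeline being re-wrapped. Pulled out of the diff, pipeline creation against wgpu 0.6 looks roughly like the following sketch (the free function and its parameters are illustrative; the tutorial does this inline in State::new):

fn create_pipeline(
    device: &wgpu::Device,
    sc_desc: &wgpu::SwapChainDescriptor,
    vs_module: &wgpu::ShaderModule,
    fs_module: &wgpu::ShaderModule,
) -> wgpu::RenderPipeline {
    let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: Some("Render Pipeline Layout"),
        bind_group_layouts: &[],
        push_constant_ranges: &[],
    });

    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Render Pipeline"),
        layout: Some(&layout),
        vertex_stage: wgpu::ProgrammableStageDescriptor {
            module: vs_module,
            entry_point: "main",
        },
        fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
            module: fs_module,
            entry_point: "main",
        }),
        rasterization_state: Some(wgpu::RasterizationStateDescriptor {
            front_face: wgpu::FrontFace::Ccw,
            cull_mode: wgpu::CullMode::Back,
            depth_bias: 0,
            depth_bias_slope_scale: 0.0,
            depth_bias_clamp: 0.0,
            clamp_depth: false,
        }),
        primitive_topology: wgpu::PrimitiveTopology::TriangleList,
        color_states: &[wgpu::ColorStateDescriptor {
            format: sc_desc.format,
            color_blend: wgpu::BlendDescriptor::REPLACE,
            alpha_blend: wgpu::BlendDescriptor::REPLACE,
            write_mask: wgpu::ColorWrite::ALL,
        }],
        depth_stencil_state: None,
        vertex_state: wgpu::VertexStateDescriptor {
            index_format: wgpu::IndexFormat::Uint16,
            vertex_buffers: &[],
        },
        sample_count: 1,
        sample_mask: !0,
        alpha_to_coverage_enabled: false,
    })
}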

@ -1,7 +1,7 @@
use glob::glob;
use anyhow::*;
use glob::glob;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -12,7 +12,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -26,33 +27,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -61,14 +65,14 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
Ok(())
}
}

@ -1,11 +1,11 @@
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
@ -33,24 +33,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float3,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0],color: [0.5, 0.0, 0.5] }, // E
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
color: [0.5, 0.0, 0.5],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
color: [0.5, 0.0, 0.5],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
color: [0.5, 0.0, 0.5],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
color: [0.5, 0.0, 0.5],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
color: [0.5, 0.0, 0.5],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
struct State {
surface: wgpu::Surface,
@ -81,21 +92,25 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -109,110 +124,91 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
}
);
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
let num_vertices = 16;
let angle = std::f32::consts::PI * 2.0 / num_vertices as f32;
let challenge_verts = (0..num_vertices).map(|i| {
let theta = angle * i as f32;
Vertex {
position: [
0.5 * theta.cos(),
-0.5 * theta.sin(),
0.0,
],
color: [
(1.0 + theta.cos()) / 2.0,
(1.0 + theta.sin()) / 2.0,
1.0,
]
}
}).collect::<Vec<_>>();
let challenge_verts = (0..num_vertices)
.map(|i| {
let theta = angle * i as f32;
Vertex {
position: [0.5 * theta.cos(), -0.5 * theta.sin(), 0.0],
color: [(1.0 + theta.cos()) / 2.0, (1.0 + theta.sin()) / 2.0, 1.0],
}
})
.collect::<Vec<_>>();
let num_triangles = num_vertices - 2;
let challenge_indices = (1u16..num_triangles+1).into_iter().flat_map(|i| {
vec![i + 1, i, 0]
}).collect::<Vec<_>>();
let challenge_indices = (1u16..num_triangles + 1)
.into_iter()
.flat_map(|i| vec![i + 1, i, 0])
.collect::<Vec<_>>();
let num_challenge_indices = challenge_indices.len() as u32;
let challenge_vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
let challenge_vertex_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Challenge Vertex Buffer"),
contents: bytemuck::cast_slice(&challenge_verts),
usage: wgpu::BufferUsage::VERTEX,
}
);
let challenge_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Challenge Index Buffer"),
contents: bytemuck::cast_slice(&challenge_indices),
usage: wgpu::BufferUsage::INDEX,
}
);
});
let challenge_index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Challenge Index Buffer"),
contents: bytemuck::cast_slice(&challenge_indices),
usage: wgpu::BufferUsage::INDEX,
});
let use_complex = false;
@ -234,7 +230,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -245,11 +240,12 @@ impl State {
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
},
..
} => {
self.use_complex = *state == ElementState::Pressed;
@ -259,38 +255,36 @@ impl State {
}
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -300,14 +294,10 @@ impl State {
(
&self.challenge_vertex_buffer,
&self.challenge_index_buffer,
self.num_challenge_indices
self.num_challenge_indices,
)
} else {
(
&self.vertex_buffer,
&self.index_buffer,
self.num_indices,
)
(&self.vertex_buffer, &self.index_buffer, self.num_indices)
};
render_pass.set_vertex_buffer(0, data.0.slice(..));
render_pass.set_index_buffer(data.1.slice(..));
@ -322,9 +312,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -336,30 +324,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
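
The hunks above re-wrap the VERTICES table and the tail of Vertex::desc(). Reassembled, the vertex layout that the pipeline's vertex_buffers slot consumes looks roughly like this under wgpu 0.6 (the #[repr(C)] attribute and the bytemuck impls are the usual companions and are assumed here, since they fall outside the hunks shown):

use std::mem;

#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
    position: [f32; 3],
    color: [f32; 3],
}

// Needed so bytemuck::cast_slice can reinterpret &[Vertex] as &[u8].
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}

impl Vertex {
    fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
        wgpu::VertexBufferDescriptor {
            stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
            step_mode: wgpu::InputStepMode::Vertex,
            attributes: &[
                // position -> layout(location = 0) in the vertex shader
                wgpu::VertexAttributeDescriptor {
                    offset: 0,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float3,
                },
                // color -> layout(location = 1)
                wgpu::VertexAttributeDescriptor {
                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float3,
                },
            ],
        }
    }
}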

@ -1,11 +1,11 @@
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
@ -33,24 +33,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float3,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], color: [0.5, 0.0, 0.5] }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], color: [0.5, 0.0, 0.5] }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], color: [0.5, 0.0, 0.5] }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], color: [0.5, 0.0, 0.5] }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0],color: [0.5, 0.0, 0.5] }, // E
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
color: [0.5, 0.0, 0.5],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
color: [0.5, 0.0, 0.5],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
color: [0.5, 0.0, 0.5],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
color: [0.5, 0.0, 0.5],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
color: [0.5, 0.0, 0.5],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
struct State {
surface: wgpu::Surface,
@ -74,21 +85,25 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -102,70 +117,59 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -182,7 +186,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -195,38 +198,36 @@ impl State {
false
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -243,9 +244,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -257,30 +256,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
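
This file also switches buffer creation to the create_buffer_init helper from wgpu::util::DeviceExt and draws through an index buffer. Condensed into two free functions (names illustrative; assumes wgpu 0.6 and a Vertex type with bytemuck impls like the sketch above):

use wgpu::util::DeviceExt;

fn create_mesh_buffers(
    device: &wgpu::Device,
    vertices: &[Vertex],
    indices: &[u16],
) -> (wgpu::Buffer, wgpu::Buffer, u32) {
    // create_buffer_init uploads the initial contents in a single call.
    let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Vertex Buffer"),
        contents: bytemuck::cast_slice(vertices),
        usage: wgpu::BufferUsage::VERTEX,
    });
    let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Index Buffer"),
        contents: bytemuck::cast_slice(indices),
        usage: wgpu::BufferUsage::INDEX,
    });
    (vertex_buffer, index_buffer, indices.len() as u32)
}

// The index format itself is fixed by the pipeline's VertexStateDescriptor in wgpu 0.6,
// so set_index_buffer only takes the buffer slice here.
fn draw_mesh<'a>(
    render_pass: &mut wgpu::RenderPass<'a>,
    vertex_buffer: &'a wgpu::Buffer,
    index_buffer: &'a wgpu::Buffer,
    num_indices: u32,
) {
    render_pass.set_vertex_buffer(0, vertex_buffer.slice(..));
    render_pass.set_index_buffer(index_buffer.slice(..));
    render_pass.draw_indexed(0..num_indices, 0, 0..1);
}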

@ -1,7 +1,7 @@
use glob::glob;
use anyhow::*;
use glob::glob;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -12,7 +12,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -26,33 +27,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -61,14 +65,14 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
Ok(())
}
}

@ -1,11 +1,11 @@
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
@ -36,25 +36,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
tex_coords: [0.4131759, 0.00759614],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
tex_coords: [0.0048659444, 0.43041354],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
tex_coords: [0.28081453, 0.949397057],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
tex_coords: [0.85967, 0.84732911],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
tex_coords: [0.9414737, 0.2652641],
}, // E
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
struct State {
surface: wgpu::Surface,
@ -84,20 +94,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -108,8 +122,8 @@ impl State {
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -124,133 +138,108 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png",
).unwrap();
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let cartoon_bytes = include_bytes!("happy-tree-cartoon.png");
let cartoon_texture = texture::Texture::from_bytes(
&device,
&queue,
cartoon_bytes,
"happy-tree-cartoon.png",
).unwrap();
let cartoon_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&cartoon_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&cartoon_texture.sampler),
}
],
label: Some("cartoon_bind_group"),
}
);
let cartoon_texture =
texture::Texture::from_bytes(&device, &queue, cartoon_bytes, "happy-tree-cartoon.png")
.unwrap();
let cartoon_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&cartoon_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&cartoon_texture.sampler),
},
],
label: Some("cartoon_bind_group"),
});
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
}
);
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -272,7 +261,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -283,11 +271,12 @@ impl State {
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
},
..
} => {
self.is_space_pressed = *state == ElementState::Pressed;
@ -297,38 +286,36 @@ impl State {
}
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -352,9 +339,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -366,30 +351,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
@ -1,11 +1,11 @@
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
@ -36,24 +36,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
tex_coords: [0.4131759, 0.00759614],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
tex_coords: [0.0048659444, 0.43041354],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
tex_coords: [0.28081453, 0.949397057],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
tex_coords: [0.85967, 0.84732911],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
tex_coords: [0.9414737, 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
struct State {
surface: wgpu::Surface,
@ -80,21 +91,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -106,15 +120,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -129,100 +139,84 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -241,7 +235,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -254,38 +247,36 @@ impl State {
false
}
fn update(&mut self) {
}
fn update(&mut self) {}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -303,9 +294,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -317,30 +306,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
@ -1,5 +1,5 @@
use image::GenericImageView;
use anyhow::*;
use image::GenericImageView;
pub struct Texture {
pub texture: wgpu::Texture,
@ -11,8 +11,8 @@ impl Texture {
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
@ -22,7 +22,7 @@ impl Texture {
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
@ -32,17 +32,15 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -60,18 +58,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}
}
@ -1,7 +1,7 @@
use glob::glob;
use anyhow::*;
use glob::glob;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -12,7 +12,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -26,33 +27,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -61,14 +65,14 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
Ok(())
}
}
@ -1,11 +1,11 @@
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
@ -36,24 +36,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
tex_coords: [0.4131759, 0.00759614],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
tex_coords: [0.0048659444, 0.43041354],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
tex_coords: [0.28081453, 0.949397057],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
tex_coords: [0.85967, 0.84732911],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
tex_coords: [0.9414737, 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -88,17 +99,18 @@ struct UniformStaging {
impl UniformStaging {
fn new(camera: Camera) -> Self {
Self { camera, model_rotation: cgmath::Deg(0.0) }
Self {
camera,
model_rotation: cgmath::Deg(0.0),
}
}
fn update_uniforms(&self, uniforms: &mut Uniforms) {
uniforms.model_view_proj =
OPENGL_TO_WGPU_MATRIX
uniforms.model_view_proj = OPENGL_TO_WGPU_MATRIX
* self.camera.build_view_projection_matrix()
* cgmath::Matrix4::from_angle_z(self.model_rotation);
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct Uniforms {
@ -143,11 +155,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -205,8 +218,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -245,20 +258,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -270,15 +287,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -293,32 +306,27 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 1.0, 2.0).into(),
@ -335,17 +343,15 @@ impl State {
let uniform_staging = UniformStaging::new(camera);
uniform_staging.update_uniforms(&mut uniforms);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
@ -353,92 +359,75 @@ impl State {
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
}],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
}
],
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
}],
label: Some("uniform_bind_group"),
});
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -462,7 +451,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -477,40 +465,45 @@ impl State {
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.uniform_staging.camera);
self.camera_controller
.update_camera(&mut self.uniform_staging.camera);
self.uniform_staging.model_rotation += cgmath::Deg(2.0);
self.uniform_staging.update_uniforms(&mut self.uniforms);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -529,9 +522,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -543,30 +534,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
@ -1,11 +1,11 @@
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
@ -36,24 +36,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
tex_coords: [0.4131759, 0.00759614],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
tex_coords: [0.0048659444, 0.43041354],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
tex_coords: [0.28081453, 0.949397057],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
tex_coords: [0.85967, 0.84732911],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
tex_coords: [0.9414737, 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -129,11 +140,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -191,8 +203,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -232,20 +244,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -257,15 +273,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -280,32 +292,27 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 1.0, 2.0).into(),
@ -321,17 +328,15 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
@ -339,92 +344,75 @@ impl State {
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
}],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
}
],
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
}],
label: Some("uniform_bind_group"),
});
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -448,7 +436,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -465,37 +452,41 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -514,9 +505,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -528,30 +517,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {
@ -1,5 +1,5 @@
use image::GenericImageView;
use anyhow::*;
use image::GenericImageView;
pub struct Texture {
pub texture: wgpu::Texture,
@ -11,8 +11,8 @@ impl Texture {
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
@ -22,7 +22,7 @@ impl Texture {
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
@ -32,17 +32,15 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -60,18 +58,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}
}
@ -1,7 +1,7 @@
use glob::glob;
use anyhow::*;
use glob::glob;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -12,7 +12,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -26,33 +27,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -61,14 +65,14 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
Ok(())
}
}
@ -1,12 +1,12 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
@ -37,24 +37,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
Vertex {
position: [-0.0868241, -0.49240386, 0.0],
tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614],
}, // A
Vertex {
position: [-0.49513406, -0.06958647, 0.0],
tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354],
}, // B
Vertex {
position: [-0.21918549, 0.44939706, 0.0],
tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057],
}, // C
Vertex {
position: [0.35966998, 0.3473291, 0.0],
tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911],
}, // D
Vertex {
position: [0.44147372, -0.2347359, 0.0],
tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -65,8 +76,11 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(
NUM_INSTANCES_PER_ROW as f32 * 0.5,
0.0,
NUM_INSTANCES_PER_ROW as f32 * 0.5,
);
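// Aside, not part of this commit: INSTANCE_DISPLACEMENT shifts every instance by half the
// row count so the 10x10 grid ends up centered on the origin. A minimal sketch checking that
// arithmetic, assuming only the two constants defined just above:
// Illustrative only; not part of the original diff.
#[allow(dead_code)]
fn instance_grid_is_centered() {
    // The first instance (x = 0, z = 0) lands at (-5, 0, -5)...
    let first = cgmath::Vector3::new(0.0, 0.0, 0.0) - INSTANCE_DISPLACEMENT;
    assert_eq!(first, cgmath::Vector3::new(-5.0, 0.0, -5.0));
    // ...and the last one (x = 9, z = 9) at (4, 0, 4), so the grid straddles the origin.
    let n = NUM_INSTANCES_PER_ROW as f32;
    let last = cgmath::Vector3::new(n - 1.0, 0.0, n - 1.0) - INSTANCE_DISPLACEMENT;
    assert_eq!(last, cgmath::Vector3::new(4.0, 0.0, 4.0));
}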
struct Camera {
eye: cgmath::Point3<f32>,
@ -133,11 +147,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -194,8 +209,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -214,10 +229,9 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
let transform = cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation);
InstanceRaw {
transform,
}
let transform =
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation);
InstanceRaw { transform }
}
}
@ -254,33 +268,31 @@ struct State {
}
fn quat_mul(q: cgmath::Quaternion<f32>, r: cgmath::Quaternion<f32>) -> cgmath::Quaternion<f32> {
// This block uses quaternions of the form of
// This block uses quaternions of the form of
// q=q0+iq1+jq2+kq3
// q=q0+iq1+jq2+kq3
// and
// and
// r=r0+ir1+jr2+kr3.
// r=r0+ir1+jr2+kr3.
// The quaternion product has the form of
// The quaternion product has the form of
// t=q×r=t0+it1+jt2+kt3,
// t=q×r=t0+it1+jt2+kt3,
// where
// where
// t0=(r0 q0 - r1 q1 - r2 q2 - r3 q3)
// t1=(r0 q1 + r1 q0 - r2 q3 + r3 q2)
// t2=(r0 q2 + r1 q3 + r2 q0 - r3 q1)
// t3=(r0 q3 - r1 q2 + r2 q1 + r3 q0)
// t0=(r0 q0 - r1 q1 - r2 q2 - r3 q3)
// t1=(r0 q1 + r1 q0 - r2 q3 + r3 q2)
// t2=(r0 q2 + r1 q3 + r2 q0 - r3 q1)
// t3=(r0 q3 - r1 q2 + r2 q1 + r3 q0)
let w = r.s * q.s - r.v.x * q.v.x - r.v.y * q.v.y - r.v.z * q.v.z;
let xi = r.s * q.v.x + r.v.x * q.s - r.v.y * q.v.z + r.v.z * q.v.y;
let yj = r.s * q.v.y + r.v.x * q.v.z + r.v.y * q.s - r.v.z * q.v.x;
let zk = r.s * q.v.z - r.v.x * q.v.y + r.v.y * q.v.x + r.v.z * q.s;
let w = r.s * q.s - r.v.x * q.v.x - r.v.y * q.v.y - r.v.z * q.v.z;
let xi = r.s * q.v.x + r.v.x * q.s - r.v.y * q.v.z + r.v.z * q.v.y;
let yj = r.s * q.v.y + r.v.x * q.v.z + r.v.y * q.s - r.v.z * q.v.x;
let zk = r.s * q.v.z - r.v.x * q.v.y + r.v.y * q.v.x + r.v.z * q.s;
cgmath::Quaternion::new(w, xi, yj, zk)
cgmath::Quaternion::new(w, xi, yj, zk)
}
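// Aside, not part of this commit: a minimal sketch showing quat_mul behaving like quaternion
// composition, using the cgmath types this file already imports. Two 90-degree turns about Z
// should match a single 180-degree turn, up to floating point error.
// Illustrative only; not part of the original diff.
#[allow(dead_code)]
fn quat_mul_sanity_check() {
    use cgmath::prelude::*;
    use cgmath::{Deg, Quaternion, Vector3};

    let quarter: Quaternion<f32> = Quaternion::from_axis_angle(Vector3::unit_z(), Deg(90.0));
    let half: Quaternion<f32> = Quaternion::from_axis_angle(Vector3::unit_z(), Deg(180.0));
    let composed = quat_mul(quarter, quarter);

    // Compare the scalar part and the vector part with a small tolerance.
    assert!((composed.s - half.s).abs() < 1e-6);
    assert!((composed.v - half.v).magnitude() < 1e-6);
}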
impl State {
@ -291,20 +303,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -316,15 +332,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -339,32 +351,27 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -380,80 +387,87 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
                    // as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_y(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
Instance {
position, rotation,
}
let instances = (0..NUM_INSTANCES_PER_ROW)
.flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 {
x: x as f32,
y: 0.0,
z: z as f32,
} - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
                        // as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_y(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
}).collect::<Vec<_>>();
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
                        // The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
                            // The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -462,73 +476,59 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -554,7 +554,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -571,45 +570,57 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
for instance in &mut self.instances {
let amount = cgmath::Quaternion::from_angle_y(cgmath::Rad(ROTATION_SPEED));
let current = instance.rotation;
instance.rotation = quat_mul(amount, current);
}
let instance_data = self.instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
self.queue.write_buffer(&self.instance_buffer, 0, bytemuck::cast_slice(&instance_data));
let instance_data = self
.instances
.iter()
.map(Instance::to_raw)
.collect::<Vec<_>>();
self.queue.write_buffer(
&self.instance_buffer,
0,
bytemuck::cast_slice(&instance_data),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -628,9 +639,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -642,30 +651,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                        // new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                            // new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {

@ -1,17 +1,21 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(
NUM_INSTANCES_PER_ROW as f32 * 0.5,
0.0,
NUM_INSTANCES_PER_ROW as f32 * 0.5,
);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
@ -40,24 +44,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
Vertex {
position: [-0.0868241, 0.49240386, 0.0],
tex_coords: [0.4131759, 0.00759614],
}, // A
Vertex {
position: [-0.49513406, 0.06958647, 0.0],
tex_coords: [0.0048659444, 0.43041354],
}, // B
Vertex {
position: [-0.21918549, -0.44939706, 0.0],
tex_coords: [0.28081453, 0.949397057],
}, // C
Vertex {
position: [0.35966998, -0.3473291, 0.0],
tex_coords: [0.85967, 0.84732911],
}, // D
Vertex {
position: [0.44147372, 0.2347359, 0.0],
tex_coords: [0.9414737, 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -132,11 +147,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -203,7 +219,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -251,20 +268,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -276,15 +297,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -299,32 +316,27 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, 10.0).into(),
@ -340,80 +352,87 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
                    // as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
Instance {
position, rotation,
}
let instances = (0..NUM_INSTANCES_PER_ROW)
.flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 {
x: x as f32,
y: 0.0,
z: z as f32,
} - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
                        // as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
}).collect::<Vec<_>>();
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
                        // The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
                            // The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -422,73 +441,59 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -515,7 +520,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -532,37 +536,41 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -582,9 +590,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -596,30 +602,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                        // new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                            // new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {

@ -1,5 +1,5 @@
use image::GenericImageView;
use anyhow::*;
use image::GenericImageView;
pub struct Texture {
pub texture: wgpu::Texture,
@ -11,8 +11,8 @@ impl Texture {
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
@ -22,7 +22,7 @@ impl Texture {
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
@ -32,17 +32,15 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -60,18 +58,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}
}

@ -1,7 +1,7 @@
use glob::glob;
use anyhow::*;
use glob::glob;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -12,7 +12,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -26,33 +27,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -61,14 +65,14 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
Ok(())
}
}

@ -1,12 +1,12 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
mod texture;
@ -37,36 +37,56 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
Vertex {
position: [-0.0868241, -0.49240386, 0.0],
tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614],
}, // A
Vertex {
position: [-0.49513406, -0.06958647, 0.0],
tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354],
}, // B
Vertex {
position: [-0.21918549, 0.44939706, 0.0],
tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057],
}, // C
Vertex {
position: [0.35966998, 0.3473291, 0.0],
tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911],
}, // D
Vertex {
position: [0.44147372, -0.2347359, 0.0],
tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
const DEPTH_VERTICES: &[Vertex] = &[
Vertex { position: [0.0, 0.0, 0.0], tex_coords: [0.0, 1.0]},
Vertex { position: [1.0, 0.0, 0.0], tex_coords: [1.0, 1.0]},
Vertex { position: [1.0, 1.0, 0.0], tex_coords: [1.0, 0.0]},
Vertex { position: [0.0, 1.0, 0.0], tex_coords: [0.0, 0.0]},
Vertex {
position: [0.0, 0.0, 0.0],
tex_coords: [0.0, 1.0],
},
Vertex {
position: [1.0, 0.0, 0.0],
tex_coords: [1.0, 1.0],
},
Vertex {
position: [1.0, 1.0, 0.0],
tex_coords: [1.0, 0.0],
},
Vertex {
position: [0.0, 1.0, 0.0],
tex_coords: [0.0, 0.0],
},
];
const DEPTH_INDICES: &[u16] = &[
0, 1, 2,
0, 2, 3,
];
const DEPTH_INDICES: &[u16] = &[0, 1, 2, 0, 2, 3];
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -79,8 +99,11 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
const NUM_INSTANCES_PER_ROW: u32 = 10;
#[allow(dead_code)]
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(
NUM_INSTANCES_PER_ROW as f32 * 0.5,
0.0,
NUM_INSTANCES_PER_ROW as f32 * 0.5,
);
struct Camera {
eye: cgmath::Point3<f32>,
@ -147,11 +170,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -208,8 +232,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
            // Rescale the distance between the target and eye so
            // that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
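            // In other words: the normalized direction has unit length, so scaling it
            // by forward_mag keeps the eye exactly forward_mag away from the target;
            // only the viewing direction changes.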
}
@ -227,7 +251,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -255,31 +280,27 @@ impl DepthPass {
fn new(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
let texture = texture::Texture::create_depth_texture(device, sc_desc, "depth_texture");
let layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
label: Some("Depth Pass Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
count: None,
ty: wgpu::BindingType::SampledTexture {
component_type: wgpu::TextureComponentType::Float,
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
visibility: wgpu::ShaderStage::FRAGMENT,
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Depth Pass Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
count: None,
ty: wgpu::BindingType::SampledTexture {
component_type: wgpu::TextureComponentType::Float,
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
count: None,
ty: wgpu::BindingType::Sampler {
comparison: true,
},
visibility: wgpu::ShaderStage::FRAGMENT,
}
]
}
);
visibility: wgpu::ShaderStage::FRAGMENT,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
count: None,
ty: wgpu::BindingType::Sampler { comparison: true },
visibility: wgpu::ShaderStage::FRAGMENT,
},
],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
@ -291,33 +312,27 @@ impl DepthPass {
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
},
],
label: Some("depth_pass.bind_group"),
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Depth Pass VB"),
contents: bytemuck::cast_slice(DEPTH_VERTICES),
usage: wgpu::BufferUsage::VERTEX
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Depth Pass IB"),
contents: bytemuck::cast_slice(DEPTH_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Depth Pass VB"),
contents: bytemuck::cast_slice(DEPTH_VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Depth Pass IB"),
contents: bytemuck::cast_slice(DEPTH_INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
label: Some("Depth Pass Pipeline Layout"),
bind_group_layouts: &[&layout],
push_constant_ranges: &[],
}
);
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Depth Pass Pipeline Layout"),
bind_group_layouts: &[&layout],
push_constant_ranges: &[],
});
let vs_module = device.create_shader_module(wgpu::include_spirv!("challenge.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("challenge.frag.spv"));
@ -342,20 +357,16 @@ impl DepthPass {
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
@ -363,10 +374,10 @@ impl DepthPass {
});
Self {
            texture,
            layout,
            bind_group,
            vertex_buffer,
index_buffer,
num_depth_indices: DEPTH_INDICES.len() as u32,
render_pipeline,
@ -385,7 +396,7 @@ impl DepthPass {
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.texture.sampler),
}
},
],
label: Some("depth_pass.bind_group"),
});
@ -393,16 +404,14 @@ impl DepthPass {
fn render(&self, frame: &wgpu::SwapChainTexture, encoder: &mut wgpu::CommandEncoder) {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
@ -446,20 +455,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -471,15 +484,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -494,32 +503,27 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -535,80 +539,87 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
                    // as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
Instance {
position, rotation,
}
let instances = (0..NUM_INSTANCES_PER_ROW)
.flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 {
x: x as f32,
y: 0.0,
z: z as f32,
} - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
                        // as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
}).collect::<Vec<_>>();
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
                        // The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
                            // The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -617,78 +628,64 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 2, // corresponds to bilinear filtering
depth_bias_slope_scale: 2.0,
depth_bias_clamp: 0.0,
clamp_depth: device.features().contains(wgpu::Features::DEPTH_CLAMPING),
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 2, // corresponds to bilinear filtering
depth_bias_slope_scale: 2.0,
depth_bias_clamp: 0.0,
clamp_depth: device.features().contains(wgpu::Features::DEPTH_CLAMPING),
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
let depth_pass = DepthPass::new(&device, &sc_desc);
@ -717,7 +714,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -736,37 +732,41 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_pass.texture.view,
depth_ops: Some(wgpu::Operations {
@ -794,9 +794,7 @@ impl State {
fn main() {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -808,30 +806,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                        // new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                            // new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {

@ -1,12 +1,12 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use wgpu::util::DeviceExt;
mod texture;
@ -37,24 +37,35 @@ impl Vertex {
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
Vertex {
position: [-0.0868241, -0.49240386, 0.0],
tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614],
}, // A
Vertex {
position: [-0.49513406, -0.06958647, 0.0],
tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354],
}, // B
Vertex {
position: [-0.21918549, 0.44939706, 0.0],
tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057],
}, // C
Vertex {
position: [0.35966998, 0.3473291, 0.0],
tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911],
}, // D
Vertex {
position: [0.44147372, -0.2347359, 0.0],
tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641],
}, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4];
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -65,8 +76,11 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(
NUM_INSTANCES_PER_ROW as f32 * 0.5,
0.0,
NUM_INSTANCES_PER_ROW as f32 * 0.5,
);
struct Camera {
eye: cgmath::Point3<f32>,
@ -133,11 +147,12 @@ impl CameraController {
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
input:
KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
@ -194,8 +209,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -213,7 +228,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -261,20 +277,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -286,15 +306,11 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"happy-tree.png"
).unwrap();
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -309,32 +325,27 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
}
);
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -350,80 +361,87 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
Instance {
position, rotation,
}
let instances = (0..NUM_INSTANCES_PER_ROW)
.flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 {
x: x as f32,
y: 0.0,
z: z as f32,
} - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
}).collect::<Vec<_>>();
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -432,80 +450,67 @@ impl State {
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
});
let num_indices = INDICES.len() as u32;
Self {
@ -532,7 +537,6 @@ impl State {
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
@ -540,7 +544,8 @@ impl State {
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
// NEW!
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
fn input(&mut self, event: &WindowEvent) -> bool {
@ -550,37 +555,41 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
depth_ops: Some(wgpu::Operations {
@ -605,9 +614,7 @@ impl State {
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let window = WindowBuilder::new().build(&event_loop).unwrap();
use futures::executor::block_on;
@ -619,30 +626,27 @@ fn main() {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &mut so we have to dereference it twice
state.resize(**new_inner_size);
}
_ => {}
}
}
Event::RedrawRequested(_) => {

@ -1,7 +1,6 @@
use anyhow::*;
use image::GenericImageView;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -11,7 +10,11 @@ pub struct Texture {
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -24,34 +27,35 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
@ -61,7 +65,7 @@ impl Texture {
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
@ -71,17 +75,15 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -99,18 +101,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}
}

@ -4,7 +4,7 @@ use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -15,7 +15,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -29,34 +30,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -65,11 +68,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -85,4 +88,4 @@ fn main() -> Result<()> {
copy_items(&paths_to_copy, out_dir, &copy_options)?;
Ok(())
}
}

@ -1,13 +1,12 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window},
event_loop::{ControlFlow, EventLoop},
window::Window,
};
use wgpu::util::DeviceExt;
mod model;
mod texture;
@ -149,8 +148,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -168,7 +167,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -210,20 +210,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -235,8 +239,8 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -251,15 +255,12 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -275,13 +276,11 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW)
@ -310,53 +309,52 @@ impl State {
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -368,70 +366,62 @@ impl State {
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
)
.unwrap();
let vs_module = device.create_shader_module(wgpu::include_spirv!("shader.vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("shader.frag.spv"));
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
push_constant_ranges: &[],
}
);
});
let render_pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(
wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}
),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[model::ModelVertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}
);
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: Some(wgpu::DepthStencilStateDescriptor {
format: texture::Texture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: &[model::ModelVertex::desc()],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Self {
surface,
@ -459,7 +449,8 @@ impl State {
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
@ -468,37 +459,41 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
store: true,
}
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
depth_ops: Some(wgpu::Operations {

@ -75,13 +75,13 @@ impl Model {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), true)?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture = texture::Texture::load(device, queue, containing_folder.join(diffuse_path))?;
let diffuse_texture =
texture::Texture::load(device, queue, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
@ -124,20 +124,16 @@ impl Model {
});
}
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
});
meshes.push(Mesh {
name: m.name,

@ -2,7 +2,6 @@ use anyhow::*;
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -20,12 +19,16 @@ impl Texture {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -38,35 +41,36 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
@ -76,7 +80,7 @@ impl Texture {
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
label: Option<&str>,
) -> Result<Self> {
let dimensions = img.dimensions();
let rgba = img.to_rgba();
@ -86,17 +90,15 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -114,18 +116,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}

@ -4,7 +4,7 @@ use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -15,7 +15,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -29,34 +30,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -65,11 +68,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -85,4 +88,4 @@ fn main() -> Result<()> {
copy_items(&paths_to_copy, out_dir, &copy_options)?;
Ok(())
}
}

@ -1,12 +1,12 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window},
event_loop::{ControlFlow, EventLoop},
window::Window,
};
use wgpu::util::DeviceExt;
mod model;
mod texture;
@ -156,8 +156,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -175,7 +175,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -287,20 +288,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -312,8 +317,8 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -328,15 +333,12 @@ impl State {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -353,13 +355,11 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW)
@ -388,53 +388,52 @@ impl State {
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -446,8 +445,8 @@ impl State {
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
)
.unwrap();
let light = Light {
position: (2.0, 2.0, 2.0).into(),
@ -455,20 +454,18 @@ impl State {
color: (1.0, 1.0, 1.0).into(),
};
let light_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let light_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
@ -486,7 +483,8 @@ impl State {
label: None,
});
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
@ -510,16 +508,11 @@ impl State {
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout
],
push_constant_ranges: &[],
}
);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &light_bind_group_layout],
push_constant_ranges: &[],
});
create_render_pipeline(
&device,
@ -554,7 +547,6 @@ impl State {
light_bind_group,
light_render_pipeline,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
@ -563,7 +555,8 @@ impl State {
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
fn input(&mut self, event: &WindowEvent) -> bool {
@ -573,24 +566,33 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
// Update the light
let old_position = self.light.position;
self.light.position =
cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
* old_position;
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
self.queue
.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@ -598,16 +600,14 @@ impl State {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
@ -637,7 +637,6 @@ impl State {
}
}
fn main() {
env_logger::init();
let event_loop = EventLoop::new();

@ -75,13 +75,13 @@ impl Model {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), true)?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture = texture::Texture::load(device, queue, containing_folder.join(diffuse_path))?;
let diffuse_texture =
texture::Texture::load(device, queue, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
@ -124,20 +124,16 @@ impl Model {
});
}
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
});
meshes.push(Mesh {
name: m.name,

@ -2,7 +2,6 @@ use anyhow::*;
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -20,12 +19,16 @@ impl Texture {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -38,35 +41,36 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label))
@ -76,7 +80,7 @@ impl Texture {
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
label: Option<&str>,
) -> Result<Self> {
let dimensions = img.dimensions();
let rgba = img.to_rgba();
@ -86,17 +90,15 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -114,18 +116,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}

@ -4,7 +4,7 @@ use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -15,7 +15,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -29,34 +30,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -65,11 +68,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -85,4 +88,4 @@ fn main() -> Result<()> {
copy_items(&paths_to_copy, out_dir, &copy_options)?;
Ok(())
}
}

@ -1,12 +1,12 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window},
event_loop::{ControlFlow, EventLoop},
window::Window,
};
use wgpu::util::DeviceExt;
mod model;
mod texture;
@ -41,7 +41,6 @@ impl Camera {
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
@ -154,8 +153,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
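// (forward + right * self.speed) picks the new direction, normalize() makes it
// unit length, and scaling by forward_mag restores the original distance.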
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -173,7 +172,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -287,20 +287,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -312,8 +316,8 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -350,8 +354,7 @@ impl State {
},
],
label: Some("texture_bind_group_layout"),
}
);
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
@ -368,13 +371,11 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW)
@ -403,53 +404,52 @@ impl State {
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
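// BufferUsage::STORAGE: the per-instance matrices are exposed to the vertex shader
// through the read-only storage-buffer binding (binding 1) declared below, rather
// than as per-vertex attributes.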
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -461,7 +461,8 @@ impl State {
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
)
.unwrap();
let light = Light {
position: (2.0, 2.0, 2.0).into(),
@ -469,20 +470,18 @@ impl State {
color: (1.0, 1.0, 1.0).into(),
};
let light_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let light_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
@ -500,7 +499,8 @@ impl State {
label: None,
});
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
@ -524,16 +524,11 @@ impl State {
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout
],
push_constant_ranges: &[],
}
);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &light_bind_group_layout],
push_constant_ranges: &[],
});
create_render_pipeline(
&device,
@ -550,10 +545,30 @@ impl State {
let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
let normal_bytes = include_bytes!("../res/cobble-normal.png");
let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"res/alt-diffuse.png",
false,
)
.unwrap();
let normal_texture = texture::Texture::from_bytes(
&device,
&queue,
normal_bytes,
"res/alt-normal.png",
true,
)
.unwrap();
model::Material::new(
&device,
"alt-material",
diffuse_texture,
normal_texture,
&texture_bind_group_layout,
)
};
Self {
@ -580,7 +595,6 @@ impl State {
#[allow(dead_code)]
debug_material,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
@ -589,7 +603,8 @@ impl State {
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
fn input(&mut self, event: &WindowEvent) -> bool {
@ -599,24 +614,33 @@ impl State {
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
// Update the light
let old_position = self.light.position;
self.light.position =
cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
* old_position;
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
self.queue
.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@ -624,16 +648,14 @@ impl State {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
@ -644,7 +666,7 @@ impl State {
stencil_ops: None,
}),
});
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model(
&self.obj_model,
@ -664,7 +686,6 @@ impl State {
}
}
fn main() {
env_logger::init();
let event_loop = EventLoop::new();

@ -69,8 +69,8 @@ pub struct Material {
impl Material {
pub fn new(
device: &wgpu::Device,
name: &str,
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture,
normal_texture: texture::Texture,
layout: &wgpu::BindGroupLayout,
@ -98,7 +98,7 @@ impl Material {
label: Some(name),
});
Self {
Self {
name: String::from(name),
diffuse_texture,
normal_texture,
@ -130,16 +130,17 @@ impl Model {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), true)?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture = texture::Texture::load(device, queue, containing_folder.join(diffuse_path), false)?;
let diffuse_texture =
texture::Texture::load(device, queue, containing_folder.join(diffuse_path), false)?;
let normal_path = mat.normal_texture;
let normal_texture = texture::Texture::load(device, queue, containing_folder.join(normal_path), true)?;
let normal_texture =
texture::Texture::load(device, queue, containing_folder.join(normal_path), true)?;
materials.push(Material::new(
device,
@ -159,16 +160,15 @@ impl Model {
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
].into(),
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1]
].into(),
]
.into(),
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]].into(),
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
].into(),
]
.into(),
// We'll calculate these later
tangent: [0.0; 3].into(),
bitangent: [0.0; 3].into(),
@ -206,12 +206,12 @@ impl Model {
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// Luckily, the place I found this equation provided
// the solution!
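// Writing the two equations as a 2x2 linear system in T and B and inverting the
// UV matrix gives r = 1 / det, with det = delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x,
// which is exactly what the lines below compute.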
let r = 1.0 / (delta_uv1 .x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
@ -222,20 +222,16 @@ impl Model {
vertices[c[2] as usize].bitangent = bitangent;
}
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
});
meshes.push(Mesh {
name: m.name,

@ -2,7 +2,6 @@ use anyhow::*;
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -21,12 +20,16 @@ impl Texture {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label, is_normal_map)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -39,34 +42,35 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
bytes: &[u8],
label: &str,
is_normal_map: bool,
) -> Result<Self> {
@ -89,21 +93,19 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
},
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
},
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -121,18 +123,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}

@ -4,7 +4,7 @@ use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -15,7 +15,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -29,34 +30,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -65,11 +68,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -85,4 +88,4 @@ fn main() -> Result<()> {
copy_items(&paths_to_copy, out_dir, &copy_options)?;
Ok(())
}
}

@ -1,8 +1,8 @@
use cgmath::*;
use winit::event::*;
use winit::dpi::LogicalPosition;
use std::time::Duration;
use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::LogicalPosition;
use winit::event::*;
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -20,11 +20,7 @@ pub struct Camera {
}
impl Camera {
pub fn new<
V: Into<Point3<f32>>,
Y: Into<Rad<f32>>,
P: Into<Rad<f32>>,
>(
pub fn new<V: Into<Point3<f32>>, Y: Into<Rad<f32>>, P: Into<Rad<f32>>>(
position: V,
yaw: Y,
pitch: P,
@ -39,11 +35,7 @@ impl Camera {
pub fn calc_matrix(&self) -> Matrix4<f32> {
Matrix4::look_at_dir(
self.position,
Vector3::new(
self.yaw.0.cos(),
self.pitch.0.sin(),
self.yaw.0.sin(),
).normalize(),
Vector3::new(self.yaw.0.cos(), self.pitch.0.sin(), self.yaw.0.sin()).normalize(),
Vector3::unit_y(),
)
}
@ -57,13 +49,7 @@ pub struct Projection {
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(
width: u32,
height: u32,
fovy: F,
znear: f32,
zfar: f32,
) -> Self {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
@ -113,8 +99,12 @@ impl CameraController {
}
}
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.amount_forward = amount;
@ -153,10 +143,7 @@ impl CameraController {
self.scroll = match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
MouseScrollDelta::PixelDelta(LogicalPosition {
y: scroll,
..
}) => *scroll as f32,
MouseScrollDelta::PixelDelta(LogicalPosition { y: scroll, .. }) => *scroll as f32,
};
}
@ -175,7 +162,8 @@ impl CameraController {
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward = Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
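// scrollward is the full view direction (yaw and pitch combined), so the scroll
// offset moves the camera straight toward or away from whatever it is looking at.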
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
self.scroll = 0.0;
@ -200,4 +188,4 @@ impl CameraController {
camera.pitch = Rad(FRAC_PI_2);
}
}
}
}

@ -1,23 +1,22 @@
use std::iter;
use cgmath::prelude::*;
use wgpu::util::DeviceExt;
use winit::{
dpi::PhysicalPosition,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window},
window::Window,
};
use wgpu::util::DeviceExt;
mod camera;
mod model;
mod texture;
mod camera; // NEW!
mod texture; // NEW!
use model::{DrawLight, DrawModel, Vertex};
const NUM_INSTANCES_PER_ROW: u32 = 10;
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
@ -51,7 +50,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -85,8 +85,8 @@ struct State {
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
obj_model: model::Model,
camera: camera::Camera, // UPDATED!
projection: camera::Projection, // NEW!
camera: camera::Camera, // UPDATED!
projection: camera::Projection, // NEW!
camera_controller: camera::CameraController, // UPDATED!
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
@ -169,21 +169,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -195,9 +198,8 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -234,24 +236,22 @@ impl State {
},
],
label: Some("texture_bind_group_layout"),
}
);
});
// UPDATED!
let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
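// yaw = -90° points the view down the -z axis (back toward the models) and
// pitch = -20° tilts it slightly downward; see calc_matrix in camera.rs.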
let projection = camera::Projection::new(sc_desc.width, sc_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let projection =
camera::Projection::new(sc_desc.width, sc_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let camera_controller = camera::CameraController::new(4.0, 0.4);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera, &projection);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW)
@ -280,53 +280,52 @@ impl State {
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
// NEW!
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
// We don't plan on changing the size of this buffer
dynamic: false,
// The shader is not allowed to modify its contents
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
// NEW!
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -339,7 +338,8 @@ impl State {
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
)
.unwrap();
println!("Elapsed (Original): {:?}", std::time::Instant::now() - now);
let light = Light {
@ -348,20 +348,18 @@ impl State {
color: (1.0, 1.0, 1.0).into(),
};
let light_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let light_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
@ -379,7 +377,8 @@ impl State {
label: None,
});
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
@ -403,16 +402,11 @@ impl State {
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout
],
push_constant_ranges: &[],
}
);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &light_bind_group_layout],
push_constant_ranges: &[],
});
create_render_pipeline(
&device,
@ -429,10 +423,30 @@ impl State {
let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
let normal_bytes = include_bytes!("../res/cobble-normal.png");
let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"res/alt-diffuse.png",
false,
)
.unwrap();
let normal_texture = texture::Texture::from_bytes(
&device,
&queue,
normal_bytes,
"res/alt-normal.png",
true,
)
.unwrap();
model::Material::new(
&device,
"alt-material",
diffuse_texture,
normal_texture,
&texture_bind_group_layout,
)
};
Self {
@ -463,7 +477,6 @@ impl State {
last_mouse_pos: (0.0, 0.0).into(),
mouse_pressed: false,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
@ -473,24 +486,23 @@ impl State {
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
// UPDATED!
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
input:
KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
..
} => self.camera_controller.process_keyboard(*key, *state),
WindowEvent::MouseWheel {
delta,
..
} => {
WindowEvent::MouseWheel { delta, .. } => {
self.camera_controller.process_scroll(delta);
true
}
@ -502,10 +514,7 @@ impl State {
self.mouse_pressed = *state == ElementState::Pressed;
true
}
WindowEvent::CursorMoved {
position,
..
} => {
WindowEvent::CursorMoved { position, .. } => {
let mouse_dx = position.x - self.last_mouse_pos.x;
let mouse_dy = position.y - self.last_mouse_pos.y;
self.last_mouse_pos = *position;
@ -521,25 +530,35 @@ impl State {
fn update(&mut self, dt: std::time::Duration) {
// UPDATED!
self.camera_controller.update_camera(&mut self.camera, dt);
self.uniforms.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.uniforms
.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
// Update the light
let old_position = self.light.position;
self.light.position =
cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
* old_position;
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
self.queue
.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@ -547,16 +566,14 @@ impl State {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
@ -567,7 +584,7 @@ impl State {
stencil_ops: None,
}),
});
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model(
&self.obj_model,
@ -587,7 +604,6 @@ impl State {
}
}
fn main() {
env_logger::init();
let event_loop = EventLoop::new();

@ -69,8 +69,8 @@ pub struct Material {
impl Material {
pub fn new(
device: &wgpu::Device,
name: &str,
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture,
normal_texture: texture::Texture,
layout: &wgpu::BindGroupLayout,
@ -98,7 +98,7 @@ impl Material {
label: Some(name),
});
Self {
Self {
name: String::from(name),
diffuse_texture,
normal_texture,
@ -130,16 +130,17 @@ impl Model {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), true)?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture = texture::Texture::load(device, queue, containing_folder.join(diffuse_path), false)?;
let diffuse_texture =
texture::Texture::load(device, queue, containing_folder.join(diffuse_path), false)?;
let normal_path = mat.normal_texture;
let normal_texture = texture::Texture::load(device, queue, containing_folder.join(normal_path), true)?;
let normal_texture =
texture::Texture::load(device, queue, containing_folder.join(normal_path), true)?;
materials.push(Material::new(
device,
@ -159,16 +160,15 @@ impl Model {
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
].into(),
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1]
].into(),
]
.into(),
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]].into(),
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
].into(),
]
.into(),
// We'll calculate these later
tangent: [0.0; 3].into(),
bitangent: [0.0; 3].into(),
@ -206,12 +206,12 @@ impl Model {
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// Luckily, the place I found this equation provided
// the solution!
let r = 1.0 / (delta_uv1 .x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
@ -222,20 +222,16 @@ impl Model {
vertices[c[2] as usize].bitangent = bitangent;
}
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
});
meshes.push(Mesh {
name: m.name,

@ -2,7 +2,6 @@ use anyhow::*;
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -21,12 +20,16 @@ impl Texture {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label, is_normal_map)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -39,34 +42,35 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
bytes: &[u8],
label: &str,
is_normal_map: bool,
) -> Result<Self> {
@ -89,21 +93,19 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
},
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
},
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -121,18 +123,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}

@ -5,7 +5,7 @@ use glob::glob;
use rayon::prelude::*;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -16,7 +16,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -30,15 +31,19 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
// UPDATED!
let mut shader_paths = Vec::new();
@ -48,16 +53,14 @@ fn main() -> Result<()> {
// UPDATED!
// This is parallelized
let shaders = shader_paths.into_par_iter()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
let shaders = shader_paths
.into_par_iter()
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -66,11 +69,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -86,4 +89,4 @@ fn main() -> Result<()> {
copy_items(&paths_to_copy, out_dir, &copy_options)?;
Ok(())
}
}

@ -1,8 +1,8 @@
use cgmath::*;
use winit::event::*;
use winit::dpi::LogicalPosition;
use std::time::Duration;
use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::LogicalPosition;
use winit::event::*;
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -20,11 +20,7 @@ pub struct Camera {
}
impl Camera {
pub fn new<
V: Into<Point3<f32>>,
Y: Into<Rad<f32>>,
P: Into<Rad<f32>>,
>(
pub fn new<V: Into<Point3<f32>>, Y: Into<Rad<f32>>, P: Into<Rad<f32>>>(
position: V,
yaw: Y,
pitch: P,
@ -39,11 +35,7 @@ impl Camera {
pub fn calc_matrix(&self) -> Matrix4<f32> {
Matrix4::look_at_dir(
self.position,
Vector3::new(
self.yaw.0.cos(),
self.pitch.0.sin(),
self.yaw.0.sin(),
).normalize(),
Vector3::new(self.yaw.0.cos(), self.pitch.0.sin(), self.yaw.0.sin()).normalize(),
Vector3::unit_y(),
)
}
@ -57,13 +49,7 @@ pub struct Projection {
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(
width: u32,
height: u32,
fovy: F,
znear: f32,
zfar: f32,
) -> Self {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
@ -113,8 +99,12 @@ impl CameraController {
}
}
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.amount_forward = amount;
@ -153,10 +143,7 @@ impl CameraController {
self.scroll = -match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
MouseScrollDelta::PixelDelta(LogicalPosition {
y: scroll,
..
}) => *scroll as f32,
MouseScrollDelta::PixelDelta(LogicalPosition { y: scroll, .. }) => *scroll as f32,
};
}
@ -175,7 +162,8 @@ impl CameraController {
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward = Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
self.scroll = 0.0;
@ -200,4 +188,4 @@ impl CameraController {
camera.pitch = Rad(FRAC_PI_2);
}
}
}
}

@ -1,23 +1,22 @@
use cgmath::prelude::*;
use rayon::prelude::*;
use std::iter;
use wgpu::util::DeviceExt;
use winit::{
dpi::PhysicalPosition,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window},
window::Window,
};
use wgpu::util::DeviceExt;
mod camera;
mod model;
mod texture;
mod camera; // NEW!
mod texture; // NEW!
use model::{DrawLight, DrawModel, Vertex};
const NUM_INSTANCES_PER_ROW: u32 = 10;
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
@ -51,7 +50,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -168,21 +168,24 @@ impl State {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -194,9 +197,8 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -233,24 +235,22 @@ impl State {
},
],
label: Some("texture_bind_group_layout"),
}
);
});
// UPDATED!
let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
let projection = camera::Projection::new(sc_desc.width, sc_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let projection =
camera::Projection::new(sc_desc.width, sc_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let camera_controller = camera::CameraController::new(4.0, 0.4);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera, &projection);
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW)
@ -281,49 +281,48 @@ impl State {
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
}
);
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
count: None,
},
],
label: Some("uniform_bind_group_layout"),
});
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..))
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
label: Some("uniform_bind_group"),
@ -335,7 +334,8 @@ impl State {
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
)
.unwrap();
let light = Light {
position: (2.0, 2.0, 2.0).into(),
@ -343,20 +343,18 @@ impl State {
color: (1.0, 1.0, 1.0).into(),
};
let light_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let light_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
@ -374,7 +372,8 @@ impl State {
label: None,
});
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
@ -398,16 +397,11 @@ impl State {
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout
],
push_constant_ranges: &[],
}
);
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Light Pipeline Layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &light_bind_group_layout],
push_constant_ranges: &[],
});
create_render_pipeline(
&device,
@ -424,10 +418,30 @@ impl State {
let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
let normal_bytes = include_bytes!("../res/cobble-normal.png");
let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
let normal_texture = texture::Texture::from_bytes(&device, &queue, normal_bytes, "res/alt-normal.png", true).unwrap();
model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
let diffuse_texture = texture::Texture::from_bytes(
&device,
&queue,
diffuse_bytes,
"res/alt-diffuse.png",
false,
)
.unwrap();
let normal_texture = texture::Texture::from_bytes(
&device,
&queue,
normal_bytes,
"res/alt-normal.png",
true,
)
.unwrap();
model::Material::new(
&device,
"alt-material",
diffuse_texture,
normal_texture,
&texture_bind_group_layout,
)
};
Self {
@ -457,7 +471,6 @@ impl State {
last_mouse_pos: (0.0, 0.0).into(),
mouse_pressed: false,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
@ -466,23 +479,22 @@ impl State {
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
input:
KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
..
} => self.camera_controller.process_keyboard(*key, *state),
WindowEvent::MouseWheel {
delta,
..
} => {
WindowEvent::MouseWheel { delta, .. } => {
self.camera_controller.process_scroll(delta);
true
}
@ -494,10 +506,7 @@ impl State {
self.mouse_pressed = *state == ElementState::Pressed;
true
}
WindowEvent::CursorMoved {
position,
..
} => {
WindowEvent::CursorMoved { position, .. } => {
let mouse_dx = position.x - self.last_mouse_pos.x;
let mouse_dy = position.y - self.last_mouse_pos.y;
self.last_mouse_pos = *position;
@ -512,25 +521,35 @@ impl State {
fn update(&mut self, dt: std::time::Duration) {
self.camera_controller.update_camera(&mut self.camera, dt);
self.uniforms.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
self.uniforms
.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(
&self.uniform_buffer,
0,
bytemuck::cast_slice(&[self.uniforms]),
);
// Update the light
let old_position = self.light.position;
self.light.position =
cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
* old_position;
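// Multiplying the quaternion by the position vector rotates the light
// one degree around the world Y axis on every update.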
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
self.queue
.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) {
let frame = self.swap_chain.get_current_frame()
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@ -538,16 +557,14 @@ impl State {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
),
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
@ -558,7 +575,7 @@ impl State {
stencil_ops: None,
}),
});
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model(
&self.obj_model,
@ -578,7 +595,6 @@ impl State {
}
}
fn main() {
env_logger::init();
let event_loop = EventLoop::new();

@ -70,8 +70,8 @@ pub struct Material {
impl Material {
pub fn new(
device: &wgpu::Device,
name: &str,
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture,
normal_texture: texture::Texture,
layout: &wgpu::BindGroupLayout,
@ -99,7 +99,7 @@ impl Material {
label: Some(name),
});
Self {
Self {
name: String::from(name),
diffuse_texture,
normal_texture,
@ -131,127 +131,134 @@ impl Model {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), true)?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
// UPDATED!
let materials = obj_materials.par_iter().map(|mat| {
// We can also parallelize loading the textures!
let mut textures = [
(containing_folder.join(&mat.diffuse_texture), false),
(containing_folder.join(&mat.normal_texture), true),
].par_iter().map(|(texture_path, is_normal_map)| {
//
texture::Texture::load(device, queue, texture_path, *is_normal_map)
}).collect::<Result<Vec<_>>>()?;
// Pop removes from the end of the list.
let normal_texture = textures.pop().unwrap();
let diffuse_texture = textures.pop().unwrap();
Ok(Material::new(
device,
&mat.name,
diffuse_texture,
normal_texture,
layout,
))
}).collect::<Result<Vec<Material>>>()?;
let materials = obj_materials
.par_iter()
.map(|mat| {
// We can also parallelize loading the textures!
let mut textures = [
(containing_folder.join(&mat.diffuse_texture), false),
(containing_folder.join(&mat.normal_texture), true),
]
.par_iter()
.map(|(texture_path, is_normal_map)| {
//
texture::Texture::load(device, queue, texture_path, *is_normal_map)
})
.collect::<Result<Vec<_>>>()?;
// Pop removes from the end of the list.
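// The normal map was pushed last, so it comes off first, then the diffuse map.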
let normal_texture = textures.pop().unwrap();
let diffuse_texture = textures.pop().unwrap();
Ok(Material::new(
device,
&mat.name,
diffuse_texture,
normal_texture,
layout,
))
})
.collect::<Result<Vec<Material>>>()?;
// UPDATED!
let meshes = obj_models.par_iter().map(|m| {
let mut vertices = (0..m.mesh.positions.len() / 3).into_par_iter().map(|i| {
ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
].into(),
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1]
].into(),
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
].into(),
// We'll calculate these later
tangent: [0.0; 3].into(),
bitangent: [0.0; 3].into(),
let meshes = obj_models
.par_iter()
.map(|m| {
let mut vertices = (0..m.mesh.positions.len() / 3)
.into_par_iter()
.map(|i| {
ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
]
.into(),
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]]
.into(),
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
]
.into(),
// We'll calculate these later
tangent: [0.0; 3].into(),
bitangent: [0.0; 3].into(),
}
})
.collect::<Vec<_>>();
let indices = &m.mesh.indices;
// Calculate tangents and bitangents. We're going to
// use the triangles, so we need to loop through the
// indices in chunks of 3
for c in indices.chunks(3) {
let v0 = vertices[c[0] as usize];
let v1 = vertices[c[1] as usize];
let v2 = vertices[c[2] as usize];
let pos0 = v0.position;
let pos1 = v1.position;
let pos2 = v2.position;
let uv0 = v0.tex_coords;
let uv1 = v1.tex_coords;
let uv2 = v2.tex_coords;
// Calculate the edges of the triangle
let delta_pos1 = pos1 - pos0;
let delta_pos2 = pos2 - pos0;
// This will give us a direction to calculate the
// tangent and bitangent
let delta_uv1 = uv1 - uv0;
let delta_uv2 = uv2 - uv0;
// Solving the following system of equations will
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// the solution!
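// Treating the UV deltas as a 2x2 matrix and inverting it gives
// (with det = delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x):
// T = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) / det
// B = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) / det
// which is what the code below computes, with r = 1 / det.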
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
vertices[c[2] as usize].tangent = tangent;
vertices[c[0] as usize].bitangent = bitangent;
vertices[c[1] as usize].bitangent = bitangent;
vertices[c[2] as usize].bitangent = bitangent;
}
}).collect::<Vec<_>>();
let indices = &m.mesh.indices;
// Calculate tangents and bitangents. We're going to
// use the triangles, so we need to loop through the
// indices in chunks of 3
for c in indices.chunks(3) {
let v0 = vertices[c[0] as usize];
let v1 = vertices[c[1] as usize];
let v2 = vertices[c[2] as usize];
let pos0 = v0.position;
let pos1 = v1.position;
let pos2 = v2.position;
let uv0 = v0.tex_coords;
let uv1 = v1.tex_coords;
let uv2 = v2.tex_coords;
// Calculate the edges of the triangle
let delta_pos1 = pos1 - pos0;
let delta_pos2 = pos2 - pos0;
// This will give us a direction to calculate the
// tangent and bitangent
let delta_uv1 = uv1 - uv0;
let delta_uv2 = uv2 - uv0;
// Solving the following system of equations will
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// the solution!
let r = 1.0 / (delta_uv1 .x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
vertices[c[2] as usize].tangent = tangent;
vertices[c[0] as usize].bitangent = bitangent;
vertices[c[1] as usize].bitangent = bitangent;
vertices[c[2] as usize].bitangent = bitangent;
};
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", m.name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", m.name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
Ok(Mesh {
name: m.name.clone(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
});
Ok(Mesh {
name: m.name.clone(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
})
})
}).collect::<Result<Vec<_>>>()?;
.collect::<Result<Vec<_>>>()?;
Ok(Self { meshes, materials })
}

@ -2,7 +2,6 @@ use anyhow::*;
use image::GenericImageView;
use std::path::Path;
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -21,12 +20,16 @@ impl Texture {
// Needed to appease the borrow checker
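// `image::open(path)` takes `path` by value, so we copy it into an owned
// `PathBuf` first and borrow the label from the copy instead.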
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label, is_normal_map)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -39,34 +42,35 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
bytes: &[u8],
label: &str,
is_normal_map: bool,
) -> Result<Self> {
@ -89,21 +93,19 @@ impl Texture {
height: dimensions.1,
depth: 1,
};
let texture = device.create_texture(
&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
},
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
}
);
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
},
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
queue.write_texture(
wgpu::TextureCopyView {
@ -121,18 +123,20 @@ impl Texture {
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}
);
Ok(Self { texture, view, sampler })
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
}

@ -1,10 +1,10 @@
use glob::glob;
use failure::bail;
use fs_extra::copy_items;
use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
fn main() {
copy_res();
@ -26,20 +26,19 @@ fn copy_res() {
fn compile_shaders() {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert").unwrap(),
glob("./src/**/*.frag").unwrap(),
glob("./src/**/*.comp").unwrap(),
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result.unwrap()).unwrap()
})
.map(|glob_result| ShaderData::load(glob_result.unwrap()).unwrap())
.collect::<Vec<ShaderData>>();
let mut compiler = shaderc::Compiler::new().unwrap();
@ -50,13 +49,15 @@ fn compile_shaders() {
// be better just to only compile shaders that have been changed
// recently.
for shader in shaders {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
).unwrap();
let compiled = compiler
.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)
.unwrap();
write(shader.spv_path, compiled.as_binary_u8()).unwrap();
}
@ -83,6 +84,11 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
}

@ -1,8 +1,8 @@
use cgmath::prelude::*;
use cgmath::*;
use winit::event::*;
use std::time::Duration;
use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::event::*;
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -20,11 +20,7 @@ pub struct Camera {
}
impl Camera {
pub fn new<
V: Into<Point3<f32>>,
Y: Into<Rad<f32>>,
P: Into<Rad<f32>>,
>(
pub fn new<V: Into<Point3<f32>>, Y: Into<Rad<f32>>, P: Into<Rad<f32>>>(
position: V,
yaw: Y,
pitch: P,
@ -39,11 +35,7 @@ impl Camera {
pub fn calc_matrix(&self) -> Matrix4<f32> {
Matrix4::look_at_dir(
self.position,
Vector3::new(
self.yaw.0.cos(),
self.pitch.0.sin(),
self.yaw.0.sin(),
).normalize(),
Vector3::new(self.yaw.0.cos(), self.pitch.0.sin(), self.yaw.0.sin()).normalize(),
Vector3::unit_y(),
)
}
@ -57,13 +49,7 @@ pub struct Projection {
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(
width: u32,
height: u32,
fovy: F,
znear: f32,
zfar: f32,
) -> Self {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
@ -107,8 +93,12 @@ impl CameraController {
}
}
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.amount_forward = amount;
@ -167,4 +157,4 @@ impl CameraController {
pub fn is_dirty(&self) -> bool {
self.is_dirty
}
}
}

@ -1,7 +1,6 @@
use cgmath::*;
use std::mem::size_of;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct Vertex {
@ -43,18 +42,12 @@ impl Mesh {
vertex(0.0, 1000.0, 0.0),
vertex(0.0, 0.0, 1000.0),
];
let axes_indices: &[u16] = &[
0, 1,
0, 2,
0, 3,
];
let axes_indices: &[u16] = &[0, 1, 0, 2, 0, 3];
let index_count = axes_indices.len() as u32;
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(axes_indices),
wgpu::BufferUsage::INDEX,
);
let index_buffer = device
.create_buffer_with_data(bytemuck::cast_slice(axes_indices), wgpu::BufferUsage::INDEX);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(axes_vertices),
bytemuck::cast_slice(axes_vertices),
wgpu::BufferUsage::VERTEX,
);
Self {

@ -1,8 +1,8 @@
use winit::window::Window;
use winit::event::*;
use winit::dpi::*;
use cgmath::*;
use std::time::Duration;
use winit::dpi::*;
use winit::event::*;
use winit::window::Window;
use crate::camera::*;
use crate::data::*;
@ -38,12 +38,13 @@ impl Demo {
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
},
wgpu::BackendBit::PRIMARY,
).await.unwrap();
let (device, queue): (wgpu::Device, wgpu::Queue) = adapter.request_device(
&Default::default(),
).await;
)
.await
.unwrap();
let (device, queue): (wgpu::Device, wgpu::Queue) =
adapter.request_device(&Default::default()).await;
let inner_size = window.inner_size();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -55,14 +56,14 @@ impl Demo {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let camera = Camera::new((0.0, 0.5, 3.0), Deg(-90.0), Deg(0.0));
let controller = CameraController::new(0.5);
let projection = Projection::new(inner_size.width, inner_size.height, Deg(45.0), 0.1, 1000.0);
let projection =
Projection::new(inner_size.width, inner_size.height, Deg(45.0), 0.1, 1000.0);
let uniforms = Uniforms::new(&device, &camera, &projection);
let (uniform_layout, uniforms_bind_group) = create_uniform_binding(&device, &uniforms);
let debug_pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&uniform_layout]
}
);
let debug_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&uniform_layout],
});
let debug_pipeline = RenderPipelineBuilder::new()
.layout(&debug_pipeline_layout)
.color_solid(sc_desc.format)
@ -72,7 +73,8 @@ impl Demo {
.index_format(wgpu::IndexFormat::Uint16)
.vertex_buffer(Vertex::desc())
.cull_mode(wgpu::CullMode::None)
.build(&device).unwrap();
.build(&device)
.unwrap();
let axes = Mesh::axes(&device);
Self {
@ -84,7 +86,12 @@ impl Demo {
swap_chain,
debug_pipeline,
axes,
clear_color: wgpu::Color { r: 0.1, g: 0.2, b: 0.3, a: 1.0 },
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
is_running: true,
camera,
controller,
@ -107,21 +114,22 @@ impl Demo {
pub fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
input:
KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
..
} => {
self.controller.process_keyboard(*key, *state)
|| match (key, *state == ElementState::Pressed) {
(VirtualKeyCode::Escape, true) => {
self.is_running = false;
true
|| match (key, *state == ElementState::Pressed) {
(VirtualKeyCode::Escape, true) => {
self.is_running = false;
true
}
_ => false,
}
_ => false,
}
}
WindowEvent::MouseInput {
button: MouseButton::Left,
@ -131,10 +139,7 @@ impl Demo {
self.mouse_pressed = *state == ElementState::Pressed;
true
}
WindowEvent::CursorMoved {
position,
..
} => {
WindowEvent::CursorMoved { position, .. } => {
let mouse_dx = position.x - self.last_mouse_pos.x;
let mouse_dy = position.y - self.last_mouse_pos.y;
self.last_mouse_pos = *position;
@ -160,27 +165,25 @@ impl Demo {
}
pub fn render(&mut self) {
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor{ label: None },
);
let frame = self.swap_chain.get_next_texture()
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let frame = self
.swap_chain
.get_next_texture()
.expect("Unable to retrieve swap chain texture");
{
let mut pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: self.clear_color,
}
],
depth_stencil_attachment: None,
}
);
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: self.clear_color,
}],
depth_stencil_attachment: None,
});
pass.set_pipeline(&self.debug_pipeline);
pass.set_bind_group(0, &self.uniforms_bind_group, &[]);
pass.set_index_buffer(&self.axes.index_buffer, 0, 0);
@ -190,4 +193,4 @@ impl Demo {
self.queue.submit(&[encoder.finish()]);
}
}
}

@ -6,10 +6,10 @@ mod resource;
use demo::*;
use winit::window::*;
use winit::event::*;
use winit::dpi::*;
use winit::event::*;
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::*;
use futures::executor::block_on;
@ -21,7 +21,8 @@ fn main() {
let window = WindowBuilder::new()
.with_inner_size(PhysicalSize::new(800, 600))
.with_title(env!("CARGO_PKG_NAME"))
.build(&event_loop).unwrap();
.build(&event_loop)
.unwrap();
let mut demo = block_on(Demo::new(&window));
let mut last_update = Instant::now();
let mut is_focused = false;
@ -34,30 +35,35 @@ fn main() {
ControlFlow::Exit
};
match event {
Event::MainEventsCleared => if is_focused {
window.request_redraw();
Event::MainEventsCleared => {
if is_focused {
window.request_redraw();
}
}
Event::WindowEvent {
ref event,
window_id,
} => if window_id == window.id() && !demo.input(event) {
match event {
WindowEvent::Focused(f) => is_focused = *f,
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::ScaleFactorChanged {
new_inner_size,
..
} => demo.resize(**new_inner_size),
WindowEvent::Resized(new_size) => demo.resize(*new_size),
_ => {}
} => {
if window_id == window.id() && !demo.input(event) {
match event {
WindowEvent::Focused(f) => is_focused = *f,
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
demo.resize(**new_inner_size)
}
WindowEvent::Resized(new_size) => demo.resize(*new_size),
_ => {}
}
}
}
Event::RedrawRequested(window_id) => if window_id == window.id() {
let now = Instant::now();
let dt = now - last_update;
last_update = now;
demo.update(dt);
demo.render();
Event::RedrawRequested(window_id) => {
if window_id == window.id() {
let now = Instant::now();
let dt = now - last_update;
last_update = now;
demo.update(dt);
demo.render();
}
}
_ => {}
}

@ -68,7 +68,6 @@ impl<'a> RenderPipelineBuilder<'a> {
self
}
#[allow(dead_code)]
pub fn depth_bias(&mut self, db: i32) -> &mut Self {
self.depth_bias = db;
@ -100,14 +99,12 @@ impl<'a> RenderPipelineBuilder<'a> {
/// Helper method for [RenderPipelineBuilder::color_state]
pub fn color_solid(&mut self, format: wgpu::TextureFormat) -> &mut Self {
self.color_state(
wgpu::ColorStateDescriptor {
format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}
)
self.color_state(wgpu::ColorStateDescriptor {
format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
})
}
pub fn depth_stencil_state(&mut self, dss: wgpu::DepthStencilStateDescriptor) -> &mut Self {
@ -122,17 +119,15 @@ impl<'a> RenderPipelineBuilder<'a> {
depth_write_enabled: bool,
depth_compare: wgpu::CompareFunction,
) -> &mut Self {
self.depth_stencil_state(
wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled,
depth_compare,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}
)
self.depth_stencil_state(wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled,
depth_compare,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
})
}
/// Helper method for [RenderPipelineBuilder::depth_no_stencil]
@ -178,7 +173,7 @@ impl<'a> RenderPipelineBuilder<'a> {
let layout = self.layout.unwrap();
// Render pipelines always have a vertex shader, but due
// to the way the builder pattern works, we can't
// to the way the builder pattern works, we can't
// guarantee that the user will specify one, so we'll
// just return an error if they forgot.
//
@ -194,52 +189,43 @@ impl<'a> RenderPipelineBuilder<'a> {
// Having the shader be optional is giving me issues with
// the borrow checker so I'm going to use a default shader
// if the user doesn't supply one.
let fs_spv = self.fragment_shader.unwrap_or(include_bytes!("default.frag.spv"));
let fs_spv = self
.fragment_shader
.unwrap_or(include_bytes!("default.frag.spv"));
let fs = create_shader_module(device, fs_spv);
let pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs,
entry_point: "main",
},
fragment_stage: Some(
wgpu::ProgrammableStageDescriptor {
module: &fs,
entry_point: "main",
}
),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: self.front_face,
cull_mode: self.cull_mode,
depth_bias: self.depth_bias,
depth_bias_slope_scale: self.depth_bias_slope_scale,
depth_bias_clamp: self.depth_bias_clamp,
}),
primitive_topology: self.primitive_topology,
color_states: &self.color_states,
depth_stencil_state: self.depth_stencil_state.clone(),
vertex_state: wgpu::VertexStateDescriptor {
index_format: self.index_format,
vertex_buffers: &self.vertex_buffers,
},
sample_count: self.sample_count,
sample_mask: self.sample_mask,
alpha_to_coverage_enabled: self.alpha_to_coverage_enabled,
}
);
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: self.front_face,
cull_mode: self.cull_mode,
depth_bias: self.depth_bias,
depth_bias_slope_scale: self.depth_bias_slope_scale,
depth_bias_clamp: self.depth_bias_clamp,
}),
primitive_topology: self.primitive_topology,
color_states: &self.color_states,
depth_stencil_state: self.depth_stencil_state.clone(),
vertex_state: wgpu::VertexStateDescriptor {
index_format: self.index_format,
vertex_buffers: &self.vertex_buffers,
},
sample_count: self.sample_count,
sample_mask: self.sample_mask,
alpha_to_coverage_enabled: self.alpha_to_coverage_enabled,
});
Ok(pipeline)
}
}
fn create_shader_module(device: &wgpu::Device, spirv: &[u8]) -> wgpu::ShaderModule {
device.create_shader_module(
&wgpu::read_spirv(
std::io::Cursor::new(
spirv
)
).unwrap()
)
}
device.create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(spirv)).unwrap())
}

@ -22,11 +22,7 @@ unsafe impl bytemuck::Pod for UniformsRaw {}
unsafe impl bytemuck::Zeroable for UniformsRaw {}
impl Uniforms {
pub fn new(
device: &wgpu::Device,
camera: &Camera,
projection: &Projection,
) -> Self {
pub fn new(device: &wgpu::Device, camera: &Camera, projection: &Projection) -> Self {
let projection_matrix = projection.calc_matrix();
let view_matrix = camera.calc_matrix();
let raw = UniformsRaw {
@ -56,10 +52,7 @@ impl Uniforms {
self.dirty = true;
}
pub fn update(
&mut self,
device: &wgpu::Device,
) -> Option<wgpu::CommandBuffer> {
pub fn update(&mut self, device: &wgpu::Device) -> Option<wgpu::CommandBuffer> {
if self.dirty {
self.dirty = false;
self.raw.view_proj_matrix = self.raw.projection_matrix * self.raw.view_matrix;
@ -68,16 +61,14 @@ impl Uniforms {
bytemuck::cast_slice(&[self.raw]),
wgpu::BufferUsage::COPY_SRC,
);
let mut encoder = device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Uniforms::update()"),
}
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Uniforms::update()"),
});
encoder.copy_buffer_to_buffer(
&copy_buffer,
0,
&self.buffer,
&copy_buffer,
0,
&self.buffer,
0,
size_of::<UniformsRaw>() as wgpu::BufferAddress,
);
@ -88,33 +79,28 @@ impl Uniforms {
}
}
pub fn create_uniform_binding(device: &wgpu::Device, uniforms: &Uniforms) -> (wgpu::BindGroupLayout, wgpu::BindGroup) {
let layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
label: Some("Uniforms::BindGroupLayout"),
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false },
}
],
}
);
let bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("Uniforms::BindGroup"),
layout: &layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniforms.buffer,
range: 0..size_of::<UniformsRaw>() as wgpu::BufferAddress,
}
}
]
}
);
pub fn create_uniform_binding(
device: &wgpu::Device,
uniforms: &Uniforms,
) -> (wgpu::BindGroupLayout, wgpu::BindGroup) {
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Uniforms::BindGroupLayout"),
bindings: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false },
}],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Uniforms::BindGroup"),
layout: &layout,
bindings: &[wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniforms.buffer,
range: 0..size_of::<UniformsRaw>() as wgpu::BufferAddress,
},
}],
});
(layout, bind_group)
}

@ -1,10 +1,10 @@
use glob::glob;
use failure::bail;
use fs_extra::copy_items;
use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
fn main() {
copy_res();
@ -26,20 +26,19 @@ fn copy_res() {
fn compile_shaders() {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert").unwrap(),
glob("./src/**/*.frag").unwrap(),
glob("./src/**/*.comp").unwrap(),
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result.unwrap()).unwrap()
})
.map(|glob_result| ShaderData::load(glob_result.unwrap()).unwrap())
.collect::<Vec<ShaderData>>();
let mut compiler = shaderc::Compiler::new().unwrap();
@ -50,13 +49,15 @@ fn compile_shaders() {
// be better just to only compile shaders that have been changed
// recently.
for shader in shaders {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
).unwrap();
let compiled = compiler
.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)
.unwrap();
write(shader.spv_path, compiled.as_binary_u8()).unwrap();
}
@ -83,6 +84,11 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
}

@ -1,15 +1,15 @@
use cgmath::prelude::*;
use futures::executor::block_on;
use std::path::Path;
use std::time::{Duration, Instant};
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::Window,
};
use futures::executor::block_on;
use std::path::Path;
use std::time::{Duration, Instant};
mod pipeline;
mod model;
mod pipeline;
mod texture;
use model::{DrawLight, DrawModel, Vertex};
@ -42,7 +42,6 @@ impl Camera {
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
@ -155,8 +154,8 @@ impl CameraController {
let forward_mag = forward.magnitude();
if self.is_right_pressed {
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// Rescale the distance between the target and eye so
// that it doesn't change. The eye therefore still
// lies on the circle made by the target and eye.
camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
}
@ -174,7 +173,8 @@ struct Instance {
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
@ -226,7 +226,6 @@ struct State {
debug_material: model::Material,
}
impl State {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
@ -239,15 +238,18 @@ impl State {
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
)
.await
.unwrap();
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
})
.await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
@ -259,7 +261,8 @@ impl State {
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
@ -340,10 +343,7 @@ impl State {
})
.collect::<Vec<_>>();
let instance_data = instances
.iter()
.map(Instance::to_raw)
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer_size =
instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
@ -395,10 +395,11 @@ impl State {
let res_dir = Path::new(env!("OUT_DIR")).join("res");
let (obj_model, cmds) = model::Model::load(
&device,
&texture_bind_group_layout,
&device,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
)
.unwrap();
queue.submit(&cmds);
@ -435,7 +436,8 @@ impl State {
label: None,
});
let depth_texture = texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
@ -453,7 +455,8 @@ impl State {
.vertex_buffer(model::ModelVertex::desc())
.vertex_shader(include_bytes!("shader.vert.spv"))
.fragment_shader(include_bytes!("shader.frag.spv"))
.build(&device).unwrap();
.build(&device)
.unwrap();
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
@ -467,7 +470,8 @@ impl State {
.vertex_buffer(model::ModelVertex::desc())
.vertex_shader(include_bytes!("light.vert.spv"))
.fragment_shader(include_bytes!("light.frag.spv"))
.build(&device).unwrap()
.build(&device)
.unwrap()
};
let debug_material = {
@ -475,13 +479,23 @@ impl State {
let normal_bytes = include_bytes!("../res/cobble-normal.png");
let mut command_buffers = vec![];
let (diffuse_texture, cmds) = texture::Texture::from_bytes(&device, diffuse_bytes, "res/alt-diffuse.png", false).unwrap();
let (diffuse_texture, cmds) =
texture::Texture::from_bytes(&device, diffuse_bytes, "res/alt-diffuse.png", false)
.unwrap();
command_buffers.push(cmds);
let (normal_texture, cmds) = texture::Texture::from_bytes(&device, normal_bytes, "res/alt-normal.png", true).unwrap();
let (normal_texture, cmds) =
texture::Texture::from_bytes(&device, normal_bytes, "res/alt-normal.png", true)
.unwrap();
command_buffers.push(cmds);
queue.submit(&command_buffers);
model::Material::new(&device, "alt-material", diffuse_texture, normal_texture, &texture_bind_group_layout)
model::Material::new(
&device,
"alt-material",
diffuse_texture,
normal_texture,
&texture_bind_group_layout,
)
};
Self {
@ -508,7 +522,6 @@ impl State {
#[allow(dead_code)]
debug_material,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
@ -517,7 +530,8 @@ impl State {
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
}
fn input(&mut self, event: &WindowEvent) -> bool {
@ -528,8 +542,9 @@ impl State {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder =
self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
@ -546,11 +561,10 @@ impl State {
// Update the light
let old_position = self.light.position;
self.light.position =
cgmath::Quaternion::from_axis_angle(
(0.0, 1.0, 0.0).into(),
cgmath::Deg(45.0) * dt.as_secs_f32()
) * old_position;
self.light.position = cgmath::Quaternion::from_axis_angle(
(0.0, 1.0, 0.0).into(),
cgmath::Deg(45.0) * dt.as_secs_f32(),
) * old_position;
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.light]),
@ -568,11 +582,13 @@ impl State {
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
let frame = self
.swap_chain
.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: None
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@ -598,7 +614,7 @@ impl State {
clear_stencil: 0,
}),
});
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model(
&self.obj_model,
@ -637,8 +653,10 @@ fn main() {
ControlFlow::Wait
};
match event {
Event::MainEventsCleared => if is_focused {
window.request_redraw();
Event::MainEventsCleared => {
if is_focused {
window.request_redraw();
}
}
Event::WindowEvent {
ref event,

@ -67,9 +67,9 @@ pub struct Material {
impl Material {
pub fn new(
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture,
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture,
normal_texture: texture::Texture,
layout: &wgpu::BindGroupLayout,
) -> Self {
@ -96,7 +96,7 @@ impl Material {
label: Some(name),
});
Self {
Self {
name: String::from(name),
diffuse_texture,
normal_texture,
@ -135,14 +135,16 @@ impl Model {
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let (diffuse_texture, cmds) = texture::Texture::load(device, containing_folder.join(diffuse_path), false)?;
let (diffuse_texture, cmds) =
texture::Texture::load(device, containing_folder.join(diffuse_path), false)?;
command_buffers.push(cmds);
let normal_path = match mat.unknown_param.get("map_Bump") {
Some(v) => Ok(v),
None => Err(failure::err_msg("Unable to find normal map"))
None => Err(failure::err_msg("Unable to find normal map")),
};
let (normal_texture, cmds) = texture::Texture::load(device, containing_folder.join(normal_path?), true)?;
let (normal_texture, cmds) =
texture::Texture::load(device, containing_folder.join(normal_path?), true)?;
command_buffers.push(cmds);
materials.push(Material::new(
@ -163,16 +165,15 @@ impl Model {
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
].into(),
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1]
].into(),
]
.into(),
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]].into(),
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
].into(),
]
.into(),
// We'll calculate these later
tangent: [0.0; 3].into(),
bitangent: [0.0; 3].into(),
@ -210,12 +211,12 @@ impl Model {
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// Luckily, the place I found this equation provided
// the solution!
let r = 1.0 / (delta_uv1 .x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
@ -231,10 +232,8 @@ impl Model {
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(indices),
wgpu::BufferUsage::INDEX,
);
let index_buffer = device
.create_buffer_with_data(bytemuck::cast_slice(indices), wgpu::BufferUsage::INDEX);
meshes.push(Mesh {
name: m.name,

@ -68,7 +68,6 @@ impl<'a> RenderPipelineBuilder<'a> {
self
}
#[allow(dead_code)]
pub fn depth_bias(&mut self, db: i32) -> &mut Self {
self.depth_bias = db;
@ -100,14 +99,12 @@ impl<'a> RenderPipelineBuilder<'a> {
/// Helper method for [RenderPipelineBuilder::color_state]
pub fn color_solid(&mut self, format: wgpu::TextureFormat) -> &mut Self {
self.color_state(
wgpu::ColorStateDescriptor {
format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}
)
self.color_state(wgpu::ColorStateDescriptor {
format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
})
}
pub fn depth_stencil_state(&mut self, dss: wgpu::DepthStencilStateDescriptor) -> &mut Self {
@ -122,17 +119,15 @@ impl<'a> RenderPipelineBuilder<'a> {
depth_write_enabled: bool,
depth_compare: wgpu::CompareFunction,
) -> &mut Self {
self.depth_stencil_state(
wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled,
depth_compare,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
}
)
self.depth_stencil_state(wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled,
depth_compare,
stencil_front: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_back: wgpu::StencilStateFaceDescriptor::IGNORE,
stencil_read_mask: 0,
stencil_write_mask: 0,
})
}
/// Helper method for [RenderPipelineBuilder::depth_no_stencil]
@ -177,7 +172,7 @@ impl<'a> RenderPipelineBuilder<'a> {
let layout = self.layout.unwrap();
// Render pipelines always have a vertex shader, but due
// to the way the builder pattern works, we can't
// to the way the builder pattern works, we can't
// guarantee that the user will specify one, so we'll
// just return an error if they forgot.
//
@ -193,52 +188,43 @@ impl<'a> RenderPipelineBuilder<'a> {
// Having the shader be optional is giving me issues with
// the borrow checker so I'm going to use a default shader
// if the user doesn't supply one.
let fs_spv = self.fragment_shader.unwrap_or(include_bytes!("default.frag.spv"));
let fs_spv = self
.fragment_shader
.unwrap_or(include_bytes!("default.frag.spv"));
let fs = create_shader_module(device, fs_spv);
let pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs,
entry_point: "main",
},
fragment_stage: Some(
wgpu::ProgrammableStageDescriptor {
module: &fs,
entry_point: "main",
}
),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: self.front_face,
cull_mode: self.cull_mode,
depth_bias: self.depth_bias,
depth_bias_slope_scale: self.depth_bias_slope_scale,
depth_bias_clamp: self.depth_bias_clamp,
}),
primitive_topology: self.primitive_topology,
color_states: &self.color_states,
depth_stencil_state: self.depth_stencil_state.clone(),
vertex_state: wgpu::VertexStateDescriptor {
index_format: self.index_format,
vertex_buffers: &self.vertex_buffers,
},
sample_count: self.sample_count,
sample_mask: self.sample_mask,
alpha_to_coverage_enabled: self.alpha_to_coverage_enabled,
}
);
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: self.front_face,
cull_mode: self.cull_mode,
depth_bias: self.depth_bias,
depth_bias_slope_scale: self.depth_bias_slope_scale,
depth_bias_clamp: self.depth_bias_clamp,
}),
primitive_topology: self.primitive_topology,
color_states: &self.color_states,
depth_stencil_state: self.depth_stencil_state.clone(),
vertex_state: wgpu::VertexStateDescriptor {
index_format: self.index_format,
vertex_buffers: &self.vertex_buffers,
},
sample_count: self.sample_count,
sample_mask: self.sample_mask,
alpha_to_coverage_enabled: self.alpha_to_coverage_enabled,
});
Ok(pipeline)
}
}
fn create_shader_module(device: &wgpu::Device, spirv: &[u8]) -> wgpu::ShaderModule {
device.create_shader_module(
&wgpu::read_spirv(
std::io::Cursor::new(
spirv
)
).unwrap()
)
}
device.create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(spirv)).unwrap())
}

@ -23,12 +23,16 @@ impl Texture {
// The label currently can only be 64 characters, so we'll need
// to use just the file name for the label.
let label = path_copy.file_name().unwrap().to_str();
let img = image::open(path)?;
Self::from_image(device, &img, label, is_normal_map)
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor, label: &str) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
@ -43,7 +47,7 @@ impl Texture {
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::COPY_SRC,
};
let texture = device.create_texture(&desc);
@ -61,14 +65,18 @@ impl Texture {
compare: wgpu::CompareFunction::LessEqual,
});
Self { texture, view, sampler }
Self {
texture,
view,
sampler,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
bytes: &[u8],
label: &str,
device: &wgpu::Device,
bytes: &[u8],
label: &str,
is_normal_map: bool,
) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
@ -86,7 +94,7 @@ impl Texture {
if dimensions.0 == 0 || dimensions.1 == 0 {
bail!(
"Image {} has invalid dimensions! {:?}",
"Image {} has invalid dimensions! {:?}",
label.unwrap_or("UNNAMED_IMAGE"),
dimensions
)
@ -119,16 +127,11 @@ impl Texture {
};
let texture = device.create_texture(&texture_desc);
let buffer = device.create_buffer_with_data(
&rgba,
wgpu::BufferUsage::COPY_SRC,
);
let buffer = device.create_buffer_with_data(&rgba, wgpu::BufferUsage::COPY_SRC);
let mut encoder = device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("texture_buffer_copy_encoder"),
}
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("texture_buffer_copy_encoder"),
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
@ -136,24 +139,24 @@ impl Texture {
offset: 0,
bytes_per_row: 4 * dimensions.0,
rows_per_image: dimensions.1,
},
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
},
size,
);
// Make sure to do this after you've copied the buffer
// to the texture, otherwise your mipmaps will be black.
Self::generate_mipmaps(
&mut encoder,
&device,
&texture,
&texture_desc,
mip_level_count
&mut encoder,
&device,
&texture,
&texture_desc,
mip_level_count,
);
let cmd_buffer = encoder.finish();
@ -170,8 +173,15 @@ impl Texture {
lod_max_clamp: 100.0,
compare: wgpu::CompareFunction::LessEqual,
});
Ok((Self { texture, view, sampler }, cmd_buffer))
Ok((
Self {
texture,
view,
sampler,
},
cmd_buffer,
))
}
pub fn generate_mipmaps(
@ -181,34 +191,28 @@ impl Texture {
texture_desc: &wgpu::TextureDescriptor,
mip_count: u32,
) {
let bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
component_type: wgpu::TextureComponentType::Float,
dimension: wgpu::TextureViewDimension::D2,
}
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
component_type: wgpu::TextureComponentType::Float,
dimension: wgpu::TextureViewDimension::D2,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
}
}
],
label: None,
}
);
let pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&bind_group_layout],
},
);
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false },
},
],
label: None,
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&bind_group_layout],
});
// This pipeline will render out a texture to another texture.
// We create the mipmaps by rendering to increasingly smaller
@ -220,30 +224,29 @@ impl Texture {
.fragment_shader(include_bytes!("blit.frag.spv"))
// Using wgpu::TriangleStrip makes our lives easier in the shader.
.primitive_topology(wgpu::PrimitiveTopology::TriangleStrip)
.build(device).unwrap();
.build(device)
.unwrap();
// This sampler ensures that the smaller textures get the right
// color data.
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
// Since we are using this sampler to generate mipmaps,
// we don't need it to use the level of detail values.
lod_min_clamp: 0.0,
lod_max_clamp: 0.0,
compare: wgpu::CompareFunction::Always,
}
);
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
// Since we are using this sampler to generate mipmaps,
// we don't need it to use the level of detail values.
lod_min_clamp: 0.0,
lod_max_clamp: 0.0,
compare: wgpu::CompareFunction::Always,
});
// Create a view for every mip level.
let views = (0..mip_count).map(|mip| {
texture.create_view(
&wgpu::TextureViewDescriptor {
let views = (0..mip_count)
.map(|mip| {
texture.create_view(&wgpu::TextureViewDescriptor {
format: texture_desc.format,
dimension: wgpu::TextureViewDimension::D2,
aspect: wgpu::TextureAspect::All,
@ -251,44 +254,38 @@ impl Texture {
level_count: 1,
base_array_layer: 0,
array_layer_count: 1,
}
)
}).collect::<Vec<_>>();
})
})
.collect::<Vec<_>>();
// Skip the first view, as that is the base one
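// Each pass samples mip level `target_mip - 1` and renders it into mip level `target_mip`.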
for target_mip in 1..mip_count as usize {
let bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
// Bind to the view before this one
resource: wgpu::BindingResource::TextureView(&views[target_mip - 1]),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
}
],
label: None,
},
);
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
// Bind to the view before this one
resource: wgpu::BindingResource::TextureView(&views[target_mip - 1]),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
label: None,
});
let mut pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &views[target_mip],
resolve_target: None,
clear_color: wgpu::Color::WHITE,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
}
],
depth_stencil_attachment: None,
},
);
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &views[target_mip],
resolve_target: None,
clear_color: wgpu::Color::WHITE,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
}],
depth_stencil_attachment: None,
});
pass.set_pipeline(&blit_pipeline);
pass.set_bind_group(0, &bind_group, &[]);

@ -1,34 +1,35 @@
use std::mem;
use wgpu::util::{DeviceExt, BufferInitDescriptor};
use wgpu::util::{BufferInitDescriptor, DeviceExt};
pub trait ToRaw {
type Output;
fn to_raw(&self) -> Self::Output;
}
pub struct RawBuffer<R>
where R: Copy + bytemuck::Pod + bytemuck::Zeroable
pub struct RawBuffer<R>
where
R: Copy + bytemuck::Pod + bytemuck::Zeroable,
{
pub buffer: wgpu::Buffer,
pub data: Vec<R>,
}
impl<R: Copy + bytemuck::Pod + bytemuck::Zeroable> RawBuffer<R> {
pub fn from_slice<T: ToRaw<Output=R>>(device: &wgpu::Device, data: &[T], usage: wgpu::BufferUsage) -> Self {
pub fn from_slice<T: ToRaw<Output = R>>(
device: &wgpu::Device,
data: &[T],
usage: wgpu::BufferUsage,
) -> Self {
let raw_data = data.iter().map(ToRaw::to_raw).collect::<Vec<R>>();
Self::from_vec(device, raw_data, usage)
}
pub fn from_vec(device: &wgpu::Device, data: Vec<R>, usage: wgpu::BufferUsage) -> Self {
let buffer = device.create_buffer_init(
&BufferInitDescriptor {
contents: bytemuck::cast_slice(&data),
usage,
label: None,
}
);
let buffer = device.create_buffer_init(&BufferInitDescriptor {
contents: bytemuck::cast_slice(&data),
usage,
label: None,
});
Self::from_parts(buffer, data, usage)
}
@ -41,13 +42,13 @@ impl<R: Copy + bytemuck::Pod + bytemuck::Zeroable> RawBuffer<R> {
}
}
pub struct Buffer<U: ToRaw<Output=R>, R: Copy + bytemuck::Pod + bytemuck::Zeroable> {
pub struct Buffer<U: ToRaw<Output = R>, R: Copy + bytemuck::Pod + bytemuck::Zeroable> {
pub data: Vec<U>,
pub raw_buffer: RawBuffer<R>,
pub usage: wgpu::BufferUsage,
}
impl<U: ToRaw<Output=R>, R: Copy + bytemuck::Pod + bytemuck::Zeroable> Buffer<U, R> {
impl<U: ToRaw<Output = R>, R: Copy + bytemuck::Pod + bytemuck::Zeroable> Buffer<U, R> {
pub fn uniform(device: &wgpu::Device, datum: U) -> Self {
let data = vec![datum];
let usage = wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST;
@ -61,7 +62,9 @@ impl<U: ToRaw<Output=R>, R: Copy + bytemuck::Pod + bytemuck::Zeroable> Buffer<U,
pub fn staging(device: &wgpu::Device, other: &Self) -> Self {
let buffer_size = other.raw_buffer.buffer_size();
let usage = wgpu::BufferUsage::COPY_SRC | wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::MAP_WRITE;
let usage = wgpu::BufferUsage::COPY_SRC
| wgpu::BufferUsage::MAP_READ
| wgpu::BufferUsage::MAP_WRITE;
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
size: buffer_size,
usage,
@ -78,6 +81,10 @@ impl<U: ToRaw<Output=R>, R: Copy + bytemuck::Pod + bytemuck::Zeroable> Buffer<U,
}
pub fn from_parts(data: Vec<U>, raw_buffer: RawBuffer<R>, usage: wgpu::BufferUsage) -> Self {
Self { data, raw_buffer, usage }
Self {
data,
raw_buffer,
usage,
}
}
}
}
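For orientation, the ToRaw / RawBuffer / Buffer trio above is meant to be used roughly as in the following sketch. TimeUniform and TimeRaw are made-up illustration types, not part of this diff; the bytemuck impls use the manual unsafe form rather than the derive feature, since the latter is not listed in the manifests shown here.

// Hypothetical uniform wrapped by the Buffer/ToRaw machinery defined above.
#[repr(C)]
#[derive(Copy, Clone)]
struct TimeRaw {
    seconds: f32,
    _padding: [f32; 3], // keep the raw struct 16 bytes for uniform use
}

// Manual impls instead of #[derive(Pod, Zeroable)] (assumption: derive feature not enabled).
unsafe impl bytemuck::Zeroable for TimeRaw {}
unsafe impl bytemuck::Pod for TimeRaw {}

struct TimeUniform {
    seconds: f32,
}

impl ToRaw for TimeUniform {
    type Output = TimeRaw;
    fn to_raw(&self) -> TimeRaw {
        TimeRaw { seconds: self.seconds, _padding: [0.0; 3] }
    }
}

// Usage, given device: &wgpu::Device:
// let time = Buffer::uniform(device, TimeUniform { seconds: 0.0 });
// time.raw_buffer.buffer is the wgpu::Buffer to bind.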

@ -1,8 +1,8 @@
use cgmath::*;
use winit::event::*;
use winit::dpi::LogicalPosition;
use std::time::Duration;
use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::LogicalPosition;
use winit::event::*;
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
@ -12,11 +12,7 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
0.0, 0.0, 0.5, 1.0,
);
pub fn camera_setup<
V: Into<Point3<f32>>,
Y: Into<Rad<f32>>,
P: Into<Rad<f32>>,
>(
pub fn camera_setup<V: Into<Point3<f32>>, Y: Into<Rad<f32>>, P: Into<Rad<f32>>>(
position: V,
yaw: Y,
pitch: P,
@ -38,11 +34,7 @@ pub struct Camera {
}
impl Camera {
pub fn new<
V: Into<Point3<f32>>,
Y: Into<Rad<f32>>,
P: Into<Rad<f32>>,
>(
pub fn new<V: Into<Point3<f32>>, Y: Into<Rad<f32>>, P: Into<Rad<f32>>>(
position: V,
yaw: Y,
pitch: P,
@ -57,11 +49,7 @@ impl Camera {
pub fn calc_matrix(&self) -> Matrix4<f32> {
Matrix4::look_at_dir(
self.position,
Vector3::new(
self.yaw.0.cos(),
self.pitch.0.sin(),
self.yaw.0.sin(),
).normalize(),
Vector3::new(self.yaw.0.cos(), self.pitch.0.sin(), self.yaw.0.sin()).normalize(),
Vector3::unit_y(),
)
}
@ -75,13 +63,7 @@ pub struct Projection {
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(
width: u32,
height: u32,
fovy: F,
znear: f32,
zfar: f32,
) -> Self {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
@ -131,8 +113,12 @@ impl CameraController {
}
}
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool{
let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.amount_forward = amount;
@ -171,10 +157,7 @@ impl CameraController {
self.scroll = -match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => scroll * 100.0,
MouseScrollDelta::PixelDelta(LogicalPosition {
y: scroll,
..
}) => *scroll as f32,
MouseScrollDelta::PixelDelta(LogicalPosition { y: scroll, .. }) => *scroll as f32,
};
}
@ -193,7 +176,8 @@ impl CameraController {
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward = Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
// Move up/down. Since we don't use roll, we can just
@ -217,4 +201,4 @@ impl CameraController {
camera.pitch = Rad(FRAC_PI_2);
}
}
}
}

@ -3,8 +3,8 @@ mod camera;
mod light;
mod model;
mod pipeline;
mod texture;
pub mod prelude;
mod texture;
pub use buffer::*;
pub use camera::*;
@ -34,20 +34,24 @@ impl Display {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None,
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None,
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
@ -73,7 +77,6 @@ impl Display {
}
}
/**
* Holds the camera data to be passed to wgpu.
*/
@ -98,13 +101,11 @@ impl Uniforms {
view_position: Zero::zero(),
view_proj: cgmath::Matrix4::identity(),
};
let buffer = device.create_buffer_init(
&BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[data]),
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
}
);
let buffer = device.create_buffer_init(&BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[data]),
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
});
Self { data, buffer }
}
@ -115,13 +116,11 @@ impl Uniforms {
}
pub fn update_buffer(&self, device: &wgpu::Device, encoder: &mut wgpu::CommandEncoder) {
let staging_buffer = device.create_buffer_init(
&BufferInitDescriptor {
label: Some("Uniform Update Buffer"),
contents: bytemuck::cast_slice(&[self.data]),
usage: wgpu::BufferUsage::COPY_SRC,
}
);
let staging_buffer = device.create_buffer_init(&BufferInitDescriptor {
label: Some("Uniform Update Buffer"),
contents: bytemuck::cast_slice(&[self.data]),
usage: wgpu::BufferUsage::COPY_SRC,
});
encoder.copy_buffer_to_buffer(
&staging_buffer,
0,
@ -143,51 +142,39 @@ pub struct UniformBinding {
impl UniformBinding {
pub fn new(device: &wgpu::Device, uniforms: &Uniforms) -> Self {
let layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
],
label: Some("UniformBinding::layout"),
}
);
let bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniforms.buffer.slice(..)),
},
],
label: Some("UniformBinding::bind_group")
}
);
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
}],
label: Some("UniformBinding::layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniforms.buffer.slice(..)),
}],
label: Some("UniformBinding::bind_group"),
});
Self { layout, bind_group }
}
pub fn rebind(&mut self, device: &wgpu::Device, uniforms: &Uniforms) {
self.bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
layout: &self.layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniforms.buffer.slice(..))
},
],
label: Some("UniformBinding::bind_group")
}
);
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &self.layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniforms.buffer.slice(..)),
}],
label: Some("UniformBinding::bind_group"),
});
}
}
@ -220,13 +207,15 @@ pub async fn run<D: Demo>() -> Result<(), Error> {
match event {
Event::Resumed => is_resumed = true,
Event::Suspended => is_resumed = false,
Event::RedrawRequested(wid) => if wid == window.id() {
let now = Instant::now();
let dt = now - last_update;
last_update = now;
demo.update(&mut display, dt);
demo.render(&mut display);
Event::RedrawRequested(wid) => {
if wid == window.id() {
let now = Instant::now();
let dt = now - last_update;
last_update = now;
demo.update(&mut display, dt);
demo.render(&mut display);
}
}
Event::MainEventsCleared => {
if is_focused && is_resumed {
@ -237,28 +226,25 @@ pub async fn run<D: Demo>() -> Result<(), Error> {
}
}
Event::WindowEvent {
event,
window_id,
..
} => if window_id == window.id() {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Focused(f) => is_focused = f,
WindowEvent::ScaleFactorChanged {
new_inner_size,
..
} => {
display.resize(new_inner_size.width, new_inner_size.height);
demo.resize(&mut display);
event, window_id, ..
} => {
if window_id == window.id() {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Focused(f) => is_focused = f,
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
display.resize(new_inner_size.width, new_inner_size.height);
demo.resize(&mut display);
}
WindowEvent::Resized(new_inner_size) => {
display.resize(new_inner_size.width, new_inner_size.height);
demo.resize(&mut display);
}
_ => {}
}
WindowEvent::Resized(new_inner_size) => {
display.resize(new_inner_size.width, new_inner_size.height);
demo.resize(&mut display);
}
_ => {}
}
}
_ => {}
}
});
}
}
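The run loop above drives a Demo value through resize/update/render. The trait definition itself is outside this diff, but from the call sites it presumably has roughly the shape below; this is a guess for orientation only, and the receiver mutability and any constructor method are assumptions.

// Sketch of the Demo trait as implied by the calls in run():
// demo.update(&mut display, dt), demo.render(&mut display), demo.resize(&mut display).
pub trait Demo: 'static {
    fn resize(&mut self, display: &mut Display);
    fn update(&mut self, display: &mut Display, dt: std::time::Duration);
    fn render(&mut self, display: &mut Display);
}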

@ -24,13 +24,11 @@ impl Light {
position: Vector4::new(position.x, position.y, position.z, 1.0),
color: Vector4::new(color.x, color.y, color.z, 1.0),
};
let buffer = device.create_buffer_init(
&BufferInitDescriptor {
contents: bytemuck::cast_slice(&[data]),
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
label: Some("Light Buffer"),
}
);
let buffer = device.create_buffer_init(&BufferInitDescriptor {
contents: bytemuck::cast_slice(&[data]),
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
label: Some("Light Buffer"),
});
Self { data, buffer }
}
@ -39,4 +37,4 @@ impl Light {
pub struct LightBinding {
pub layout: wgpu::BindGroupLayout,
pub bind_group: wgpu::BindGroup,
}
}

@ -69,9 +69,9 @@ pub struct Material<'a> {
impl<'a> Material<'a> {
pub fn new(
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture<'a>,
device: &wgpu::Device,
name: &str,
diffuse_texture: texture::Texture<'a>,
normal_texture: texture::Texture<'a>,
layout: &wgpu::BindGroupLayout,
) -> Self {
@ -98,7 +98,7 @@ impl<'a> Material<'a> {
label: Some(name),
});
Self {
Self {
name: String::from(name),
diffuse_texture,
normal_texture,
@ -130,16 +130,17 @@ impl<'a> Model<'a> {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), true)?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture = texture::Texture::load(device, queue, containing_folder.join(diffuse_path), false)?;
let diffuse_texture =
texture::Texture::load(device, queue, containing_folder.join(diffuse_path), false)?;
let normal_path = mat.normal_texture;
let normal_texture = texture::Texture::load(device, queue, containing_folder.join(normal_path), true)?;
let normal_texture =
texture::Texture::load(device, queue, containing_folder.join(normal_path), true)?;
materials.push(Material::new(
device,
@ -159,16 +160,15 @@ impl<'a> Model<'a> {
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
].into(),
tex_coords: [
m.mesh.texcoords[i * 2],
m.mesh.texcoords[i * 2 + 1]
].into(),
]
.into(),
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]].into(),
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
].into(),
]
.into(),
// We'll calculate these later
tangent: [0.0; 3].into(),
bitangent: [0.0; 3].into(),
@ -206,12 +206,12 @@ impl<'a> Model<'a> {
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// Luckily, the place I found this equation provided
// the solution!
let r = 1.0 / (delta_uv1 .x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent = tangent;
vertices[c[1] as usize].tangent = tangent;
@ -222,20 +222,16 @@ impl<'a> Model<'a> {
vertices[c[2] as usize].bitangent = bitangent;
}
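The r / tangent / bitangent expressions above come from inverting the 2x2 UV-delta matrix in the two edge equations quoted in the comments. As a standalone sketch of that solve, using the cgmath types this crate already depends on (the helper name is made up):

// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// i.e.  [delta_pos1]   [delta_uv1.x  delta_uv1.y] [T]
//       [delta_pos2] = [delta_uv2.x  delta_uv2.y] [B]
// Inverting the 2x2 UV matrix (its determinant is 1/r) gives T and B directly.
use cgmath::{Vector2, Vector3};

fn solve_tangent_bitangent(
    delta_pos1: Vector3<f32>,
    delta_pos2: Vector3<f32>,
    delta_uv1: Vector2<f32>,
    delta_uv2: Vector2<f32>,
) -> (Vector3<f32>, Vector3<f32>) {
    let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
    // First row of the inverse applied to the position deltas -> tangent.
    let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
    // Second row of the inverse -> bitangent.
    let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * r;
    (tangent, bitangent)
}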
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
}
);
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsage::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsage::INDEX,
});
meshes.push(Mesh {
name: m.name,

@ -1,5 +1,5 @@
use anyhow::*;
use crate::model::Vertex;
use anyhow::*;
pub struct RenderPipelineBuilder<'a> {
layout: Option<&'a wgpu::PipelineLayout>,
@ -69,7 +69,6 @@ impl<'a> RenderPipelineBuilder<'a> {
self
}
#[allow(dead_code)]
pub fn depth_bias(&mut self, db: i32) -> &mut Self {
self.depth_bias = db;
@ -101,14 +100,12 @@ impl<'a> RenderPipelineBuilder<'a> {
/// Helper method for [RenderPipelineBuilder::color_state]
pub fn color_solid(&mut self, format: wgpu::TextureFormat) -> &mut Self {
self.color_state(
wgpu::ColorStateDescriptor {
format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}
)
self.color_state(wgpu::ColorStateDescriptor {
format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
})
}
pub fn depth_stencil_state(&mut self, dss: wgpu::DepthStencilStateDescriptor) -> &mut Self {
@ -123,14 +120,12 @@ impl<'a> RenderPipelineBuilder<'a> {
depth_write_enabled: bool,
depth_compare: wgpu::CompareFunction,
) -> &mut Self {
self.depth_stencil_state(
wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled,
depth_compare,
stencil: Default::default(),
}
)
self.depth_stencil_state(wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled,
depth_compare,
stencil: Default::default(),
})
}
/// Helper method for [RenderPipelineBuilder::depth_no_stencil]
@ -180,7 +175,7 @@ impl<'a> RenderPipelineBuilder<'a> {
let layout = self.layout.unwrap();
// Render pipelines always have a vertex shader, but due
// to the way the builder pattern works, we can't
// to the way the builder pattern works, we can't
// guarantee that the user will specify one, so we'll
// just return an error if they forgot.
//
@ -190,54 +185,60 @@ impl<'a> RenderPipelineBuilder<'a> {
if self.vertex_shader.is_none() {
bail!("No vertex shader supplied!")
}
let vs = create_shader_module(device, self.vertex_shader.take().context("Please include a vertex shader")?);
let vs = create_shader_module(
device,
self.vertex_shader
.take()
.context("Please include a vertex shader")?,
);
// The fragment shader is optional (IDK why, but it is).
// Having the shader be optional is giving me issues with
// the borrow checker so I'm going to use a default shader
// if the user doesn't supply one.
let fs_spv = self.fragment_shader.take().context("Please include a fragment shader")?;
let fs_spv = self
.fragment_shader
.take()
.context("Please include a fragment shader")?;
let fs = create_shader_module(device, fs_spv);
let pipeline = device.create_render_pipeline(
&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs,
entry_point: "main",
},
fragment_stage: Some(
wgpu::ProgrammableStageDescriptor {
module: &fs,
entry_point: "main",
}
),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: self.front_face,
cull_mode: self.cull_mode,
depth_bias: self.depth_bias,
depth_bias_slope_scale: self.depth_bias_slope_scale,
depth_bias_clamp: self.depth_bias_clamp,
clamp_depth: false,
}),
primitive_topology: self.primitive_topology,
color_states: &self.color_states,
depth_stencil_state: self.depth_stencil_state.clone(),
vertex_state: wgpu::VertexStateDescriptor {
index_format: self.index_format,
vertex_buffers: &self.vertex_buffers,
},
sample_count: self.sample_count,
sample_mask: self.sample_mask,
alpha_to_coverage_enabled: self.alpha_to_coverage_enabled,
}
);
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: self.front_face,
cull_mode: self.cull_mode,
depth_bias: self.depth_bias,
depth_bias_slope_scale: self.depth_bias_slope_scale,
depth_bias_clamp: self.depth_bias_clamp,
clamp_depth: false,
}),
primitive_topology: self.primitive_topology,
color_states: &self.color_states,
depth_stencil_state: self.depth_stencil_state.clone(),
vertex_state: wgpu::VertexStateDescriptor {
index_format: self.index_format,
vertex_buffers: &self.vertex_buffers,
},
sample_count: self.sample_count,
sample_mask: self.sample_mask,
alpha_to_coverage_enabled: self.alpha_to_coverage_enabled,
});
Ok(pipeline)
}
}
fn create_shader_module(device: &wgpu::Device, spirv: wgpu::ShaderModuleSource) -> wgpu::ShaderModule {
fn create_shader_module(
device: &wgpu::Device,
spirv: wgpu::ShaderModuleSource,
) -> wgpu::ShaderModule {
device.create_shader_module(spirv)
}
}

@ -1 +1 @@
pub use crate::model::{DrawLight, DrawModel};
pub use crate::model::{DrawLight, DrawModel};

@ -1,11 +1,10 @@
use anyhow::*;
use image::GenericImageView;
use std::path::Path;
use std::mem;
use anyhow::*;
use std::path::Path;
use crate::buffer;
pub struct Texture<'a> {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
@ -28,10 +27,7 @@ impl<'a> Texture<'a> {
Self::from_image(device, queue, &img, Some(label), is_normal_map)
}
pub fn from_descriptor(
device: &wgpu::Device,
desc: wgpu::TextureDescriptor<'a>
) -> Self {
pub fn from_descriptor(device: &wgpu::Device, desc: wgpu::TextureDescriptor<'a>) -> Self {
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
@ -48,7 +44,12 @@ impl<'a> Texture<'a> {
..Default::default()
});
Self { texture, view, sampler, desc }
Self {
texture,
view,
sampler,
desc,
}
}
pub fn from_bytes(
@ -59,7 +60,7 @@ impl<'a> Texture<'a> {
bytes: &[u8],
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, label, is_normal_map)
Self::from_image(device, queue, &img, label, is_normal_map)
}
pub fn from_image(
@ -120,11 +121,19 @@ impl<'a> Texture<'a> {
compare: Some(wgpu::CompareFunction::Always),
..Default::default()
});
Ok(Self { texture, view, sampler, desc })
Ok(Self {
texture,
view,
sampler,
desc,
})
}
pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
pub fn create_depth_texture(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
) -> Self {
let desc = wgpu::TextureDescriptor {
label: None,
size: wgpu::Extent3d {
@ -141,10 +150,10 @@ impl<'a> Texture<'a> {
Self::from_descriptor(device, desc)
}
pub fn prepare_buffer_rgba(&self, device: &wgpu::Device) -> buffer::RawBuffer<[f32;4]> {
pub fn prepare_buffer_rgba(&self, device: &wgpu::Device) -> buffer::RawBuffer<[f32; 4]> {
let num_pixels = self.desc.size.width * self.desc.size.height * self.desc.size.depth;
let buffer_size = num_pixels * mem::size_of::<[f32;4]>() as u32;
let buffer_size = num_pixels * mem::size_of::<[f32; 4]>() as u32;
let buffer_usage = wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ;
let buffer_desc = wgpu::BufferDescriptor {
size: buffer_size as wgpu::BufferAddress,
@ -160,4 +169,4 @@ impl<'a> Texture<'a> {
raw_buffer
}
}
}

@ -4,7 +4,7 @@ use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -15,7 +15,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -29,34 +30,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert")?,
glob("./src/**/*.frag")?,
glob("./src/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -65,11 +68,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -88,4 +91,4 @@ fn main() -> Result<()> {
}
Ok(())
}
}

@ -5,18 +5,22 @@ use std::{iter, mem};
async fn run() {
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions::default()
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions::default())
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let colors = [
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.2],
@ -40,8 +44,7 @@ async fn run() {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::OUTPUT_ATTACHMENT,
usage: wgpu::TextureUsage::COPY_SRC | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
label: None,
};
let render_target = framework::Texture::from_descriptor(&device, rt_desc);
@ -50,7 +53,7 @@ async fn run() {
// wgpu::COPY_BYTES_PER_ROW_ALIGNMENT. Because of this we'll
// need to save both the padded_bytes_per_row as well as the
// unpadded_bytes_per_row
let pixel_size = mem::size_of::<[u8;4]>() as u32;
let pixel_size = mem::size_of::<[u8; 4]>() as u32;
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
let unpadded_bytes_per_row = pixel_size * texture_size;
let padding = (align - unpadded_bytes_per_row % align) % align;
@ -72,28 +75,23 @@ async fn run() {
let mut frames = Vec::new();
for c in &colors {
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: None,
});
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &render_target.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(
wgpu::Color {
r: c[0],
g: c[1],
b: c[2],
a: 1.0,
}
),
store: true,
},
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &render_target.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: c[0],
g: c[1],
b: c[2],
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -107,51 +105,50 @@ async fn run() {
texture: &render_target.texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
},
wgpu::BufferCopyView {
buffer: &output_buffer,
layout: wgpu::TextureDataLayout {
offset: 0,
bytes_per_row: padded_bytes_per_row,
rows_per_image: texture_size,
}
},
},
render_target.desc.size
render_target.desc.size,
);
queue.submit(iter::once(encoder.finish()));
// Create the map request
let buffer_slice = output_buffer.slice(..);
let request = buffer_slice.map_async(wgpu::MapMode::Read);
// wait for the GPU to finish
device.poll(wgpu::Maintain::Wait);
let result = request.await;
match result {
Ok(()) => {
let padded_data = buffer_slice.get_mapped_range();
let data = padded_data
.chunks(padded_bytes_per_row as _)
.map(|chunk| { &chunk[..unpadded_bytes_per_row as _]})
.map(|chunk| &chunk[..unpadded_bytes_per_row as _])
.flatten()
.map(|x| { *x })
.map(|x| *x)
.collect::<Vec<_>>();
drop(padded_data);
output_buffer.unmap();
frames.push(data);
}
_ => { eprintln!("Something went wrong") }
_ => eprintln!("Something went wrong"),
}
}
save_gif("output.gif", &mut frames, 10, texture_size as u16).unwrap();
}
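The padded/unpadded bytes-per-row bookkeeping near the top of run() follows one rule: the bytes_per_row handed to copy_texture_to_buffer must be a multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT, and the padding is stripped again after mapping. A small sketch of that rule (the helper name is made up):

// Returns (unpadded, padded) bytes per row for a `width`-pixel row of
// `bytes_per_pixel`-sized texels.
fn row_pitches(width: u32, bytes_per_pixel: u32) -> (u32, u32) {
    let unpadded = width * bytes_per_pixel;
    let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; // 256 in wgpu 0.6
    let padding = (align - unpadded % align) % align;
    (unpadded, unpadded + padding)
}

// e.g. row_pitches(256, 4) == (1024, 1024), while row_pitches(300, 4) == (1200, 1280).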
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<()> {
use gif::{Frame, Encoder, Repeat, SetParameter};
use gif::{Encoder, Frame, Repeat, SetParameter};
let mut image = std::fs::File::create(path)?;
let mut encoder = Encoder::new(&mut image, size, size, &[])?;
encoder.set(Repeat::Infinite)?;
@ -163,8 +160,10 @@ fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Res
Ok(())
}
fn create_render_pipeline(device: &wgpu::Device, target: &framework::Texture) -> wgpu::RenderPipeline {
fn create_render_pipeline(
device: &wgpu::Device,
target: &framework::Texture,
) -> wgpu::RenderPipeline {
let vs_src = wgpu::include_spirv!("shader.vert.spv");
let fs_src = wgpu::include_spirv!("shader.frag.spv");
let vs_module = device.create_shader_module(vs_src);
@ -189,14 +188,12 @@ fn create_render_pipeline(device: &wgpu::Device, target: &framework::Texture) ->
}),
rasterization_state: None,
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: target.desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
color_states: &[wgpu::ColorStateDescriptor {
format: target.desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,

@ -4,29 +4,20 @@ version = "0.1.0"
authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0"
bytemuck = "1.3"
image = "0.23"
framework = { path = "../framework"}
bytemuck = "1.4"
cgmath = "0.17"
env_logger = "0.7"
futures = "0.3"
rand = "0.7"
tobj = "2"
image = "0.23"
log = "0.4"
tobj = "2.0"
wgpu = "0.6"
winit = "0.22"
wgpu = "0.5"
[dependencies.cgmath]
version = "0.17"
features = ["swizzle"]
[build-dependencies]
failure = "0.1"
fs_extra = "1.1"
anyhow = "1.0"
fs_extra = "1.2"
glob = "0.3"
shaderc = "0.6"
[[bin]]
name = "instancing-storage-buffers"
path = "src/storage_buffers.rs"
shaderc = "0.6"

@ -1,10 +1,10 @@
use glob::glob;
use failure::bail;
use fs_extra::copy_items;
use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
fn main() {
copy_res();
@ -26,20 +26,19 @@ fn copy_res() {
fn compile_shaders() {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=src/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./src/**/*.vert").unwrap(),
glob("./src/**/*.frag").unwrap(),
glob("./src/**/*.comp").unwrap(),
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result.unwrap()).unwrap()
})
.map(|glob_result| ShaderData::load(glob_result.unwrap()).unwrap())
.collect::<Vec<ShaderData>>();
let mut compiler = shaderc::Compiler::new().unwrap();
@ -50,13 +49,15 @@ fn compile_shaders() {
// be better just to only compile shaders that have been changed
// recently.
for shader in shaders {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
).unwrap();
let compiled = compiler
.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)
.unwrap();
write(shader.spv_path, compiled.as_binary_u8()).unwrap();
}
@ -83,6 +84,11 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
}

@ -4,7 +4,7 @@ use fs_extra::dir::CopyOptions;
use glob::glob;
use std::env;
use std::fs::{read_to_string, write};
use std::path::{PathBuf};
use std::path::PathBuf;
struct ShaderData {
src: String,
@ -15,7 +15,8 @@ struct ShaderData {
impl ShaderData {
pub fn load(src_path: PathBuf) -> Result<Self> {
let extension = src_path.extension()
let extension = src_path
.extension()
.context("File has no extension")?
.to_str()
.context("Extension cannot be converted to &str")?;
@ -29,34 +30,36 @@ impl ShaderData {
let src = read_to_string(src_path.clone())?;
let spv_path = src_path.with_extension(format!("{}.spv", extension));
Ok(Self { src, src_path, spv_path, kind })
Ok(Self {
src,
src_path,
spv_path,
kind,
})
}
}
fn main() -> Result<()> {
// This tells cargo to rerun this script if something in /src/ changes.
println!("cargo:rerun-if-changed=res/*");
// Collect all shaders recursively within /src/
let mut shader_paths = [
glob("./res/**/*.vert")?,
glob("./res/**/*.frag")?,
glob("./res/**/*.comp")?,
];
// This could be parallelized
let shaders = shader_paths.iter_mut()
let shaders = shader_paths
.iter_mut()
.flatten()
.map(|glob_result| {
ShaderData::load(glob_result?)
})
.map(|glob_result| ShaderData::load(glob_result?))
.collect::<Vec<Result<_>>>()
.into_iter()
.collect::<Result<Vec<_>>>();
let mut compiler = shaderc::Compiler::new()
.context("Unable to create shader compiler")?;
let mut compiler = shaderc::Compiler::new().context("Unable to create shader compiler")?;
// This can't be parallelized. The [shaderc::Compiler] is not
// thread safe. Also, it creates a lot of resources. You could
@ -65,11 +68,11 @@ fn main() -> Result<()> {
// recently.
for shader in shaders? {
let compiled = compiler.compile_into_spirv(
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None
&shader.src,
shader.kind,
&shader.src_path.to_str().unwrap(),
"main",
None,
)?;
write(shader.spv_path, compiled.as_binary_u8())?;
}
@ -88,4 +91,4 @@ fn main() -> Result<()> {
}
Ok(())
}
}

@ -1,4 +1,4 @@
use winit::event::{VirtualKeyCode, ElementState};
use winit::event::{ElementState, VirtualKeyCode};
#[derive(Debug, Default)]
pub struct Input {
@ -37,7 +37,7 @@ impl Input {
self.enter_pressed = pressed;
true
}
_ => false
_ => false,
}
}
@ -48,4 +48,4 @@ impl Input {
pub fn ui_down_pressed(&self) -> bool {
self.p1_down_pressed || self.p2_down_pressed
}
}
}

@ -1,18 +1,17 @@
mod input;
mod render;
mod sound;
mod state;
mod util;
mod system;
mod sound;
mod input;
mod util;
use system::System;
use input::Input;
use system::System;
use winit::event::*;
use winit::window::{WindowBuilder, Fullscreen};
use winit::event_loop::{EventLoop, ControlFlow};
use futures::executor::block_on;
use winit::event::*;
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::{Fullscreen, WindowBuilder};
fn main() {
let event_loop = EventLoop::new();
@ -22,7 +21,8 @@ fn main() {
.with_visible(false)
.with_title("Pong")
.with_fullscreen(Some(Fullscreen::Exclusive(video_mode.clone())))
.build(&event_loop).unwrap();
.build(&event_loop)
.unwrap();
window.set_cursor_visible(false);
let mut render = block_on(render::Render::new(&window, &video_mode));
@ -124,14 +124,16 @@ fn main() {
state.game_state = state::GameState::Quiting;
}
Event::WindowEvent {
event: WindowEvent::KeyboardInput {
input: KeyboardInput {
state: element_state,
virtual_keycode: Some(key),
event:
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: element_state,
virtual_keycode: Some(key),
..
},
..
},
..
},
..
} => {
let input_handled = match state.game_state {
@ -145,8 +147,7 @@ fn main() {
Event::RedrawRequested(_) => {
for event in &events {
match event {
state::Event::FocusChanged
| state::Event::ButtonPressed => {
state::Event::FocusChanged | state::Event::ButtonPressed => {
sound_system.queue(sound_pack.bounce());
}
state::Event::BallBounce(_pos) => {
@ -154,8 +155,7 @@ fn main() {
}
state::Event::Score(_) => {
sound_system.queue(sound_pack.bounce());
}
// _ => {}
} // _ => {}
}
}
events.clear();
@ -167,14 +167,14 @@ fn main() {
if state.game_state == state::GameState::Serving {
serving_system.start(&mut state);
}
},
}
state::GameState::Serving => {
serving_system.update_state(&input, &mut state, &mut events);
play_system.update_state(&input, &mut state, &mut events);
if state.game_state == state::GameState::Playing {
play_system.start(&mut state);
}
},
}
state::GameState::Playing => {
ball_system.update_state(&input, &mut state, &mut events);
play_system.update_state(&input, &mut state, &mut events);
@ -183,14 +183,14 @@ fn main() {
} else if state.game_state == state::GameState::GameOver {
game_over_system.start(&mut state);
}
},
}
state::GameState::GameOver => {
game_over_system.update_state(&input, &mut state, &mut events);
if state.game_state == state::GameState::MainMenu {
menu_system.start(&mut state);
}
},
state::GameState::Quiting => {},
}
state::GameState::Quiting => {}
}
render.render_state(&state);
@ -203,11 +203,10 @@ fn main() {
});
}
fn process_input(
element_state: ElementState,
keycode: VirtualKeyCode,
control_flow: &mut ControlFlow,
control_flow: &mut ControlFlow,
) {
match (keycode, element_state) {
(VirtualKeyCode::Escape, ElementState::Pressed) => {
@ -215,4 +214,4 @@ fn process_input(
}
_ => {}
}
}
}

@ -1,5 +1,5 @@
use crate::util::size_of_slice;
use crate::state;
use crate::util::size_of_slice;
use wgpu::util::{BufferInitDescriptor, DeviceExt};
pub const U32_SIZE: wgpu::BufferAddress = std::mem::size_of::<u32>() as wgpu::BufferAddress;
@ -45,7 +45,7 @@ impl QuadBufferBuilder {
let min_y = ball.position.y - ball.radius;
let max_x = ball.position.x + ball.radius;
let max_y = ball.position.y + ball.radius;
self.push_quad(min_x, min_y, max_x, max_y)
} else {
self
@ -55,10 +55,10 @@ impl QuadBufferBuilder {
pub fn push_player(self, player: &state::Player) -> Self {
if player.visible {
self.push_quad(
player.position.x - player.size.x * 0.5,
player.position.y - player.size.y * 0.5,
player.position.x - player.size.x * 0.5,
player.position.y - player.size.y * 0.5,
player.position.x + player.size.x * 0.5,
player.position.y + player.size.y * 0.5,
player.position.y + player.size.y * 0.5,
)
} else {
self
@ -109,14 +109,12 @@ pub struct StagingBuffer {
impl StagingBuffer {
pub fn new<T: bytemuck::Pod + Sized>(device: &wgpu::Device, data: &[T]) -> StagingBuffer {
StagingBuffer {
buffer: device.create_buffer_init(
&BufferInitDescriptor {
contents: bytemuck::cast_slice(data),
usage: wgpu::BufferUsage::COPY_SRC,
label: Some("Staging Buffer"),
}
),
size: size_of_slice(data) as wgpu::BufferAddress
buffer: device.create_buffer_init(&BufferInitDescriptor {
contents: bytemuck::cast_slice(data),
usage: wgpu::BufferUsage::COPY_SRC,
label: Some("Staging Buffer"),
}),
size: size_of_slice(data) as wgpu::BufferAddress,
}
}
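The StagingBuffer built above is later drained with stg_vertex.copy_to_buffer(&mut encoder, &self.vertex_buffer) in render.rs. That method is not part of this diff; presumably it is a thin wrapper over copy_buffer_to_buffer, roughly like this sketch:

impl StagingBuffer {
    // Copy the whole staging buffer into `other`, starting at offset 0 on both sides.
    pub fn copy_to_buffer(&self, encoder: &mut wgpu::CommandEncoder, other: &wgpu::Buffer) {
        encoder.copy_buffer_to_buffer(&self.buffer, 0, other, 0, self.size)
    }
}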

@ -2,9 +2,9 @@ mod buffer;
use std::iter;
use winit::window::{Window};
use winit::monitor::{VideoMode};
use wgpu_glyph::{ab_glyph, Section, Text};
use winit::monitor::VideoMode;
use winit::window::Window;
use buffer::*;
@ -43,20 +43,24 @@ impl Render {
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
).await.unwrap();
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
).await.unwrap();
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let size = video_mode.size();
let sc_desc = wgpu::SwapChainDescriptor {
@ -68,19 +72,17 @@ impl Render {
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let pipeline_layout = device.create_pipeline_layout(
&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[],
push_constant_ranges: &[],
label: Some("Pipeline Layout"),
}
);
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[],
push_constant_ranges: &[],
label: Some("Pipeline Layout"),
});
let pipeline = create_render_pipeline(
&device,
&pipeline_layout,
sc_desc.format,
&[Vertex::DESC],
wgpu::include_spirv!("../../res/shaders/textured.vert.spv"),
&device,
&pipeline_layout,
sc_desc.format,
&[Vertex::DESC],
wgpu::include_spirv!("../../res/shaders/textured.vert.spv"),
wgpu::include_spirv!("../../res/shaders/textured.frag.spv"),
);
@ -90,7 +92,7 @@ impl Render {
usage: wgpu::BufferUsage::VERTEX | wgpu::BufferUsage::COPY_DST,
mapped_at_creation: false,
});
let index_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: U32_SIZE * 6 * 3,
@ -99,8 +101,8 @@ impl Render {
});
let font = ab_glyph::FontArc::try_from_slice(FONT_BYTES).unwrap();
let glyph_brush = wgpu_glyph::GlyphBrushBuilder::using_font(font)
.build(&device, sc_desc.format);
let glyph_brush =
wgpu_glyph::GlyphBrushBuilder::using_font(font).build(&device, sc_desc.format);
let staging_belt = wgpu::util::StagingBelt::new(1024);
Self {
@ -119,49 +121,44 @@ impl Render {
}
pub fn render_state(&mut self, state: &state::State) {
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: None,
});
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let num_indices = if state.ball.visible
|| state.player1.visible
|| state.player2.visible
{
let num_indices = if state.ball.visible || state.player1.visible || state.player2.visible {
let (stg_vertex, stg_index, num_indices) = QuadBufferBuilder::new()
.push_ball(&state.ball)
.push_player(&state.player1)
.push_player(&state.player2)
.build(&self.device);
stg_vertex.copy_to_buffer(&mut encoder, &self.vertex_buffer);
stg_index.copy_to_buffer(&mut encoder, &self.index_buffer);
num_indices
} else {
0
};
match self.swap_chain.get_current_frame() {
Ok(frame) => {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.output.view,
resolve_target: None,
ops: wgpu::Operations::default(),
},
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.output.view,
resolve_target: None,
ops: wgpu::Operations::default(),
}],
depth_stencil_attachment: None,
});
if num_indices != 0 {
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(self.index_buffer.slice(..));
render_pass.set_pipeline(&self.pipeline);
render_pass.draw_indexed(0..num_indices, 0, 0..1);
}
drop(render_pass);
if state.title_text.visible {
draw_text(&state.title_text, &mut self.glyph_brush);
}
@ -180,16 +177,18 @@ impl Render {
if state.win_text.visible {
draw_text(&state.win_text, &mut self.glyph_brush);
}
self.glyph_brush.draw_queued(
&self.device,
&mut self.staging_belt,
&mut encoder,
&frame.output.view,
self.sc_desc.width,
self.sc_desc.height,
).unwrap();
self.glyph_brush
.draw_queued(
&self.device,
&mut self.staging_belt,
&mut encoder,
&frame.output.view,
self.sc_desc.width,
self.sc_desc.height,
)
.unwrap();
self.staging_belt.finish();
self.queue.submit(iter::once(encoder.finish()));
}
@ -203,43 +202,37 @@ impl Render {
}
}
fn draw_text(
text: &state::Text,
glyph_brush: &mut wgpu_glyph::GlyphBrush<()>,
) {
let layout = wgpu_glyph::Layout::default()
.h_align(
if text.centered {
wgpu_glyph::HorizontalAlign::Center
} else {
wgpu_glyph::HorizontalAlign::Left
}
);
let section = Section {
screen_position: text.position.into(),
bounds: text.bounds.into(),
layout,
..Section::default()
}.add_text(
Text::new(&text.text)
.with_color(text.color)
.with_scale(if text.focused {
fn draw_text(text: &state::Text, glyph_brush: &mut wgpu_glyph::GlyphBrush<()>) {
let layout = wgpu_glyph::Layout::default().h_align(if text.centered {
wgpu_glyph::HorizontalAlign::Center
} else {
wgpu_glyph::HorizontalAlign::Left
});
let section =
Section {
screen_position: text.position.into(),
bounds: text.bounds.into(),
layout,
..Section::default()
}
.add_text(Text::new(&text.text).with_color(text.color).with_scale(
if text.focused {
text.size + 8.0
} else {
text.size
})
);
},
));
glyph_brush.queue(section);
}
fn create_render_pipeline(
device: &wgpu::Device,
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: wgpu::ShaderModuleSource,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: wgpu::ShaderModuleSource,
fs_src: wgpu::ShaderModuleSource,
) -> wgpu::RenderPipeline {
let vs_module = device.create_shader_module(vs_src);

@ -15,12 +15,8 @@ impl SoundSystem {
let sink = rodio::Sink::new(&device);
sink.set_volume(0.5);
let spatial_sink = rodio::SpatialSink::new(
&device,
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
);
let spatial_sink =
rodio::SpatialSink::new(&device, [0.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]);
Self {
device,
@ -30,8 +26,8 @@ impl SoundSystem {
}
#[inline]
pub fn queue<S>(&self, sound: S)
where
pub fn queue<S>(&self, sound: S)
where
S: rodio::Source + Send + 'static,
S::Item: rodio::Sample,
S::Item: Send,
@ -65,4 +61,4 @@ impl SoundPack {
pub fn bounce(&self) -> rodio::Decoder<Cursor<&'static [u8]>> {
rodio::Decoder::new(self.bounce.clone()).unwrap()
}
}
}

@ -1,4 +1,3 @@
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum GameState {
MainMenu,
@ -50,10 +49,7 @@ impl Player {
let b_min = ball.position - b_radii;
let b_max = ball.position + b_radii;
min.x < b_max.x
&& max.x > b_min.x
&& min.y < b_max.y
&& max.y > b_min.y
min.x < b_max.x && max.x > b_min.x && min.y < b_max.y && max.y > b_min.y
}
}
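The one-line check in contains above is the standard axis-aligned overlap test: on each axis, two intervals intersect exactly when each minimum sits below the other's maximum. A minimal standalone sketch (function names are illustrative):

// Two intervals [a_min, a_max] and [b_min, b_max] overlap when each one
// starts before the other one ends.
fn intervals_overlap(a_min: f32, a_max: f32, b_min: f32, b_max: f32) -> bool {
    a_min < b_max && a_max > b_min
}

// Axis-aligned boxes overlap when their x and y intervals both overlap.
fn aabb_overlap(a_min: (f32, f32), a_max: (f32, f32), b_min: (f32, f32), b_max: (f32, f32)) -> bool {
    intervals_overlap(a_min.0, a_max.0, b_min.0, b_max.0)
        && intervals_overlap(a_min.1, a_max.1, b_min.1, b_max.1)
}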
@ -115,4 +111,4 @@ mod test {
assert!(p1.contains(&ball));
}
}
}

@ -1,14 +1,14 @@
use crate::state::{self, GameState};
use crate::input;
use crate::state::{self, GameState};
use crate::util;
pub trait System {
#[allow(unused_variables)]
fn start(&mut self, state: &mut state::State) {}
fn update_state(
&self,
input: &input::Input,
state: &mut state::State,
&self,
input: &input::Input,
state: &mut state::State,
events: &mut Vec<state::Event>,
);
}
@ -16,14 +16,19 @@ pub trait System {
pub struct VisibilitySystem;
impl System for VisibilitySystem {
fn update_state(
&self,
_input: &input::Input,
state: &mut state::State,
&self,
_input: &input::Input,
state: &mut state::State,
_events: &mut Vec<state::Event>,
) {
let gs = state.game_state;
let is_in_game = any!(gs, GameState::Serving, GameState::Playing, GameState::GameOver);
let is_in_game = any!(
gs,
GameState::Serving,
GameState::Playing,
GameState::GameOver
);
state.ball.visible = is_in_game && gs != GameState::GameOver;
state.player1.visible = is_in_game;
state.player1_score.visible = is_in_game;
@ -52,12 +57,11 @@ impl System for MenuSystem {
}
fn update_state(
&self,
&self,
input: &input::Input,
state: &mut state::State,
events: &mut Vec<state::Event>
state: &mut state::State,
events: &mut Vec<state::Event>,
) {
if state.play_button.focused && input.ui_down_pressed() {
events.push(state::Event::FocusChanged);
state.play_button.focused = false;
@ -80,12 +84,11 @@ impl System for MenuSystem {
pub struct PlaySystem;
impl System for PlaySystem {
fn update_state(
&self,
&self,
input: &input::Input,
state: &mut state::State,
_events: &mut Vec<state::Event>,
_events: &mut Vec<state::Event>,
) {
// move the players
if input.p1_up_pressed {
@ -119,14 +122,13 @@ impl System for PlaySystem {
}
}
pub struct BallSystem;
impl System for BallSystem {
fn update_state(
&self,
&self,
_input: &input::Input,
state: &mut state::State,
state: &mut state::State,
events: &mut Vec<state::Event>,
) {
// bounce the ball off the players
@ -164,7 +166,6 @@ impl System for BallSystem {
}
}
pub struct ServingSystem {
last_time: std::time::Instant,
}
@ -190,7 +191,7 @@ impl System for ServingSystem {
fn update_state(
&self,
_input: &input::Input,
state: &mut state::State,
state: &mut state::State,
_events: &mut Vec<state::Event>,
) {
let current_time = std::time::Instant::now();
@ -207,7 +208,7 @@ pub struct GameOverSystem {
impl GameOverSystem {
pub fn new() -> Self {
Self {
Self {
last_time: std::time::Instant::now(),
}
}
@ -219,7 +220,7 @@ impl System for GameOverSystem {
state.player1_score.text = format!("{}", state.player1.score);
state.player2_score.text = format!("{}", state.player2.score);
state.win_text.text = if state.player1.score > state.player2.score {
String::from("Player 1 wins!")
} else {
@ -227,7 +228,8 @@ impl System for GameOverSystem {
};
}
fn update_state(&self,
fn update_state(
&self,
_input: &input::Input,
state: &mut state::State,
_events: &mut Vec<state::Event>,
@ -238,4 +240,4 @@ impl System for GameOverSystem {
state.game_state = state::GameState::MainMenu;
}
}
}
}

@ -29,7 +29,6 @@ macro_rules! any {
};
}
#[cfg(test)]
mod test {
#[allow(unused_imports)]
@ -55,4 +54,4 @@ mod test {
state::GameState::Quiting,
));
}
}
}

@ -5,7 +5,9 @@ async fn run() {
compatible_surface: None,
},
wgpu::BackendBit::PRIMARY,
).await.unwrap();
)
.await
.unwrap();
let (device, queue) = adapter.request_device(&Default::default()).await;
let texture_size = 256u32;
@ -20,9 +22,7 @@ async fn run() {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::OUTPUT_ATTACHMENT
,
usage: wgpu::TextureUsage::COPY_SRC | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
label: None,
};
let texture = device.create_texture(&texture_desc);
@ -44,8 +44,24 @@ async fn run() {
let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag");
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(vs_src, shaderc::ShaderKind::Vertex, "shader.vert", "main", None).unwrap();
let fs_spirv = compiler.compile_into_spirv(fs_src, shaderc::ShaderKind::Fragment, "shader.frag", "main", None).unwrap();
let vs_spirv = compiler
.compile_into_spirv(
vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)
.unwrap();
let fs_spirv = compiler
.compile_into_spirv(
fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)
.unwrap();
let vs_data = wgpu::read_spirv(std::io::Cursor::new(vs_spirv.as_binary_u8())).unwrap();
let fs_data = wgpu::read_spirv(std::io::Cursor::new(fs_spirv.as_binary_u8())).unwrap();
let vs_module = device.create_shader_module(&vs_data);
@ -73,14 +89,12 @@ async fn run() {
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: texture_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
color_states: &[wgpu::ColorStateDescriptor {
format: texture_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
@ -91,26 +105,23 @@ async fn run() {
alpha_to_coverage_enabled: false,
});
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: None,
});
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let render_pass_desc = wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &texture_view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &texture_view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}],
depth_stencil_attachment: None,
};
let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
@ -141,16 +152,12 @@ async fn run() {
// the application will freeze.
let mapping = output_buffer.map_read(0, output_buffer_size);
device.poll(wgpu::Maintain::Wait);
let result = mapping.await.unwrap();
let data = result.as_slice();
use image::{ImageBuffer, Rgba};
let buffer = ImageBuffer::<Rgba<u8>, _>::from_raw(
texture_size,
texture_size,
data,
).unwrap();
let buffer = ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
buffer.save("image.png").unwrap();
}
@ -158,4 +165,4 @@ async fn run() {
fn main() {
use futures::executor::block_on;
block_on(run());
}
}

@ -1 +1 @@
Subproject commit e5b27f1c61c59e17b315e1044ae582e67943ee29
Subproject commit d764e13f7b3afe4455cb376ae3eb562c001c6752