migrated tutorial7

pull/29/head
Ben Hansen 4 years ago
parent 66eeae019c
commit a7833f3c16

Cargo.lock (generated)

@@ -2009,12 +2009,14 @@ dependencies = [
name = "tutorial7-instancing"
version = "0.1.0"
dependencies = [
"bytemuck 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.23.4 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.22.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -2028,7 +2030,7 @@ dependencies = [
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.23.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.5.0 (git+https://github.com/gfx-rs/wgpu-rs.git)",
"wgpu 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.22.1 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -7,12 +7,14 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22"
winit = "0.20"
image = "0.23.4"
winit = "0.22.1"
glsl-to-spirv = "0.1"
wgpu = "0.4"
cgmath = "0.17"
failure = "0.1"
wgpu = "0.5.0"
futures = "0.3.4"
bytemuck = "1.2.0"
[[bin]]
name = "tutorial7-instancing"

@@ -1,608 +0,0 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
model: [cgmath::Matrix4<f32>; NUM_INSTANCES as usize],
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
model: [cgmath::Matrix4::identity(); NUM_INSTANCES as usize],
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
let (device, mut queue) = adapter.request_device(&Default::default());
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
}
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
}
],
});
let vs_src = include_str!("arrays.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect();
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
for (i, instance) in self.instances.iter().enumerate() {
self.uniforms.model[i] = instance.to_matrix();
}
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..NUM_INSTANCES);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
});
}

@@ -5,6 +5,8 @@ use winit::{
};
use cgmath::prelude::*;
mod texture;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
@@ -12,6 +14,9 @@ struct Vertex {
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
@@ -35,11 +40,11 @@ impl Vertex {
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
@@ -51,7 +56,7 @@ const INDICES: &[u16] = &[
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
@@ -81,6 +86,9 @@ struct Uniforms {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
@@ -205,9 +213,7 @@ struct State {
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
@@ -222,108 +228,58 @@ struct State {
}
impl State {
fn new(window: &Window) -> Self {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
}).unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@@ -331,17 +287,18 @@ impl State {
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
eye: (0.0, 5.0, 10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
@@ -354,20 +311,22 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
}
]
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@@ -379,8 +338,9 @@ impl State {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
}
},
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("naive.vert");
@@ -423,21 +383,25 @@ impl State {
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
const NUM_INSTANCES_PER_ROW: u32 = 10;
@@ -472,8 +436,6 @@ impl State {
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
camera,
camera_controller,
@@ -486,7 +448,7 @@ impl State {
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
@@ -499,28 +461,30 @@ impl State {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("update encoder"),
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("Render Encoder"),
});
{
@@ -545,9 +509,10 @@ impl State {
for instance in &self.instances {
self.uniforms.model = instance.to_matrix();
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@@ -571,8 +536,8 @@ impl State {
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
@@ -588,16 +553,17 @@ fn main() {
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
@@ -610,26 +576,29 @@ fn main() {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
// new_inner_size is &mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
});
}
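The event-loop changes in the hunk above are easy to lose in the diff noise: input handling falls through only when state.input returns false, update and render move from MainEventsCleared to RedrawRequested (wrapped in block_on because the tutorial made them async), and MainEventsCleared now just asks for the next frame. The skeleton below is a sketch rather than part of the commit, with the tutorial's State calls reduced to comments.

// Sketch (not part of the commit): the redraw-driven loop shape the hunk above
// moves to; the tutorial's State calls are stubbed out as comments.
use winit::{
    event::{Event, WindowEvent},
    event_loop::{ControlFlow, EventLoop},
    window::WindowBuilder,
};

fn run() {
    let event_loop = EventLoop::new();
    let window = WindowBuilder::new().build(&event_loop).unwrap();
    event_loop.run(move |event, _, control_flow| match event {
        Event::WindowEvent { window_id, event: WindowEvent::CloseRequested }
            if window_id == window.id() =>
        {
            *control_flow = ControlFlow::Exit;
        }
        Event::RedrawRequested(_) => {
            // block_on(state.update()); block_on(state.render());
        }
        Event::MainEventsCleared => {
            // RedrawRequested only fires once per request, so ask for another frame.
            window.request_redraw();
        }
        _ => {}
    });
}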

@@ -1,632 +0,0 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
}).unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
}
}
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Buffer {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
}
],
});
let vs_src = include_str!("storage.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
});
}
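The hunk that follows replaces the old wgpu 0.4 texture-instancing source with the texture::Texture helper that main.rs pulls in via mod texture: from_bytes decodes the image and from_image records the buffer-to-texture copy, returning both the texture and the command buffer to the caller. A usage sketch, not part of the commit and assuming the texture module from this hunk is in scope, mirroring the call already shown in State::new above:

// Sketch (not part of the commit): consuming the new texture module the way
// State::new does earlier in this diff.
fn load_diffuse(device: &wgpu::Device, queue: &wgpu::Queue) -> texture::Texture {
    let diffuse_bytes = include_bytes!("happy-tree.png");
    let (diffuse_texture, cmd_buffer) =
        texture::Texture::from_bytes(device, diffuse_bytes, "happy-tree.png").unwrap();
    // from_bytes only records the copy; the caller still has to submit the
    // returned command buffer before the texture can be sampled.
    queue.submit(&[cmd_buffer]);
    diffuse_texture
}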

@@ -1,271 +1,29 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
use image::GenericImageView;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
impl Texture {
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8], label: &str) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, &img, Some(label))
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_texture: wgpu::Texture,
instance_texture_view: wgpu::TextureView,
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
}).unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage, label: Option<&str>) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
@@ -274,412 +32,49 @@ impl State {
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let buffer = device.create_buffer_with_data(
&rgba,
wgpu::BufferUsage::COPY_SRC,
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("texture_buffer_copy_encoder"),
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
buffer: &buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
bytes_per_row: 4 * dimensions.0,
rows_per_image: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
texture: &texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
},
size,
);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let cmd_buffer = encoder.finish();
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&instance_data);
let instance_extent = wgpu::Extent3d {
width: instance_data.len() as u32 * 4,
height: 1,
depth: 1,
};
let instance_texture = device.create_texture(&wgpu::TextureDescriptor {
size: instance_extent,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D1,
format: wgpu::TextureFormat::Rgba32Float,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let instance_texture_view = instance_texture.create_default_view();
let instance_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
let view = texture.create_default_view();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &instance_buffer,
offset: 0,
row_pitch: std::mem::size_of::<f32>() as u32 * 4,
image_height: instance_data.len() as u32 * 4,
},
wgpu::TextureCopyView {
texture: &instance_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
instance_extent,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D1,
}
},
wgpu::BindGroupLayoutBinding {
binding: 2,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Sampler,
},
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::TextureView(&instance_texture_view),
},
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Sampler(&instance_sampler),
},
],
});
let vs_src = include_str!("texture.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
queue.submit(&[encoder.finish()]);
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_texture,
instance_texture_view,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
});
}
#[allow(dead_code)]
fn main() {}

@ -1,659 +0,0 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
trait VBDesc {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl VBDesc for Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const NUM_INSTANCES_PER_ROW: u32 = 10;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
const FLOAT_SIZE: wgpu::BufferAddress = std::mem::size_of::<f32>() as wgpu::BufferAddress;
impl VBDesc for InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
wgpu::VertexBufferDescriptor {
stride: std::mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Instance,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
format: wgpu::VertexFormat::Float4,
shader_location: 2,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4,
format: wgpu::VertexFormat::Float4,
shader_location: 3,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4 * 2,
format: wgpu::VertexFormat::Float4,
shader_location: 4,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4 * 3,
format: wgpu::VertexFormat::Float4,
shader_location: 5,
},
]
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_raw(&self) -> InstanceRaw {
let model = cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation);
InstanceRaw { model }
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
}).unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(diffuse_rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&diffuse_rgba);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&instance_data);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
],
});
let vs_src = include_str!("vertex.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(), InstanceRaw::desc(),
],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
}
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0), (&self.instance_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
}
});
}

@ -14,6 +14,9 @@ struct Vertex {
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
@ -53,7 +56,7 @@ const INDICES: &[u16] = &[
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
@ -86,6 +89,9 @@ struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
@ -193,11 +199,23 @@ struct Instance {
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
fn to_matrix(&self) -> InstanceRaw {
let transform = cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation);
InstanceRaw {
transform,
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct InstanceRaw {
transform: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
@ -257,51 +275,59 @@ fn quat_mul(q: cgmath::Quaternion<f32>, r: cgmath::Quaternion<f32>) -> cgmath::Q
}
impl State {
fn new(window: &Window) -> Self {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
}).unwrap();
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes).unwrap();
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -316,6 +342,7 @@ impl State {
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
@ -332,9 +359,10 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
@ -356,20 +384,21 @@ impl State {
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&instance_data);
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::STORAGE_READ | wgpu::BufferUsage::COPY_DST,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
@ -377,7 +406,8 @@ impl State {
readonly: true,
}
}
]
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -398,6 +428,7 @@ impl State {
}
}
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("challenge.vert");
@ -425,7 +456,7 @@ impl State {
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::None,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
@ -440,21 +471,25 @@ impl State {
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
Self {
@ -481,7 +516,7 @@ impl State {
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
@ -494,17 +529,18 @@ impl State {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("update encoder"),
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
@ -515,19 +551,21 @@ impl State {
}
let instance_data = self.instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = self.device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&instance_data);
let instance_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&instance_buffer, 0, &self.instance_buffer, 0, instance_buffer_size as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("Render Encoder"),
});
{
@ -552,8 +590,8 @@ impl State {
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
@ -569,19 +607,17 @@ fn main() {
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
use futures::executor::block_on;
let mut old_time = std::time::Instant::now();
const MSPT: std::time::Duration = std::time::Duration::from_millis(20);
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
()
} else {
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
@ -594,34 +630,29 @@ fn main() {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => (),
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
()
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
()
// new_inner_size is &mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => (),
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
state.update();
state.render();
let new_time = std::time::Instant::now();
let delta_time = new_time - old_time;
*control_flow = if delta_time > MSPT {
ControlFlow::Poll
} else {
ControlFlow::WaitUntil(old_time + MSPT)
};
old_time = new_time;
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => (),
_ => {}
}
});
}

@ -18,6 +18,9 @@ struct Vertex {
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl VBDesc for Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
@ -41,11 +44,11 @@ impl VBDesc for Vertex {
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, -0.49240386, 0.0], tex_coords: [1.0 - 0.4131759, 1.0 - 0.00759614], }, // A
Vertex { position: [-0.49513406, -0.06958647, 0.0], tex_coords: [1.0 - 0.0048659444, 1.0 - 0.43041354], }, // B
Vertex { position: [-0.21918549, 0.44939706, 0.0], tex_coords: [1.0 - 0.28081453, 1.0 - 0.949397057], }, // C
Vertex { position: [0.35966998, 0.3473291, 0.0], tex_coords: [1.0 - 0.85967, 1.0 - 0.84732911], }, // D
Vertex { position: [0.44147372, -0.2347359, 0.0], tex_coords: [1.0 - 0.9414737, 1.0 - 0.2652641], }, // E
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
@ -57,7 +60,7 @@ const INDICES: &[u16] = &[
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
@ -90,6 +93,9 @@ struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
@ -195,6 +201,9 @@ struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
const FLOAT_SIZE: wgpu::BufferAddress = std::mem::size_of::<f32>() as wgpu::BufferAddress;
impl VBDesc for InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
@ -269,51 +278,59 @@ struct State {
}
impl State {
fn new(window: &Window) -> Self {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
}).unwrap();
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes).unwrap();
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -328,10 +345,11 @@ impl State {
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, -10.0).into(),
eye: (0.0, 5.0, 10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
@ -344,9 +362,10 @@ impl State {
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device
.create_buffer_mapped(1, wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST)
.fill_from_slice(&[uniforms]);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
@ -367,20 +386,22 @@ impl State {
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&instance_data);
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::VERTEX,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
]
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -394,6 +415,7 @@ impl State {
}
},
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("shader.vert");
@ -436,21 +458,26 @@ impl State {
},
],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(), InstanceRaw::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
InstanceRaw::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
Self {
@ -477,7 +504,7 @@ impl State {
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
@ -490,28 +517,30 @@ impl State {
self.camera_controller.process_events(event)
}
fn update(&mut self) {
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("update encoder"),
});
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
todo: 0,
label: Some("Render Encoder"),
});
{
@ -536,8 +565,9 @@ impl State {
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0), (&self.instance_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_vertex_buffer(1, &self.instance_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..self.instances.len() as u32);
}
@ -553,16 +583,17 @@ fn main() {
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if state.input(event) {
*control_flow = ControlFlow::Wait;
} else {
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
@ -575,26 +606,29 @@ fn main() {
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
// new_inner_size is &mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
state.update();
state.render();
*control_flow = ControlFlow::Wait;
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => *control_flow = ControlFlow::Wait,
_ => {}
}
});
}

@ -7,12 +7,12 @@ pub struct Texture {
}
impl Texture {
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn from_bytes(device: &wgpu::Device, bytes: &[u8], label: &str) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, &img)
Self::from_image(device, &img, Some(label))
}
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage, label: Option<&str>) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
let rgba = img.as_rgba8().unwrap();
let dimensions = img.dimensions();
@ -22,6 +22,7 @@ impl Texture {
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
array_layer_count: 1,
mip_level_count: 1,
@ -31,18 +32,21 @@ impl Texture {
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let buffer = device
.create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&rgba);
let buffer = device.create_buffer_with_data(
&rgba,
wgpu::BufferUsage::COPY_SRC,
);
let mut encoder = device.create_command_encoder(&Default::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("texture_buffer_copy_encoder"),
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
bytes_per_row: 4 * dimensions.0,
rows_per_image: dimensions.1,
},
wgpu::TextureCopyView {
texture: &texture,
@ -65,7 +69,7 @@ impl Texture {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::Always,
});
Ok((Self { texture, view, sampler }, cmd_buffer))

@ -12,12 +12,10 @@ winit = "0.22"
glsl-to-spirv = "0.1"
cgmath = "0.17"
failure = "0.1"
tobj = "1"
tobj = "1.0.0"
bytemuck = "1.2"
futures = "0.3"
# wgpu = "0.5"
wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git" }
# zerocopy = "0.2.8"
futures = "0.3.4"
wgpu = "0.5.0"
[[bin]]
name = "tutorial8-depth"

@ -53,7 +53,7 @@ const INDICES: &[u16] = &[
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);

@ -14,6 +14,9 @@ struct Uniforms {
model: cgmath::Matrix4<f32>, // NEW!
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
use cgmath::SquareMatrix;
@ -110,9 +113,10 @@ If you run the program now, you won't see anything different. That's because we
for instance in &self.instances {
// 1.
self.uniforms.model = instance.to_matrix();
let staging_buffer = self.device
.create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&[self.uniforms]);
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
// 2.
@ -137,8 +141,8 @@ for instance in &self.instances {
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
```
@ -249,15 +253,38 @@ This technique has its drawbacks.
A storage buffer gives us the flexibility that arrays did not. We don't have to specify its size in the shader, and we can even use a `Vec` to create it!
Since we're using `bytemuck` for casting our data to `&[u8]`, we're going to need to define a custom struct to store the `cgmath::Matrix4`s. We need to do this because we can't implement `bytemuck::Pod` and `bytemuck::Zeroable` on `cgmath::Matrix4`, because we don't own that type.
```rust
// UPDATED!
impl Instance {
// This is changed from `to_matrix()`
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
}
}
}
// NEW!
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
```
We create a storage buffer in much the same way as any other buffer.
```rust
let instance_data = instances.iter().map(Instance::to_matrix).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
// we'll need the size for later
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::STORAGE_READ)
.fill_from_slice(&instance_data);
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::STORAGE_READ,
);
```
To get this buffer into the shader, we'll need to attach it to a bind group. We'll use `uniform_bind_group` just to keep things simple.
@ -266,15 +293,16 @@ To get this buffer into the shader, we'll need to attach it to a bind group. We'
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
// ...
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
}
}
]
},
},
],
// ...
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -287,13 +315,15 @@ let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
}
},
],
});
```
*Note you'll probably need to shift your `instance_buffer` creation above the `uniform_bind_group` creation.*
We'll want to put `instance_buffer` into the `State` struct.
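For reference, here's a minimal sketch of how the relevant fields might sit in `State` (all other fields elided; the field names match the ones used in this section):

```rust
struct State {
    // ...
    instances: Vec<Instance>,
    // NEW! Keep the storage buffer alive so the bind group can keep referencing it.
    instance_buffer: wgpu::Buffer,
}
```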
You don't need to change the draw call at all from the previous example, but we'll need to change the vertex shader.
```glsl
@ -326,7 +356,7 @@ This method is nice because it allows us to store more data overall as storage
Another benefit to storage buffers is that they can be written to by the shader, unlike uniform buffers. If we want to mutate a large amount of data with a compute shader, we'd use a writeable storage buffer for our output (and potentially input as well).
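To make that concrete, here's a minimal sketch of what a writable storage-buffer binding could look like with the wgpu 0.5 API used here; the binding index and the `COMPUTE` visibility are illustrative, not something this tutorial actually sets up:

```rust
// Inside a BindGroupLayoutDescriptor's `bindings`: unlike the instance data
// above, this buffer is not readonly, so the shader may write to it.
wgpu::BindGroupLayoutEntry {
    binding: 1,
    visibility: wgpu::ShaderStage::COMPUTE,
    ty: wgpu::BindingType::StorageBuffer {
        dynamic: false,
        readonly: false,
    },
},
```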
## Another better way - instance buffers
## Another better way - vertex buffers
When we created the `VertexBufferDescriptor` for our model, it required a `step_mode` field. We used `InputStepMode::Vertex`; this time we'll create a `VertexBufferDescriptor` for our `instance_buffer`.
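To preview where this is going, the descriptor for the instance data differs from the vertex one mainly in its `step_mode`; a rough sketch (the `InstanceRaw` type and its full attribute list are defined just below):

```rust
wgpu::VertexBufferDescriptor {
    stride: std::mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
    // Advance through this buffer once per instance rather than once per vertex.
    step_mode: wgpu::InputStepMode::Instance,
    attributes: &[
        // four Float4 attributes, one per column of the matrix
        // ...
    ],
}
```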
@ -342,37 +372,6 @@ trait VBDesc {
To change `Vertex` to use this, we just have to swap `impl Vertex` for `impl VBDesc for Vertex`.
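The swap itself is mechanical; a minimal sketch, with the body of `desc()` left exactly as it was:

```rust
impl VBDesc for Vertex {
    fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
        // same stride, InputStepMode::Vertex, and attributes as before
        // ...
    }
}
```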
Now we create `InstanceRaw`. It's pretty simple.
```rust
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
```
We'll also want to change `Instance::to_matrix()` to `Instance::to_raw()`.
```rust
impl Instance {
fn to_raw(&self) -> InstanceRaw {
let model = cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation);
InstanceRaw { model }
}
}
```
Make sure to change any references to `to_matrix` to `to_raw` as well. We'll also want to change our `BufferUsage` to `VERTEX`.
```rust
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device
.create_buffer_mapped(instance_data.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(&instance_data);
```
With that done we can implement `VBDesc` for `InstanceRaw`.
```rust
@ -419,19 +418,34 @@ Now we need to add our new `VertexBufferDescriptor` to our `render_pipeline`.
```rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
// ...
vertex_buffers: &[
Vertex::desc(), InstanceRaw::desc(),
],
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
InstanceRaw::desc(), // NEW!
],
},
// ...
});
```
*You'll probably want to remove the `BindGroupLayoutBinding` and `Binding` from `uniform_bind_group_layout` and `uniform_bind_group` respectively, as we won't be accessing our buffer from there.*
We'll also want to change `instance_buffer`'s `BufferUsage` to `VERTEX`.
```rust
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::VERTEX,
);
```
The last thing we'll need to do from Rust is use our `instance_buffer` in the `render()` method.
```rust
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0), (&self.instance_buffer, 0)]);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_vertex_buffer(1, &self.instance_buffer, 0, 0); // NEW!
```
Now we get to the shader. We don't have to change much: we just make our shader reference our `instance_buffer` through the vertex attributes rather than a uniform/buffer block.
@ -465,6 +479,8 @@ This seems like a really backwards way to do instancing. Storing non image data
If you're following along, it'd be best to start from the storage buffer example. We're going to modify it to take our `instance_buffer`, and copy it into a 1D `instance_texture`. First we need to create the texture.
* We won't use our `texture` module for this, though we could refactor it to store random data as a texture.
```rust
let instance_extent = wgpu::Extent3d {
width: instance_data.len() as u32 * 4,
@ -500,19 +516,22 @@ let instance_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
compare: wgpu::CompareFunction::Always,
});
```
Then we need to copy the `instance_buffer` to our `instance_texture`. *You may need to move the `queue.submit(&[encoder.finish()]);` line to appease the borrow checker.*
Then we need to copy the `instance_buffer` to our `instance_texture`.
```rust
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("instance_texture_encoder"),
});
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &instance_buffer,
offset: 0,
row_pitch: std::mem::size_of::<f32>() as u32 * 4,
image_height: instance_data.len() * 4,
bytes_per_row: std::mem::size_of::<f32>() as u32 * 4,
rows_per_image: instance_data.len() as u32 * 4,
},
wgpu::TextureCopyView {
texture: &instance_texture,
@ -522,6 +541,7 @@ encoder.copy_buffer_to_texture(
},
instance_extent,
);
queue.submit(&[encoder.finish()]);
```
Now we need to add our texture and sampler to a bind group. Like with the storage buffer example, we'll use `uniform_bind_group` and its corresponding layout.
@ -530,20 +550,22 @@ Now we need to add our texture and sampler to a bind group. Let with the storage
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
// ...
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
component_type: wgpu::TextureComponentType::Uint,
dimension: wgpu::TextureViewDimension::D1,
}
},
wgpu::BindGroupLayoutBinding {
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Sampler { comparison: false },
},
]
],
// ...
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@ -559,6 +581,7 @@ let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
resource: wgpu::BindingResource::Sampler(&instance_sampler),
},
],
// ...
});
```
@ -595,6 +618,40 @@ void main() {
}
```
There are a couple of things we need to do before this method will work. First, `instance_buffer` will need to be `BufferUsage::COPY_SRC`.
```rust
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::COPY_SRC, // UPDATED!
);
```
You'll also need to remove `InstanceRaw::desc()` from the `render_pipeline`'s `vertex_state`.
```rust
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
// ...
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
// ...
});
```
Lastly you'll want to store `instance_texture` in `State` to prevent it from being disposed of.
```rust
// new()
Self {
// ...
instance_texture,
}
```
That's a lot more work than the other methods, but it's still good to know that you can use textures to store things other than color. This technique comes in handy when other solutions are not available, or not as performant. It's good to be aware of the possibilities!
For fun, here's what our matrix data looks like when converted into a texture (scaled up 10 times)!
