forgot to add examples

0.5
Ben Hansen 4 years ago
parent a7833f3c16
commit baf4377930

@@ -0,0 +1,582 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
mod texture;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
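// Shifts each instance so the NUM_INSTANCES_PER_ROW x NUM_INSTANCES_PER_ROW grid is roughly centered on the origin.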
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
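// cgmath builds projection matrices for OpenGL's clip space (z in -1..1); this matrix remaps z into wgpu's 0..1 range.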
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
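// In this version the per-instance model matrices are stored directly in the uniform buffer as a fixed-size array next to the view-projection matrix.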
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
model: [cgmath::Matrix4<f32>; NUM_INSTANCES as usize],
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
model: [cgmath::Matrix4::identity(); NUM_INSTANCES as usize],
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_matrix(&self) -> cgmath::Matrix4<f32> {
cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
}
impl State {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, 10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("arrays.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect();
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
}
}
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
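// Refresh the uniform array with each instance's current model matrix before uploading it to the GPU.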
for (i, instance) in self.instances.iter().enumerate() {
self.uniforms.model[i] = instance.to_matrix();
}
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("update encoder"),
});
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
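// Draw NUM_INSTANCES copies of the geometry; the vertex shader (arrays.vert, not shown here) presumably selects the matrix for each copy by its instance index.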
render_pass.draw_indexed(0..self.num_indices, 0, 0..NUM_INSTANCES);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
});
}

@@ -0,0 +1,616 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
mod texture;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
// This is changed from `to_matrix()`
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
}
}
}
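// InstanceRaw is the plain-data form of Instance that actually gets copied to the GPU.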
#[repr(C)]
#[derive(Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
#[allow(dead_code)]
instance_buffer: wgpu::Buffer,
}
impl State {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, 10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
// we'll need the size for later
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
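// In this version the instance matrices live in a read-only storage buffer rather than a fixed-size uniform array.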
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::STORAGE_READ,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
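// Binding 1 exposes the instance storage buffer to the vertex stage; the shader (storage.vert, not shown here) is expected to index it per instance.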
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
},
},
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Buffer {
buffer: &instance_buffer,
range: 0..instance_buffer_size as wgpu::BufferAddress,
}
},
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("storage.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
}
}
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("update encoder"),
});
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..NUM_INSTANCES);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
});
}

@@ -0,0 +1,711 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
mod texture;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
trait VBDesc {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl VBDesc for Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
// This is changed from `to_matrix()`
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
const FLOAT_SIZE: wgpu::BufferAddress = std::mem::size_of::<f32>() as wgpu::BufferAddress;
impl VBDesc for InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
wgpu::VertexBufferDescriptor {
stride: std::mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Instance, // 1.
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
format: wgpu::VertexFormat::Float4, // 2.
shader_location: 2, // 3.
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4,
format: wgpu::VertexFormat::Float4,
shader_location: 3,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4 * 2,
format: wgpu::VertexFormat::Float4,
shader_location: 4,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4 * 3,
format: wgpu::VertexFormat::Float4,
shader_location: 5,
},
]
}
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
#[allow(dead_code)]
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
#[allow(dead_code)]
instance_texture: wgpu::Texture,
}
impl State {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, 10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
// size of the raw instance data in bytes (not actually used in this version)
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::COPY_SRC,
);
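// This version packs the instance matrices into a 1D float texture: each mat4 occupies four Rgba32Float texels, so the texture is 4 texels wide per instance.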
let instance_extent = wgpu::Extent3d {
width: instance_data.len() as u32 * 4,
height: 1,
depth: 1,
};
let instance_texture = device.create_texture(&wgpu::TextureDescriptor {
size: instance_extent,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D1,
format: wgpu::TextureFormat::Rgba32Float,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("instance_texture"),
});
let instance_texture_view = instance_texture.create_default_view();
let instance_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare: wgpu::CompareFunction::Always,
});
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("instance_texture_encoder"),
});
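// Upload the packed matrices from the COPY_SRC buffer into the instance texture.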
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &instance_buffer,
offset: 0,
bytes_per_row: std::mem::size_of::<f32>() as u32 * 4,
rows_per_image: instance_data.len() as u32 * 4,
},
wgpu::TextureCopyView {
texture: &instance_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
instance_extent,
);
queue.submit(&[encoder.finish()]);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
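// Bindings 1 and 2 make the instance texture and its sampler visible to the vertex stage; the shader (texture.vert, not shown here) is expected to fetch each matrix as four texels.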
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
component_type: wgpu::TextureComponentType::Uint,
dimension: wgpu::TextureViewDimension::D1,
}
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
},
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::TextureView(&instance_texture_view),
},
wgpu::Binding {
binding: 2,
resource: wgpu::BindingResource::Sampler(&instance_sampler),
},
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("texture.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_texture,
}
}
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("update encoder"),
});
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..NUM_INSTANCES);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
});
}

@@ -0,0 +1,637 @@
use winit::{
event::*,
event_loop::{EventLoop, ControlFlow},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
mod texture;
const NUM_INSTANCES_PER_ROW: u32 = 10;
const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(NUM_INSTANCES_PER_ROW as f32 * 0.5, 0.0, NUM_INSTANCES_PER_ROW as f32 * 0.5);
trait VBDesc {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
unsafe impl bytemuck::Pod for Vertex {}
unsafe impl bytemuck::Zeroable for Vertex {}
impl VBDesc for Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
]
}
}
}
const VERTICES: &[Vertex] = &[
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
let view = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
return proj * view;
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct Uniforms {
view_proj: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix();
}
}
struct CameraController {
speed: f32,
is_up_pressed: bool,
is_down_pressed: bool,
is_forward_pressed: bool,
is_backward_pressed: bool,
is_left_pressed: bool,
is_right_pressed: bool,
}
impl CameraController {
fn new(speed: f32) -> Self {
Self {
speed,
is_up_pressed: false,
is_down_pressed: false,
is_forward_pressed: false,
is_backward_pressed: false,
is_left_pressed: false,
is_right_pressed: false,
}
}
fn process_events(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::KeyboardInput {
input: KeyboardInput {
state,
virtual_keycode: Some(keycode),
..
},
..
} => {
let is_pressed = *state == ElementState::Pressed;
match keycode {
VirtualKeyCode::Space => {
self.is_up_pressed = is_pressed;
true
}
VirtualKeyCode::LShift => {
self.is_down_pressed = is_pressed;
true
}
VirtualKeyCode::W | VirtualKeyCode::Up => {
self.is_forward_pressed = is_pressed;
true
}
VirtualKeyCode::A | VirtualKeyCode::Left => {
self.is_left_pressed = is_pressed;
true
}
VirtualKeyCode::S | VirtualKeyCode::Down => {
self.is_backward_pressed = is_pressed;
true
}
VirtualKeyCode::D | VirtualKeyCode::Right => {
self.is_right_pressed = is_pressed;
true
}
_ => false,
}
}
_ => false,
}
}
fn update_camera(&self, camera: &mut Camera) {
let forward = (camera.target - camera.eye).normalize();
if self.is_forward_pressed {
camera.eye += forward * self.speed;
}
if self.is_backward_pressed {
camera.eye -= forward * self.speed;
}
let right = forward.cross(camera.up);
if self.is_right_pressed {
camera.eye += right * self.speed;
}
if self.is_left_pressed {
camera.eye -= right * self.speed;
}
}
}
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
// This is changed from `to_matrix()`
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation),
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
const FLOAT_SIZE: wgpu::BufferAddress = std::mem::size_of::<f32>() as wgpu::BufferAddress;
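// This version feeds the model matrix in as per-instance vertex attributes: a mat4 doesn't fit in a single attribute, so it is split into four Float4 columns at shader locations 2 through 5.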
impl VBDesc for InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
wgpu::VertexBufferDescriptor {
stride: std::mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Instance, // 1.
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
format: wgpu::VertexFormat::Float4, // 2.
shader_location: 2, // 3.
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4,
format: wgpu::VertexFormat::Float4,
shader_location: 3,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4 * 2,
format: wgpu::VertexFormat::Float4,
shader_location: 4,
},
wgpu::VertexAttributeDescriptor {
offset: FLOAT_SIZE * 4 * 3,
format: wgpu::VertexFormat::Float4,
shader_location: 5,
},
]
}
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
camera: Camera,
camera_controller: CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
}
impl State {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
},
wgpu::BackendBit::PRIMARY, // Vulkan + Metal + DX12 + Browser WebGPU
).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
}).await;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("happy-tree.png");
let (diffuse_texture, cmd_buffer) = texture::Texture::from_bytes(&device, diffuse_bytes, "happy-tree.png").unwrap();
queue.submit(&[cmd_buffer]);
let texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
},
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
}
],
label: Some("diffuse_bind_group"),
});
let camera = Camera {
eye: (0.0, 5.0, 10.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: cgmath::Vector3::unit_y(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&[uniforms]),
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let position = cgmath::Vector3 { x: x as f32, y: 0.0, z: z as f32 } - INSTANCE_DISPLACEMENT;
let rotation = if position.is_zero() {
// this is needed so an object at (0, 0, 0) won't get scaled to zero
// as Quaternions can affect scale if they're not created correctly
cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.clone().normalize(), cgmath::Deg(45.0))
};
Instance {
position, rotation,
}
})
}).collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<_>>();
// size of the raw instance data in bytes (not actually used in this version)
let instance_buffer_size = instance_data.len() * std::mem::size_of::<cgmath::Matrix4<f32>>();
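// The instance data is now an ordinary vertex buffer (bound to slot 1 in the render pass) rather than a uniform, storage buffer, or texture.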
let instance_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&instance_data),
wgpu::BufferUsage::VERTEX,
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
},
},
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..std::mem::size_of_val(&uniforms) as wgpu::BufferAddress,
}
},
],
label: Some("uniform_bind_group"),
});
let vs_src = include_str!("vertex.vert");
let fs_src = include_str!("shader.frag");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout, &uniform_bind_group_layout],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[
wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil_state: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[
Vertex::desc(),
InstanceRaw::desc(),
],
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
let vertex_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(VERTICES),
wgpu::BufferUsage::VERTEX,
);
let index_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(INDICES),
wgpu::BufferUsage::INDEX,
);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_bind_group,
camera,
camera_controller,
uniform_buffer,
uniform_bind_group,
uniforms,
size,
instances,
instance_buffer,
}
}
async fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
self.camera.aspect = self.sc_desc.width as f32 / self.sc_desc.height as f32;
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
async fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("update encoder"),
});
let staging_buffer = self.device.create_buffer_with_data(
bytemuck::cast_slice(&[self.uniforms]),
wgpu::BufferUsage::COPY_SRC,
);
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &self.uniform_buffer, 0, std::mem::size_of::<Uniforms>() as wgpu::BufferAddress);
self.queue.submit(&[encoder.finish()]);
}
async fn render(&mut self) {
let frame = self.swap_chain.get_next_texture()
.expect("Timeout getting texture");
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}
],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_bind_group(1, &self.uniform_bind_group, &[]);
render_pass.set_vertex_buffer(0, &self.vertex_buffer, 0, 0);
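// Slot 1 carries the per-instance attributes; InputStepMode::Instance makes them advance once per instance instead of once per vertex.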
render_pass.set_vertex_buffer(1, &self.instance_buffer, 0, 0);
render_pass.set_index_buffer(&self.index_buffer, 0, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..NUM_INSTANCES);
}
self.queue.submit(&[
encoder.finish()
]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.unwrap();
use futures::executor::block_on;
// Since main can't be async, we're going to need to block
let mut state = block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput {
input,
..
} => {
match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
WindowEvent::Resized(physical_size) => {
block_on(state.resize(*physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
// new_inner_size is &&mut so we have to dereference it twice
block_on(state.resize(**new_inner_size));
}
_ => {}
}
}
Event::RedrawRequested(_) => {
block_on(state.update());
block_on(state.render());
}
Event::MainEventsCleared => {
// RedrawRequested will only trigger once, unless we manually
// request it.
window.request_redraw();
}
_ => {}
}
});
}