fixing some readme files and migrating the lighting code

web2
Ben Hansen 2 years ago
parent 6324e485f1
commit b8b8444030

Cargo.lock (generated)

@@ -2860,14 +2860,20 @@ version = "0.1.0"
dependencies = [
"anyhow",
"bytemuck",
"cfg-if 1.0.0",
"cgmath",
"console_error_panic_hook",
"console_log",
"env_logger",
"fs_extra",
"glob",
"image 0.23.14",
"image 0.24.0",
"log",
"pollster",
"tobj 3.2.0",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"wgpu",
"winit",
]

@@ -26,7 +26,7 @@ impl Texture {
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
@@ -51,7 +51,7 @@ impl Texture {
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
rgba,
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: NonZeroU32::new(4 * dimensions.0),

@@ -26,7 +26,7 @@ impl Texture {
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
@@ -51,7 +51,7 @@ impl Texture {
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
rgba,
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: NonZeroU32::new(4 * dimensions.0),

@@ -26,7 +26,7 @@ impl Texture {
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
@@ -51,7 +51,7 @@ impl Texture {
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
rgba,
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: NonZeroU32::new(4 * dimensions.0),

@@ -69,7 +69,7 @@ impl Texture {
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
@@ -94,7 +94,7 @@ impl Texture {
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
rgba,
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: NonZeroU32::new(4 * dimensions.0),

@@ -49,9 +49,7 @@ pub async fn load_texture(
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> anyhow::Result<texture::Texture> {
log::warn!("Loading binary file {}", file_name);
let data = load_binary(file_name).await?;
log::warn!("Creating texture");
texture::Texture::from_bytes(device, queue, &data, file_name)
}
@@ -61,12 +59,10 @@ pub async fn load_model(
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<model::Model> {
log::warn!("Loading obj text");
let obj_text = load_string(file_name).await?;
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
log::warn!("Loading model");
let (models, obj_materials) = tobj::load_obj_buf_async(
&mut obj_reader,
&tobj::LoadOptions {
@@ -75,19 +71,15 @@ pub async fn load_model(
..Default::default()
},
|p| async move {
log::warn!("Loading material");
let mat_text = load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
log::warn!("Processing materials");
let mut materials = Vec::new();
for m in obj_materials? {
log::warn!("Loading diffuse texture");
let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
log::warn!("Creating bind group");
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
@@ -110,7 +102,6 @@ pub async fn load_model(
})
}
log::warn!("Creating meshes");
let meshes = models
.into_iter()
.map(|m| {
@@ -151,6 +142,5 @@ pub async fn load_model(
})
.collect::<Vec<_>>();
log::warn!("Done with model: {}", file_name);
Ok(model::Model { meshes, materials })
}

@@ -5,17 +5,34 @@ authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
[dependencies]
cfg-if = "1"
anyhow = "1.0"
bytemuck = { version = "1.4", features = [ "derive" ] }
cgmath = "0.18"
env_logger = "0.9"
pollster = "0.2"
image = "0.23"
log = "0.4"
tobj = "3.0"
tobj = { version = "3.2", features = ["async"]}
wgpu = "0.12"
winit = "0.26"
[dependencies.image]
version = "0.24"
default-features = false
features = ["png", "jpeg"]
[target.'cfg(target_arch = "wasm32")'.dependencies]
console_error_panic_hook = "0.1"
console_log = "0.2"
wgpu = { version = "0.12", features = ["webgl"]}
wasm-bindgen = "0.2"
wasm-bindgen-futures = "0.4"
web-sys = { version = "0.3", features = [
"Document",
"Window",
"Element",
]}
[build-dependencies]
anyhow = "1.0"
fs_extra = "1.2"

@@ -8,7 +8,11 @@ use winit::{
window::Window,
};
#[cfg(target_arch="wasm32")]
use wasm_bindgen::prelude::*;
mod model;
mod resources;
mod texture;
use model::{DrawLight, DrawModel, Vertex};
@@ -476,14 +480,12 @@ impl State {
label: Some("camera_bind_group"),
});
let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
let obj_model = model::Model::load(
let obj_model = resources::load_model(
"cube.obj",
&device,
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
)
.unwrap();
).await.unwrap();
let light_uniform = LightUniform {
position: [2.0, 2.0, 2.0],

@@ -1,8 +1,4 @@
use anyhow::*;
use std::ops::Range;
use std::path::Path;
use tobj::LoadOptions;
use wgpu::util::DeviceExt;
use crate::texture;
@@ -13,9 +9,9 @@ pub trait Vertex {
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelVertex {
position: [f32; 3],
tex_coords: [f32; 2],
normal: [f32; 3],
pub position: [f32; 3],
pub tex_coords: [f32; 2],
pub normal: [f32; 3],
}
impl Vertex for ModelVertex {
@@ -64,98 +60,6 @@ pub struct Model {
pub materials: Vec<Material>,
}
impl Model {
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<Self> {
let (obj_models, obj_materials) = tobj::load_obj(
path.as_ref(),
&LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
)?;
let obj_materials = obj_materials?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent().context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture =
texture::Texture::load(device, queue, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(Material {
name: mat.name,
diffuse_texture,
bind_group,
});
}
let mut meshes = Vec::new();
for m in obj_models {
let mut vertices = Vec::new();
for i in 0..m.mesh.positions.len() / 3 {
vertices.push(ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
});
}
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
meshes.push(Mesh {
name: m.name,
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
});
}
Ok(Self { meshes, materials })
}
}
pub trait DrawModel<'a> {
fn draw_mesh(
&mut self,

@@ -0,0 +1,147 @@
use std::io::{BufReader, Cursor};
use cfg_if::cfg_if;
use wgpu::util::DeviceExt;
use crate::{model, texture};
pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
let url = format!("http://127.0.0.1:8080/learn-wgpu/{}", file_name);
let txt = reqwest::get(&url)
.await?
.text()
.await?;
} else {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let txt = std::fs::read_to_string(path)?;
}
}
Ok(txt)
}
pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
let url = format!("http://127.0.0.1:8080/learn-wgpu/{}", file_name);
let data = reqwest::get(url)
.await?
.bytes()
.await?
.to_vec();
} else {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let data = std::fs::read(path)?;
}
}
Ok(data)
}
pub async fn load_texture(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> anyhow::Result<texture::Texture> {
let data = load_binary(file_name).await?;
texture::Texture::from_bytes(device, queue, &data, file_name)
}
pub async fn load_model(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<model::Model> {
log::warn!("Loading obj text");
let obj_text = load_string(file_name).await?;
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
let (models, obj_materials) = tobj::load_obj_buf_async(
&mut obj_reader,
&tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
|p| async move {
let mat_text = load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
let mut materials = Vec::new();
for m in obj_materials? {
let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(model::Material {
name: m.name,
diffuse_texture,
bind_group,
})
}
let meshes = models
.into_iter()
.map(|m| {
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", file_name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
}
})
.collect::<Vec<_>>();
Ok(model::Model { meshes, materials })
}

@@ -1,6 +1,6 @@
use anyhow::*;
use image::GenericImageView;
use std::{num::NonZeroU32, path::Path};
use std::num::NonZeroU32;
pub struct Texture {
pub texture: wgpu::Texture,
@@ -11,19 +11,6 @@ pub struct Texture {
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
queue: &wgpu::Queue,
path: P,
) -> Result<Self> {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label)
}
pub fn create_depth_texture(
device: &wgpu::Device,
config: &wgpu::SurfaceConfiguration,

@@ -35,7 +35,7 @@ surface.configure(&device, &config);
let diffuse_bytes = include_bytes!("happy-tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
let diffuse_rgba = diffuse_image.as_rgba8().unwrap();
let diffuse_rgba = diffuse_image.to_rgba8();
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
@@ -476,7 +476,7 @@ impl Texture {
img: &image::DynamicImage,
label: Option<&str>
) -> Result<Self> {
let rgba = img.as_rgba8().unwrap();
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
@@ -503,7 +503,7 @@ impl Texture {
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
rgba,
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
@@ -530,6 +530,12 @@ impl Texture {
}
```
<div class="note">
Notice that we're using `to_rgba8()` instead of `as_rgba8()`. PNGs work fine with `as_rgba8()`, as they have an alpha channel. But JPEGs don't have an alpha channel, and the code would panic if we tried to call `as_rgba8()` on the JPEG texture image we are going to use. Instead, we can use `to_rgba8()` to handle such an image: it generates a new image buffer with an alpha channel, even if the original image doesn't have one.
</div>
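To make the distinction concrete, here's a small standalone sketch. The file names are just examples based on the tutorial's textures, and it assumes `happy-tree.png` was saved with an alpha channel:

```rust
// Hypothetical comparison of the two methods (image crate 0.24).
fn main() -> Result<(), image::ImageError> {
    let png = image::open("happy-tree.png")?; // RGBA: has an alpha channel
    let jpg = image::open("cube-diffuse.jpg")?; // RGB: no alpha channel

    // `as_rgba8()` only borrows the existing buffer, so it returns `None`
    // unless the image is already stored as RGBA8.
    assert!(png.as_rgba8().is_some());
    assert!(jpg.as_rgba8().is_none());

    // `to_rgba8()` always succeeds: it converts the image, filling in a
    // fully opaque alpha channel when the source doesn't have one.
    let _rgba = jpg.to_rgba8();
    Ok(())
}
```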
Note that we're returning a `CommandBuffer` with our texture. This means we can load multiple textures at the same time, and then submit all their command buffers at once.
We need to import `texture.rs` as a module, so somewhere at the top of `main.rs` add the following.
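The code in question (cut off by the hunk boundary here) is presumably just the module declaration:

```rust
mod texture;
```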

@@ -73,7 +73,7 @@ let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescrip
});
```
Since the `desc` method is implemented on the `Vertex` trait, the trait needs to be imported before the method will be accessible. Put the import towards the top of the file with the others.
```rust
use model::Vertex;
@@ -129,6 +129,72 @@ fs_extra = "1.2"
glob = "0.3"
```
## Accessing files from WASM
By design, you can't access files on a user's filesystem in WebAssembly. Instead, we'll serve those files from a web server and then load them into our code with HTTP requests. In order to simplify this, let's create a file called `resources.rs` to handle this for us. We'll create two functions that load text files and binary files respectively.
```rust
use std::io::{BufReader, Cursor};
use cfg_if::cfg_if;
use wgpu::util::DeviceExt;
use crate::{model, texture};
pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
let url = format!("http://127.0.0.1:8080/learn-wgpu/{}", file_name);
let txt = reqwest::get(&url)
.await?
.text()
.await?;
} else {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let txt = std::fs::read_to_string(path)?;
}
}
Ok(txt)
}
pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
let url = format!("http://127.0.0.1:8080/learn-wgpu/{}", file_name);
let data = reqwest::get(url)
.await?
.bytes()
.await?
.to_vec();
} else {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let data = std::fs::read(path)?;
}
}
Ok(data)
}
```
<div class="note">
We're using `OUT_DIR` on desktop to get at our `res` folder.
</div>
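If you're wondering how `res` gets into `OUT_DIR`: the build script copies it there at compile time. Here's a minimal sketch of such a `build.rs`, using the `fs_extra` crate from `[build-dependencies]` (the actual script isn't shown in this diff):

```rust
use anyhow::*;
use fs_extra::{copy_items, dir::CopyOptions};
use std::env;

fn main() -> Result<()> {
    // Re-run this script whenever something in res/ changes.
    println!("cargo:rerun-if-changed=res/*");

    let out_dir = env::var("OUT_DIR")?;
    let mut copy_options = CopyOptions::new();
    copy_options.overwrite = true;

    // Copy the whole res/ folder into OUT_DIR.
    copy_items(&["res/"], out_dir, &copy_options)?;
    Ok(())
}
```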
I'm using [reqwest](https://docs.rs/reqwest) to handle the HTTP requests when targeting WASM. Add the following to the `Cargo.toml`:
```toml
[target.'cfg(target_arch = "wasm32")'.dependencies]
# Other dependencies
reqwest = { version = "0.11" }
```
## Loading models with TOBJ
@@ -170,104 +236,81 @@ pub struct Mesh {
The `Material` is pretty simple: it's just a name and one texture. Our cube obj actually has two textures, but one is a normal map, and we'll get to those [later](../../intermediate/tutorial11-normals). The name is mostly for debugging purposes.
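For reference, here's a sketch of the struct being described, matching the fields that `load_model` fills in below:

```rust
// model.rs
pub struct Material {
    pub name: String,
    pub diffuse_texture: texture::Texture,
    pub bind_group: wgpu::BindGroup,
}
```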
Speaking of textures, we'll need to add a `load()` method to `Texture` in `texture.rs`.
Speaking of textures, we'll need to add a function to load a `Texture` in `resources.rs`.
```rust
use std::path::Path;
pub fn load<P: AsRef<Path>>(
pub async fn load_texture(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
path: P,
) -> Result<Self> {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::from_image(device, queue, &img, label)
) -> anyhow::Result<texture::Texture> {
let data = load_binary(file_name).await?;
texture::Texture::from_bytes(device, queue, &data, file_name)
}
```
The `load` method will be useful when we load the textures for our models, as `include_bytes!` requires that we know the name of the file at compile time, which we can't really guarantee with model textures.
While we're at it let's import `texture.rs` in `model.rs`.
```rust
use crate::texture;
```
We also need to make a subtle change to the `from_image()` method in `texture.rs`. PNGs work fine with `as_rgba8()`, as they have an alpha channel. But JPEGs don't have an alpha channel, and the code would panic if we tried to call `as_rgba8()` on the JPEG texture image we are going to use. Instead, we can use `to_rgba8()` to handle such an image: it generates a new image buffer with an alpha channel, even if the original image doesn't have one.
```rust
let rgba = img.to_rgba8();
```
Since `rgba` is now a new image buffer, and not a reference to the original image's buffer, when it is used in the call to `write_texture` later, it needs to be passed as a reference instead.
```rust
//...
&rgba, // UPDATED!
wgpu::ImageDataLayout {
```
The `load_texture` method will be useful when we load the textures for our models, as `include_bytes!` requires that we know the name of the file at compile time, which we can't really guarantee with model textures.
`Mesh` holds a vertex buffer, an index buffer, and the number of indices in the mesh. We're using a `usize` for the material. This `usize` will be used to index the `materials` list when it comes time to draw.
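For reference, a sketch of `Mesh` consistent with that description and with the fields constructed in `load_model` below:

```rust
// model.rs
pub struct Mesh {
    pub name: String,
    pub vertex_buffer: wgpu::Buffer,
    pub index_buffer: wgpu::Buffer,
    pub num_elements: u32,
    pub material: usize,
}
```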
With all that out of the way, we can get to loading our model.
```rust
impl Model {
pub fn load<P: AsRef<Path>>(
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<Self> {
let (obj_models, obj_materials) = tobj::load_obj(path.as_ref(), &LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
)?;
let obj_materials = obj_materials?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent()
.context("Directory has no parent")?;
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture = texture::Texture::load(device, queue, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(Material {
name: mat.name,
diffuse_texture,
bind_group,
});
}
pub async fn load_model(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<model::Model> {
let obj_text = load_string(file_name).await?;
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
let (models, obj_materials) = tobj::load_obj_buf_async(
&mut obj_reader,
&tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
|p| async move {
let mat_text = load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
let mut materials = Vec::new();
for m in obj_materials? {
let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(model::Material {
name: m.name,
diffuse_texture,
bind_group,
})
}
let mut meshes = Vec::new();
for m in obj_models {
let mut vertices = Vec::new();
for i in 0..m.mesh.positions.len() / 3 {
vertices.push(ModelVertex {
let meshes = models
.into_iter()
.map(|m| {
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
@@ -279,36 +322,33 @@ impl Model {
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
});
}
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", file_name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
}
);
meshes.push(Mesh {
name: m.name,
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
});
}
}
})
.collect::<Vec<_>>();
Ok(Self { meshes, materials })
}
Ok(model::Model { meshes, materials })
}
```
## Rendering a mesh
@@ -316,6 +356,7 @@ impl Model {
Before we can draw the model, we need to be able to draw an individual mesh. Let's create a trait called `DrawModel`, and implement it for `RenderPass`.
```rust
// model.rs
pub trait DrawModel<'a> {
fn draw_mesh(&mut self, mesh: &'a Mesh);
fn draw_mesh_instanced(
@@ -344,10 +385,10 @@ where
}
```
We could have put these methods in `impl Model`, but I felt it made more sense to have the `RenderPass` do all the rendering, as that's kind of its job. This does mean we have to import `DrawModel` when we go to render though.
We could have put these methods in an `impl Model`, but I felt it made more sense to have the `RenderPass` do all the rendering, as that's kind of its job. This does mean we have to import `DrawModel` when we go to render though.
```rust
// main.rs
// lib.rs
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
@@ -360,21 +401,14 @@ render_pass.draw_mesh_instanced(&self.obj_model.meshes[0], 0..self.instances.len
Before that though we need to actually load the model and save it to `State`. Put the following in `State::new()`.
```rust
let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
let obj_model = model::Model::load(
let obj_model = resources::load_model(
"cube.obj",
&device,
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
).await.unwrap();
```
<div class="note">
We're using `OUT_DIR` here to get at our `res` folder.
</div>
Our new model is a bit bigger than our previous one, so we're gonna need to adjust the spacing on our instances a bit.
```rust
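// A sketch of the spacing tweak, assuming the instance grid from the
// instancing tutorial (NUM_INSTANCES_PER_ROW, Instance, and the cgmath
// prelude in scope); SPACE_BETWEEN spreads the grid cells apart.
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
    (0..NUM_INSTANCES_PER_ROW).map(move |x| {
        let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
        let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);

        let position = cgmath::Vector3 { x, y: 0.0, z };

        // Leave the instance at the origin unrotated; quaternion rotations
        // need a non-zero axis.
        let rotation = if position.is_zero() {
            cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
        } else {
            cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0))
        };

        Instance { position, rotation }
    })
}).collect::<Vec<_>>();
```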
