more work on hdr tutorial

pull/501/head
Benjamin Hansen 8 months ago
parent 0bde203056
commit 67e78d9c65

@@ -23,7 +23,7 @@ instant = "0.1"
[dependencies.image]
version = "0.24"
default-features = false
features = ["png", "jpeg"]
features = ["png", "jpeg", "hdr"]
[target.'cfg(target_arch = "wasm32")'.dependencies]
reqwest = { version = "0.11" }

@@ -68,7 +68,7 @@ impl Projection {
}
pub fn calc_matrix(&self) -> Matrix4<f32> {
OPENGL_TO_WGPU_MATRIX * perspective(self.fovy, self.aspect, self.znear, self.zfar)
/* OPENGL_TO_WGPU_MATRIX * */ perspective(self.fovy, self.aspect, self.znear, self.zfar)
}
}

@@ -0,0 +1,28 @@
@group(0)
@binding(0)
var src: texture_2d<f32>;
@group(0)
@binding(1)
var output: texture_storage_2d_array<rgba32float, write>;
@compute
@workgroup_size(1, 1, 1)
fn compute_equirect_to_cubemap(
@builtin(global_invocation_id)
gid: vec3<u32>,
) {
var SIDES: array<vec3<f32>, 6> = array(
vec3(1.0, 0.0, 0.0), // POSX
vec3(-1.0, 0.0, 0.0), // NEGX
vec3(0.0, 1.0, 0.0), // POSY
vec3(0.0, -1.0, 0.0), // NEGY
vec3(0.0, 0.0, 1.0), // POSZ
vec3(0.0, 0.0, -1.0), // NEGZ
);
var side = SIDES[gid.z];
// We use textureLoad() because textureSample() needs implicit
// derivatives, which only exist in fragment shaders.
let samp = textureLoad(src, vec2(0, 0), 0);
textureStore(output, gid.xy, gid.z, vec4(side * 2.0 + 1.0, 1.0));
}
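The shader above is still a stub: samp is unused and each face is filled with a flat debug color. For reference, the core of the finished conversion is just a spherical mapping; here is a Rust sketch of it (the function name is mine, not code from this commit), assuming a normalized cube-face direction:

    // Sketch: map a normalized world-space direction to equirectangular UVs.
    fn dir_to_equirect_uv(dir: [f32; 3]) -> [f32; 2] {
        use std::f32::consts::PI;
        let [x, y, z] = dir;
        let u = 0.5 + x.atan2(z) / (2.0 * PI); // longitude -> [0, 1]
        let v = 0.5 - y.asin() / PI; // latitude -> [0, 1]
        [u, v]
    }

Scaling those UVs by the source dimensions gives the pixel to pass to textureLoad().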

@@ -26,6 +26,7 @@ const NUM_INSTANCES_PER_ROW: u32 = 10;
struct CameraUniform {
view_position: [f32; 4],
view_proj: [[f32; 4]; 4],
inv_view_proj: [[f32; 4]; 4], // NEW!
}
impl CameraUniform {
@@ -33,13 +34,16 @@ impl CameraUniform {
Self {
view_position: [0.0; 4],
view_proj: cgmath::Matrix4::identity().into(),
inv_view_proj: cgmath::Matrix4::identity().into(),
}
}
// UPDATED!
fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
self.view_position = camera.position.to_homogeneous().into();
self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into()
let view_proj = projection.calc_matrix() * camera.calc_matrix();
self.view_proj = view_proj.into();
self.inv_view_proj = view_proj.invert().unwrap().into();
}
}
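inv_view_proj exists so the sky shader can turn a pixel back into a world-space view direction. A minimal cgmath sketch of that unprojection (illustrative, not code from this commit): unproject an NDC point on the far plane, then subtract the camera position.

    use cgmath::{EuclideanSpace, InnerSpace, Matrix4, Point3, Vector3, Vector4};

    // Sketch: recover the view ray for an NDC coordinate.
    fn ndc_to_view_dir(
        inv_view_proj: Matrix4<f32>,
        camera_pos: Point3<f32>,
        ndc_x: f32,
        ndc_y: f32,
    ) -> Vector3<f32> {
        let clip = Vector4::new(ndc_x, ndc_y, 1.0, 1.0); // far plane in NDC
        let world = inv_view_proj * clip;
        let world = world.truncate() / world.w; // perspective divide
        (world - camera_pos.to_vec()).normalize()
    }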
@@ -157,7 +161,10 @@ struct State {
#[allow(dead_code)]
debug_material: model::Material,
mouse_pressed: bool,
// NEW!
hdr: hdr::HdrPipeline,
environment_bind_group: wgpu::BindGroup,
sky_pipeline: wgpu::RenderPipeline,
}
fn create_render_pipeline(
@@ -202,7 +209,7 @@ fn create_render_pipeline(
depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
depth_compare: wgpu::CompareFunction::LessEqual, // UPDATED! The sky is drawn at the far plane (depth 1.0), which Less would reject.
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
@@ -218,7 +225,7 @@
}
impl State {
async fn new(window: Window) -> Self {
async fn new(window: Window) -> anyhow::Result<Self> {
let size = window.inner_size();
// The instance is a handle to our GPU
@@ -246,8 +253,8 @@ impl State {
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: wgpu::Features::empty(),
// UPDATED!
features: wgpu::Features::all_webgpu_mask(),
limits: wgpu::Limits::downlevel_defaults(),
},
None, // Trace path
@@ -317,7 +324,6 @@ impl State {
label: Some("texture_bind_group_layout"),
});
// UPDATED!
let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
let projection =
camera::Projection::new(config.width, config.height, cgmath::Deg(45.0), 0.1, 100.0);
@@ -431,8 +437,57 @@ impl State {
let depth_texture =
texture::Texture::create_depth_texture(&device, &config, "depth_texture");
// NEW!
let hdr = hdr::HdrPipeline::new(&device, &config);
let hdr_loader = resources::HdrLoader::new(&device);
let sky_bytes = resources::load_binary("pure-sky.hdr").await?;
let sky_texture = hdr_loader.from_equirectangular_bytes(
&device,
&queue,
&sky_bytes,
1080,
Some("Sky Texture"),
)?;
let environment_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("environment_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: false },
view_dimension: wgpu::TextureViewDimension::Cube,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
count: None,
},
],
});
let environment_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("environment_bind_group"),
layout: &environment_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&sky_texture.view()),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(sky_texture.sampler()),
},
],
});
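A note on the NonFiltering sampler above: Rgba32Float textures are not filterable in core WebGPU, so the layout must declare filterable: false and a matching sampler binding type. If linear filtering of the environment map were wanted, one option (a sketch, not what this commit does) is a filterable float format such as Rgba16Float, which would let the texture entry become:

    // Hypothetical alternative: rgba16float is filterable in core WebGPU,
    // so a Filtering sampler could be used with it.
    let entry = wgpu::BindGroupLayoutEntry {
        binding: 0,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            view_dimension: wgpu::TextureViewDimension::Cube,
            multisampled: false,
        },
        count: None,
    };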
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
@@ -440,6 +495,7 @@ impl State {
&texture_bind_group_layout,
&camera_bind_group_layout,
&light_bind_group_layout,
&environment_layout, // UPDATED!
],
push_constant_ranges: &[],
});
@@ -479,6 +535,24 @@ impl State {
)
};
// NEW!
let sky_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Sky Pipeline Layout"),
bind_group_layouts: &[&camera_bind_group_layout, &environment_layout],
push_constant_ranges: &[],
});
let shader = wgpu::include_wgsl!("sky.wgsl");
create_render_pipeline(
&device,
&layout,
hdr.format(),
Some(texture::Texture::DEPTH_FORMAT),
&[],
shader,
)
};
let debug_material = {
let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
let normal_bytes = include_bytes!("../res/cobble-normal.png");
@@ -509,7 +583,7 @@ impl State {
)
};
Self {
Ok(Self {
window,
surface,
device,
@@ -534,8 +608,11 @@ impl State {
#[allow(dead_code)]
debug_material,
mouse_pressed: false,
hdr, // NEW!
}
// NEW!
hdr,
environment_bind_group,
sky_pipeline,
})
}
pub fn window(&self) -> &Window {
@@ -661,7 +738,13 @@ impl State {
0..self.instances.len() as u32,
&self.camera_bind_group,
&self.light_bind_group,
&self.environment_bind_group,
);
render_pass.set_pipeline(&self.sky_pipeline);
render_pass.set_bind_group(0, &self.camera_bind_group, &[]);
render_pass.set_bind_group(1, &self.environment_bind_group, &[]);
render_pass.draw(0..3, 0..1);
}
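draw(0..3, 0..1) works with no vertex buffer because the sky vertex shader derives a fullscreen triangle from the vertex index alone. The same arithmetic in Rust, purely for clarity (not code this commit ships):

    // vertex_index 0, 1, 2 -> clip (-1,-1), (3,-1), (-1,3): one oversized
    // triangle that covers the whole screen.
    fn fullscreen_triangle_clip(id: u32) -> (f32, f32) {
        let u = ((id << 1) & 2) as f32;
        let v = (id & 2) as f32;
        (u * 2.0 - 1.0, v * 2.0 - 1.0)
    }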
// NEW!
@@ -676,7 +759,7 @@ impl State {
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen(start))]
pub async fn run() {
pub async fn run() -> anyhow::Result<()> {
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
@@ -712,13 +795,12 @@ pub async fn run() {
.expect("Couldn't append canvas to document body.");
}
let mut state = State::new(window).await; // NEW!
let mut state = State::new(window).await?; // NEW!
let mut last_render_time = instant::Instant::now();
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => state.window().request_redraw(),
// NEW!
Event::DeviceEvent {
event: DeviceEvent::MouseMotion{ delta, },
.. // We're not using device_id currently

@@ -3,6 +3,7 @@
struct Camera {
view_pos: vec4<f32>,
view_proj: mat4x4<f32>,
inv_view_proj: mat4x4<f32>, // NEW!
}
@group(0) @binding(0)
var<uniform> camera: Camera;

@@ -1,5 +1,5 @@
use tutorial13_hdr::run;
fn main() {
pollster::block_on(run());
pollster::block_on(run()).unwrap();
}
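Since run() now returns anyhow::Result<()>, an equivalent option is to let main propagate the error instead of unwrapping (a stylistic sketch, not what this commit does):

    fn main() -> anyhow::Result<()> {
        pollster::block_on(run())
    }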

@@ -121,6 +121,7 @@ pub trait DrawModel<'a> {
material: &'a Material,
camera_bind_group: &'a wgpu::BindGroup,
light_bind_group: &'a wgpu::BindGroup,
environment_bind_group: &'a wgpu::BindGroup,
);
fn draw_mesh_instanced(
&mut self,
@@ -129,6 +130,7 @@ pub trait DrawModel<'a> {
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
light_bind_group: &'a wgpu::BindGroup,
environment_bind_group: &'a wgpu::BindGroup,
);
fn draw_model(
@@ -136,6 +138,7 @@ pub trait DrawModel<'a> {
model: &'a Model,
camera_bind_group: &'a wgpu::BindGroup,
light_bind_group: &'a wgpu::BindGroup,
environment_bind_group: &'a wgpu::BindGroup,
);
fn draw_model_instanced(
&mut self,
@@ -143,6 +146,7 @@ pub trait DrawModel<'a> {
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
light_bind_group: &'a wgpu::BindGroup,
environment_bind_group: &'a wgpu::BindGroup,
);
fn draw_model_instanced_with_material(
&mut self,
@@ -151,6 +155,7 @@ pub trait DrawModel<'a> {
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
light_bind_group: &'a wgpu::BindGroup,
environment_bind_group: &'a wgpu::BindGroup,
);
}
@@ -164,8 +169,16 @@ where
material: &'b Material,
camera_bind_group: &'b wgpu::BindGroup,
light_bind_group: &'b wgpu::BindGroup,
environment_bind_group: &'b wgpu::BindGroup,
) {
self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group, light_bind_group);
self.draw_mesh_instanced(
mesh,
material,
0..1,
camera_bind_group,
light_bind_group,
environment_bind_group,
);
}
fn draw_mesh_instanced(
@@ -175,12 +188,14 @@ where
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
light_bind_group: &'b wgpu::BindGroup,
environment_bind_group: &'b wgpu::BindGroup,
) {
self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, camera_bind_group, &[]);
self.set_bind_group(2, light_bind_group, &[]);
self.set_bind_group(3, environment_bind_group, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
@@ -189,8 +204,15 @@ where
model: &'b Model,
camera_bind_group: &'b wgpu::BindGroup,
light_bind_group: &'b wgpu::BindGroup,
environment_bind_group: &'b wgpu::BindGroup,
) {
self.draw_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
self.draw_model_instanced(
model,
0..1,
camera_bind_group,
light_bind_group,
environment_bind_group,
);
}
fn draw_model_instanced(
@@ -199,6 +221,7 @@ where
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
light_bind_group: &'b wgpu::BindGroup,
environment_bind_group: &'b wgpu::BindGroup, // NEW!
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
@@ -208,6 +231,7 @@ where
instances.clone(),
camera_bind_group,
light_bind_group,
environment_bind_group,
);
}
}
@@ -219,6 +243,7 @@ where
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
light_bind_group: &'b wgpu::BindGroup,
environment_bind_group: &'b wgpu::BindGroup,
) {
for mesh in &model.meshes {
self.draw_mesh_instanced(
@@ -227,6 +252,7 @@ where
instances.clone(),
camera_bind_group,
light_bind_group,
environment_bind_group,
);
}
}

@@ -1,6 +1,7 @@
use std::io::{BufReader, Cursor};
use cfg_if::cfg_if;
use image::codecs::hdr::HdrDecoder;
use wgpu::util::DeviceExt;
use crate::{model, texture};
@@ -217,3 +218,137 @@ pub async fn load_model(
Ok(model::Model { meshes, materials })
}
pub struct HdrLoader {
texture_format: wgpu::TextureFormat,
equirect_layout: wgpu::BindGroupLayout,
equirect_to_cubemap: wgpu::ComputePipeline,
}
impl HdrLoader {
pub fn new(device: &wgpu::Device) -> Self {
let module = device.create_shader_module(wgpu::include_wgsl!("equirectangular.wgsl"));
let texture_format = wgpu::TextureFormat::Rgba32Float;
let equirect_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("HdrLoader::equirect_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: false },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::StorageTexture {
access: wgpu::StorageTextureAccess::WriteOnly,
format: texture_format,
view_dimension: wgpu::TextureViewDimension::D2Array,
},
count: None,
},
],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&equirect_layout],
push_constant_ranges: &[],
});
let equirect_to_cubemap =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("equirect_to_cubemap"),
layout: Some(&pipeline_layout),
module: &module,
entry_point: "compute_equirect_to_cubemap",
});
Self {
equirect_to_cubemap,
texture_format,
equirect_layout,
}
}
pub fn from_equirectangular_bytes(
&self,
device: &wgpu::Device,
queue: &wgpu::Queue,
data: &[u8],
dst_size: u32,
label: Option<&str>,
) -> anyhow::Result<texture::CubeTexture> {
let hdr_decoder = HdrDecoder::new(Cursor::new(data))?;
let meta = hdr_decoder.metadata();
let mut pixels = vec![[0.0, 0.0, 0.0, 0.0]; meta.width as usize * meta.height as usize];
hdr_decoder.read_image_transform(
|pix| {
let rgb = pix.to_hdr();
[rgb.0[0], rgb.0[1], rgb.0[2], 1.0f32]
},
&mut pixels[..],
)?;
let src = texture::Texture::create_2d_texture(
device,
meta.width,
meta.height,
self.texture_format,
wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
wgpu::FilterMode::Linear,
None,
);
// Upload the decoded pixels into the source texture; without this copy
// the compute shader would read an all-zero image.
queue.write_texture(
wgpu::ImageCopyTexture {
texture: &src.texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
bytemuck::cast_slice(&pixels),
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(meta.width * std::mem::size_of::<[f32; 4]>() as u32),
rows_per_image: Some(meta.height),
},
wgpu::Extent3d {
width: meta.width,
height: meta.height,
depth_or_array_layers: 1,
},
);
let dst = texture::CubeTexture::create_2d(
device,
dst_size,
dst_size,
self.texture_format,
1,
wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING,
wgpu::FilterMode::Nearest,
label,
);
let dst_view = dst.texture().create_view(&wgpu::TextureViewDescriptor {
label,
dimension: Some(wgpu::TextureViewDimension::D2Array),
// array_layer_count: Some(6),
..Default::default()
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label,
layout: &self.equirect_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&src.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&dst_view),
},
],
});
let mut encoder = device.create_command_encoder(&Default::default());
let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label });
pass.set_pipeline(&self.equirect_to_cubemap);
pass.set_bind_group(0, &bind_group, &[]);
pass.dispatch_workgroups(dst_size, dst_size, 6);
drop(pass);
queue.submit([encoder.finish()]);
Ok(dst)
}
}
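With @workgroup_size(1, 1, 1), dispatch_workgroups(dst_size, dst_size, 6) launches one workgroup per output texel per face, which is correct but leaves most of each GPU wave idle. A common refinement (a sketch that assumes the shader is changed to a 16x16 workgroup and given a bounds check) divides the dispatch instead:

    // Assumes @workgroup_size(16, 16, 1) in the shader.
    let workgroup_size = 16u32;
    let groups = (dst_size + workgroup_size - 1) / workgroup_size; // ceiling division
    pass.dispatch_workgroups(groups, groups, 6);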

@@ -3,6 +3,7 @@
struct Camera {
view_pos: vec4<f32>,
view_proj: mat4x4<f32>,
inv_view_proj: mat4x4<f32>, // NEW!
}
@group(1) @binding(0)
var<uniform> camera: Camera;
@@ -34,9 +35,13 @@ struct InstanceInput {
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
@location(1) tangent_position: vec3<f32>,
@location(2) tangent_light_position: vec3<f32>,
@location(3) tangent_view_position: vec3<f32>,
// UPDATED!
@location(1) world_position: vec3<f32>,
@location(2) world_view_position: vec3<f32>,
@location(3) world_light_position: vec3<f32>,
@location(4) world_normal: vec3<f32>,
@location(5) world_tangent: vec3<f32>,
@location(6) world_bitangent: vec3<f32>,
}
@vertex
@@ -56,24 +61,17 @@ fn vs_main(
instance.normal_matrix_2,
);
// Construct the tangent matrix
let world_normal = normalize(normal_matrix * model.normal);
let world_tangent = normalize(normal_matrix * model.tangent);
let world_bitangent = normalize(normal_matrix * model.bitangent);
let tangent_matrix = transpose(mat3x3<f32>(
world_tangent,
world_bitangent,
world_normal,
));
// UPDATED!
let world_position = model_matrix * vec4<f32>(model.position, 1.0);
var out: VertexOutput;
out.clip_position = camera.view_proj * world_position;
out.tex_coords = model.tex_coords;
out.tangent_position = tangent_matrix * world_position.xyz;
out.tangent_view_position = tangent_matrix * camera.view_pos.xyz;
out.tangent_light_position = tangent_matrix * light.position;
out.world_normal = normalize(normal_matrix * model.normal);
out.world_tangent = normalize(normal_matrix * model.tangent);
out.world_bitangent = normalize(normal_matrix * model.bitangent);
out.world_position = world_position.xyz;
out.world_view_position = camera.view_pos.xyz;
out.world_light_position = light.position; // fs_main needs the light position in world space
return out;
}
@@ -88,6 +86,13 @@ var t_normal: texture_2d<f32>;
@group(0) @binding(3)
var s_normal: sampler;
@group(3)
@binding(0)
var env_map: texture_cube<f32>;
@group(3)
@binding(1)
var env_sampler: sampler;
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let object_color: vec4<f32> = textureSample(t_diffuse, s_diffuse, in.tex_coords);
@@ -97,10 +102,25 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let ambient_strength = 0.1;
let ambient_color = light.color * ambient_strength;
// Create the lighting vectors
// NEW!
// Adjust the tangent and bitangent using the Gram-Schmidt process.
// This makes sure that they are perpendicular to each other and to
// the normal of the surface.
let world_tangent = normalize(in.world_tangent - dot(in.world_tangent, in.world_normal) * in.world_normal);
let world_bitangent = cross(world_tangent, in.world_normal);
// Convert the normal sample to world space
let TBN = mat3x3(
world_tangent,
world_bitangent,
in.world_normal,
);
let tangent_normal = object_normal.xyz * 2.0 - 1.0;
let light_dir = normalize(in.tangent_light_position - in.tangent_position);
let view_dir = normalize(in.tangent_view_position - in.tangent_position);
let world_normal = TBN * tangent_normal;
// Create the lighting vectors
let light_dir = normalize(in.world_light_position - in.world_position);
let view_dir = normalize(in.world_view_position - in.world_position);
let half_dir = normalize(view_dir + light_dir);
let diffuse_strength = max(dot(world_normal, light_dir), 0.0);
@@ -109,7 +129,13 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let specular_strength = pow(max(dot(world_normal, half_dir), 0.0), 32.0);
let specular_color = specular_strength * light.color;
let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;
// NEW!
// Calculate reflections
let world_view = normalize(in.world_view_position - in.world_position);
let world_reflect = reflect(world_view, world_normal);
let env_ambient = textureSample(env_map, env_sampler, world_reflect).rgb;
let result = (env_ambient + diffuse_color + specular_color) * object_color.xyz;
return vec4<f32>(result, object_color.a);
}
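The Gram-Schmidt step in fs_main above is plain vector projection. The same operation in Rust with cgmath, for clarity (the function name is illustrative):

    use cgmath::{InnerSpace, Vector3};

    // Remove the tangent's component along the normal, then renormalize,
    // so the TBN basis stays orthonormal.
    fn orthonormalize_tangent(tangent: Vector3<f32>, normal: Vector3<f32>) -> Vector3<f32> {
        (tangent - normal * tangent.dot(normal)).normalize()
    }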

@@ -0,0 +1,40 @@
struct Camera {
view_pos: vec4<f32>,
view_proj: mat4x4<f32>,
inv_view_proj: mat4x4<f32>,
}
@group(0) @binding(0)
var<uniform> camera: Camera;
@group(1)
@binding(0)
var env_map: texture_cube<f32>;
@group(1)
@binding(1)
var env_sampler: sampler;
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) view_dir: vec3<f32>,
}
@vertex
fn vs_main(
@builtin(vertex_index) id: u32,
) -> VertexOutput {
let uv = vec2<f32>(vec2<u32>(
(id << 1u) & 2u,
id & 2u
));
var out: VertexOutput;
out.clip_position = vec4(uv * 2.0 - 1.0, 1.0, 1.0);
out.view_dir = normalize((camera.inv_view_proj * vec4(normalize(out.clip_position.xyz), 1.0)).xyz);
return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
// let sample = textureSample(env_map, env_sampler, in.view_dir);
let sample = vec4(in.view_dir, 1.0); // Debug: visualize the view direction for now
return sample;
}

@@ -1,5 +1,10 @@
use std::io::Cursor;
use anyhow::*;
use image::GenericImageView;
use image::{
codecs::hdr::{HdrDecoder, Rgbe8Pixel},
GenericImageView,
};
pub struct Texture {
pub texture: wgpu::Texture,
@@ -177,3 +182,78 @@ impl Texture {
}
}
}
pub enum CubeSide {
PosX = 0,
NegX = 1,
PosY = 2,
NegY = 3,
PosZ = 4,
NegZ = 5,
}
pub struct CubeTexture {
texture: wgpu::Texture,
sampler: wgpu::Sampler,
view: wgpu::TextureView,
}
impl CubeTexture {
pub fn create_2d(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
mip_level_count: u32,
usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode,
label: Option<&str>,
) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size: wgpu::Extent3d {
width,
height,
// A cube has 6 sides, so we need 6 layers
depth_or_array_layers: 6,
},
mip_level_count,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor {
label,
dimension: Some(wgpu::TextureViewDimension::Cube),
array_layer_count: Some(6),
..Default::default()
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label,
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
texture,
sampler,
view,
}
}
pub fn texture(&self) -> &wgpu::Texture { &self.texture }
pub fn view(&self) -> &wgpu::TextureView { &self.view }
pub fn sampler(&self) -> &wgpu::Sampler { &self.sampler }
}
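CubeSide maps one-to-one onto the array layers of the texture above, so a single face can be exposed as its own 2D view. A hypothetical helper (not part of this commit) to illustrate:

    // Illustrative only: view one face of the cubemap as a plain 2D texture.
    fn face_view(cube: &CubeTexture, side: CubeSide) -> wgpu::TextureView {
        cube.texture().create_view(&wgpu::TextureViewDescriptor {
            dimension: Some(wgpu::TextureViewDimension::D2),
            base_array_layer: side as u32,
            array_layer_count: Some(1),
            ..Default::default()
        })
    }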
