Merge pull request #17 from sotrh/gifs

Gifs
pull/19/head
sotrh 5 years ago committed by GitHub
commit a138edbfb7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

4
.gitignore vendored

@ -2,4 +2,6 @@ node_modules/
target/ target/
.vscode/ .vscode/
image.png /image.png
/output*.*
output/

28
Cargo.lock generated

@ -375,6 +375,19 @@ name = "foreign-types-shared"
version = "0.1.1" version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "framework"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "fuchsia-cprng" name = "fuchsia-cprng"
version = "0.1.1" version = "0.1.1"
@ -539,6 +552,21 @@ dependencies = [
"lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "gifs"
version = "0.1.0"
dependencies = [
"cgmath 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"framework 0.1.0",
"gif 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)",
"glsl-to-spirv 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"image 0.22.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tobj 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winit 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "glsl-to-spirv" name = "glsl-to-spirv"
version = "0.1.7" version = "0.1.7"

@ -5,4 +5,7 @@ members = [
# intermediate tutorials # intermediate tutorials
"code/intermediate/*", "code/intermediate/*",
# showcase
"code/showcase/*",
] ]

@ -0,0 +1,16 @@
[package]
name = "framework"
version = "0.1.0"
authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"

@ -0,0 +1,74 @@
use std::mem;
/// Conversion from a higher-level CPU-side type to its raw,
/// GPU-uploadable representation.
pub trait ToRaw {
    /// The raw representation produced by [`ToRaw::to_raw`].
    type Output;
    fn to_raw(&self) -> Self::Output;
}

/// A GPU buffer paired with the CPU-side copy of its contents.
///
/// `R` must be `Copy + 'static` so the data can be copied byte-wise
/// into the mapped buffer.
pub struct RawBuffer<R: Copy + 'static> {
    pub buffer: wgpu::Buffer,
    pub data: Vec<R>,
}
impl<R: Copy> RawBuffer<R> {
    /// Converts each element of `data` to its raw form via `ToRaw` and
    /// uploads the result to a new GPU buffer.
    pub fn from_slice<T: ToRaw<Output=R>>(device: &wgpu::Device, data: &[T], usage: wgpu::BufferUsage) -> Self {
        let raw_data = data.iter().map(ToRaw::to_raw).collect::<Vec<R>>();
        Self::from_vec(device, raw_data, usage)
    }

    /// Creates a GPU buffer filled with `data`, keeping the CPU-side copy.
    pub fn from_vec(device: &wgpu::Device, data: Vec<R>, usage: wgpu::BufferUsage) -> Self {
        let buffer = device
            .create_buffer_mapped(data.len(), usage)
            .fill_from_slice(&data);
        Self::from_parts(buffer, data, usage)
    }

    /// Pairs an existing GPU buffer with its CPU-side data.
    ///
    /// `_usage` is not stored (the struct has no usage field); it is kept in
    /// the signature for call-site compatibility and prefixed with `_` to
    /// silence the unused-parameter lint.
    pub fn from_parts(buffer: wgpu::Buffer, data: Vec<R>, _usage: wgpu::BufferUsage) -> Self {
        Self { buffer, data }
    }

    /// Size of the CPU-side data in bytes.
    pub fn buffer_size(&self) -> wgpu::BufferAddress {
        (self.data.len() * mem::size_of::<R>()) as wgpu::BufferAddress
    }
}
/// A typed buffer: keeps the high-level values (`U`) alongside the raw
/// GPU buffer built from their `ToRaw` output (`R`).
pub struct Buffer<U: ToRaw<Output=R>, R: Copy + 'static> {
    pub data: Vec<U>,
    pub raw_buffer: RawBuffer<R>,
    // Recorded at construction; not read anywhere in this file yet.
    usage: wgpu::BufferUsage,
}
impl<U: ToRaw<Output=R>, R: Copy + 'static> Buffer<U, R> {
    /// Creates a uniform buffer holding a single value.
    pub fn uniform(device: &wgpu::Device, datum: U) -> Self {
        let data = vec![datum];
        let usage = wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST;
        Self::with_usage(device, data, usage)
    }

    /// Creates a storage buffer from `data`.
    pub fn storage(device: &wgpu::Device, data: Vec<U>) -> Self {
        let usage = wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST;
        Self::with_usage(device, data, usage)
    }

    /// Creates an empty, CPU-mappable staging buffer sized to match
    /// `other`'s raw buffer. Only `other`'s byte size is used here;
    /// no data is copied.
    pub fn staging(device: &wgpu::Device, other: &Self) -> Self {
        let buffer_size = other.raw_buffer.buffer_size();
        let usage = wgpu::BufferUsage::COPY_SRC | wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::MAP_WRITE;
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            size: buffer_size,
            usage,
        });
        let raw_buffer = RawBuffer::from_parts(buffer, Vec::new(), usage);
        Self::from_parts(Vec::new(), raw_buffer, usage)
    }

    /// Converts `data` via `ToRaw`, uploads it, and wraps both copies.
    pub fn with_usage(device: &wgpu::Device, data: Vec<U>, usage: wgpu::BufferUsage) -> Self {
        let raw_buffer = RawBuffer::from_slice(device, &data, usage);
        Self::from_parts(data, raw_buffer, usage)
    }

    /// Assembles a `Buffer` from already-constructed parts.
    pub fn from_parts(data: Vec<U>, raw_buffer: RawBuffer<R>, usage: wgpu::BufferUsage) -> Self {
        Self { data, raw_buffer, usage }
    }
}

@ -0,0 +1,25 @@
/// Converts an OpenGL-convention clip space to wgpu's: remaps z from
/// [-1, 1] to [0, 1] (z' = 0.5*z + 0.5) and negates y. Apply after an
/// OpenGL-style projection matrix.
#[cfg_attr(rustfmt, rustfmt_skip)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
    // cgmath::Matrix4::new takes column-major arguments:
    // each source row below is one matrix COLUMN.
    1.0, 0.0, 0.0, 0.0,
    0.0, -1.0, 0.0, 0.0,
    0.0, 0.0, 0.5, 0.0,
    0.0, 0.0, 0.5, 1.0,
);
/// A look-at camera with a perspective projection.
pub struct Camera {
    // Camera position in world space.
    eye: cgmath::Point3<f32>,
    // Point the camera looks at.
    target: cgmath::Point3<f32>,
    // World-space "up" direction.
    up: cgmath::Vector3<f32>,
    // Viewport width / height ratio.
    aspect: f32,
    // Vertical field of view, in degrees (wrapped in cgmath::Deg below).
    fovy: f32,
    // Near and far clip-plane distances.
    znear: f32,
    zfar: f32,
}
impl Camera {
    /// Combines this camera's view and projection transforms into a single
    /// view-projection matrix.
    pub fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
        // World space -> camera space.
        let view_matrix = cgmath::Matrix4::look_at(self.eye, self.target, self.up);
        // Camera space -> clip space.
        let projection =
            cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
        projection * view_matrix
    }
}

@ -0,0 +1,9 @@
// Framework crate root: the submodules are private, but their contents are
// re-exported so consumers can write `framework::Texture`, `framework::Camera`,
// etc. without naming the submodule.
mod buffer;
mod camera;
mod model;
mod texture;

pub use buffer::*;
pub use camera::*;
pub use model::*;
pub use texture::*;

@ -0,0 +1,176 @@
use std::path::Path;
use std::ops::Range;
use crate::texture;
/// Anything that can describe its vertex-buffer layout to wgpu.
pub trait Vertex {
    fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a>;
}

/// Vertex layout used by loaded models: position, texture coordinates, normal.
#[repr(C)] // guarantee C field order/packing so offsets below are reliable
#[derive(Copy, Clone, Debug)]
pub struct ModelVertex {
    position: [f32; 3],
    tex_coords: [f32; 2],
    normal: [f32; 3],
}
impl Vertex for ModelVertex {
    /// Describes how a `ModelVertex` is laid out in a vertex buffer.
    fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
        use std::mem;
        wgpu::VertexBufferDescriptor {
            stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
            step_mode: wgpu::InputStepMode::Vertex,
            attributes: &[
                // position -> shader location 0
                wgpu::VertexAttributeDescriptor {
                    offset: 0,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float3,
                },
                // tex_coords -> location 1, starts after the 3 position floats
                wgpu::VertexAttributeDescriptor {
                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float2,
                },
                // normal -> location 2; [f32; 5] = position (3) + tex_coords (2)
                wgpu::VertexAttributeDescriptor {
                    offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
                    shader_location: 2,
                    format: wgpu::VertexFormat::Float3,
                },
            ]
        }
    }
}
/// A named material: a diffuse texture plus the bind group that exposes it
/// (binding 0 = texture view, binding 1 = sampler — see `Model::load`).
pub struct Material {
    pub name: String,
    pub diffuse_texture: texture::Texture,
    pub bind_group: wgpu::BindGroup,
}

/// GPU-side geometry for one mesh, plus the index of its material in the
/// owning `Model::materials`.
pub struct Mesh {
    pub name: String,
    pub vertex_buffer: wgpu::Buffer,
    pub index_buffer: wgpu::Buffer,
    // Number of indices in `index_buffer`.
    pub num_elements: u32,
    pub material: usize,
}

/// A loaded model: meshes plus the materials they reference by index.
pub struct Model {
    pub meshes: Vec<Mesh>,
    pub materials: Vec<Material>,
}
impl Model {
    /// Loads an OBJ file, its materials, and their diffuse textures.
    ///
    /// `layout` is the bind group layout used for each material's bindings
    /// (binding 0 = texture view, binding 1 = sampler).
    ///
    /// Returns the model together with the texture-upload command buffers;
    /// the caller must submit those for the uploads to take effect.
    pub fn load<P: AsRef<Path>>(device: &wgpu::Device, layout: &wgpu::BindGroupLayout, path: P) -> Result<(Self, Vec<wgpu::CommandBuffer>), failure::Error> {
        let (obj_models, obj_materials) = tobj::load_obj(path.as_ref())?;

        // We're assuming that the texture files are stored with the obj file
        let containing_folder = path.as_ref().parent().unwrap();

        // Our `Texture` struct currently returns a `CommandBuffer` when it's created so we need to collect those and return them.
        let mut command_buffers = Vec::new();

        let mut materials = Vec::new();
        for mat in obj_materials {
            let diffuse_path = mat.diffuse_texture;
            let (diffuse_texture, cmds) = texture::Texture::load(&device, containing_folder.join(diffuse_path))?;

            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout,
                bindings: &[
                    wgpu::Binding {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
                    },
                    wgpu::Binding {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
                    },
                ]
            });

            materials.push(Material {
                name: mat.name,
                diffuse_texture,
                bind_group,
            });
            command_buffers.push(cmds);
        }

        let mut meshes = Vec::new();
        for m in obj_models {
            let mut vertices = Vec::new();
            // tobj stores attributes as flat float arrays: 3 position values,
            // 2 texcoord values, and 3 normal values per vertex.
            for i in 0..m.mesh.positions.len() / 3 {
                vertices.push(ModelVertex {
                    position: [
                        m.mesh.positions[i * 3],
                        m.mesh.positions[i * 3 + 1],
                        m.mesh.positions[i * 3 + 2],
                    ],
                    tex_coords: [
                        m.mesh.texcoords[i * 2],
                        m.mesh.texcoords[i * 2 + 1],
                    ],
                    normal: [
                        m.mesh.normals[i * 3],
                        m.mesh.normals[i * 3 + 1],
                        m.mesh.normals[i * 3 + 2],
                    ],
                });
            }

            let vertex_buffer = device
                .create_buffer_mapped(vertices.len(), wgpu::BufferUsage::VERTEX)
                .fill_from_slice(&vertices);
            let index_buffer = device
                .create_buffer_mapped(m.mesh.indices.len(), wgpu::BufferUsage::INDEX)
                .fill_from_slice(&m.mesh.indices);

            meshes.push(Mesh {
                name: m.name,
                vertex_buffer,
                index_buffer,
                num_elements: m.mesh.indices.len() as u32,
                // Fall back to material 0 when the mesh has none assigned.
                material: m.mesh.material_id.unwrap_or(0),
            });
        }

        Ok((Self { meshes, materials, }, command_buffers))
    }
}
/// Drawing helpers implemented on `wgpu::RenderPass` so callers can write
/// `rpass.draw_model(&model, &uniforms)` directly.
pub trait DrawModel {
    fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup);
    fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup);

    fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup);
    fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup);
}
impl<'a> DrawModel for wgpu::RenderPass<'a> {
    /// Draws a single instance of `mesh`.
    fn draw_mesh(&mut self, mesh: &Mesh, material: &Material, uniforms: &wgpu::BindGroup) {
        self.draw_mesh_instanced(mesh, material, 0..1, uniforms);
    }

    /// Draws `instances` copies of `mesh` with `material`.
    /// Bind group 0 carries the material's texture/sampler, group 1 the uniforms.
    fn draw_mesh_instanced(&mut self, mesh: &Mesh, material: &Material, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
        self.set_vertex_buffers(0, &[(&mesh.vertex_buffer, 0)]);
        self.set_index_buffer(&mesh.index_buffer, 0);
        self.set_bind_group(0, &material.bind_group, &[]);
        self.set_bind_group(1, &uniforms, &[]);
        self.draw_indexed(0..mesh.num_elements, 0, instances);
    }

    /// Draws a single instance of every mesh in `model`.
    fn draw_model(&mut self, model: &Model, uniforms: &wgpu::BindGroup) {
        self.draw_model_instanced(model, 0..1, uniforms);
    }

    /// Draws every mesh in `model`, resolving each mesh's material by index.
    fn draw_model_instanced(&mut self, model: &Model, instances: Range<u32>, uniforms: &wgpu::BindGroup) {
        for mesh in &model.meshes {
            let material = &model.materials[mesh.material];
            self.draw_mesh_instanced(mesh, material, instances.clone(), uniforms);
        }
    }
}

@ -0,0 +1,133 @@
use image::GenericImageView;
use std::path::Path;
use std::mem;
use crate::buffer;
/// A GPU texture bundled with its default view, a sampler, and the
/// descriptor it was created from (kept so callers can query size/format
/// later, e.g. when copying the texture into a buffer).
pub struct Texture {
    pub texture: wgpu::Texture,
    pub view: wgpu::TextureView,
    pub sampler: wgpu::Sampler,
    pub desc: wgpu::TextureDescriptor,
}
impl Texture {
    /// Depth-buffer format used by `create_depth_texture`.
    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

    /// The sampler configuration shared by every texture in this module:
    /// clamp-to-edge addressing, linear magnification, nearest minification.
    /// (Previously this descriptor was duplicated in `from_descriptor` and
    /// `from_image`; extracted so the two cannot drift apart.)
    fn default_sampler(device: &wgpu::Device) -> wgpu::Sampler {
        device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            lod_min_clamp: -100.0,
            lod_max_clamp: 100.0,
            compare_function: wgpu::CompareFunction::Always,
        })
    }

    /// Loads an image file from `path` and uploads it to the GPU.
    ///
    /// Returns the texture plus the `CommandBuffer` performing the upload;
    /// the caller must submit that buffer before sampling the texture.
    pub fn load<P: AsRef<Path>>(device: &wgpu::Device, path: P) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
        let img = image::open(path)?;
        Self::from_image(device, &img)
    }

    /// Creates a depth texture whose dimensions match the swap chain's.
    pub fn create_depth_texture(device: &wgpu::Device, sc_desc: &wgpu::SwapChainDescriptor) -> Self {
        let desc = wgpu::TextureDescriptor {
            format: Self::DEPTH_FORMAT,
            usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
            ..sc_desc.to_texture_desc()
        };
        Self::from_descriptor(device, desc)
    }

    /// Creates an (uninitialized) texture from an arbitrary descriptor.
    pub fn from_descriptor(device: &wgpu::Device, desc: wgpu::TextureDescriptor) -> Self {
        let texture = device.create_texture(&desc);
        let view = texture.create_default_view();
        let sampler = Self::default_sampler(device);
        Self { texture, view, sampler, desc }
    }

    /// Decodes an in-memory image and uploads it; see `from_image`.
    pub fn from_bytes(device: &wgpu::Device, bytes: &[u8]) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
        let img = image::load_from_memory(bytes)?;
        Self::from_image(device, &img)
    }

    /// Uploads a decoded image as an RGBA8 sRGB texture.
    ///
    /// The pixels are staged through a mapped buffer and transferred with a
    /// `copy_buffer_to_texture` command; the returned `CommandBuffer` must be
    /// submitted by the caller for the copy to happen.
    pub fn from_image(device: &wgpu::Device, img: &image::DynamicImage) -> Result<(Self, wgpu::CommandBuffer), failure::Error> {
        let rgba = img.to_rgba();
        let dimensions = img.dimensions();

        let size = wgpu::Extent3d {
            width: dimensions.0,
            height: dimensions.1,
            depth: 1,
        };
        let desc = wgpu::TextureDescriptor {
            size,
            array_layer_count: 1,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
        };
        let texture = device.create_texture(&desc);

        let buffer = device
            .create_buffer_mapped(rgba.len(), wgpu::BufferUsage::COPY_SRC)
            .fill_from_slice(&rgba);

        let mut encoder = device.create_command_encoder(&Default::default());
        encoder.copy_buffer_to_texture(
            wgpu::BufferCopyView {
                buffer: &buffer,
                offset: 0,
                // Rows are tightly packed: 4 bytes per RGBA pixel.
                row_pitch: 4 * dimensions.0,
                image_height: dimensions.1,
            },
            wgpu::TextureCopyView {
                texture: &texture,
                mip_level: 0,
                array_layer: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            size,
        );
        let cmd_buffer = encoder.finish();

        let view = texture.create_default_view();
        let sampler = Self::default_sampler(device);

        Ok((Self { texture, view, sampler, desc }, cmd_buffer))
    }

    /// Creates a CPU-mappable buffer sized to hold this texture's pixels as
    /// `[f32; 4]` RGBA values, intended as a readback copy destination.
    /// The CPU-side `Vec` starts empty (capacity only).
    pub fn prepare_buffer_rgba(&self, device: &wgpu::Device) -> buffer::RawBuffer<[f32;4]> {
        let num_pixels = self.desc.size.width * self.desc.size.height * self.desc.size.depth;
        let buffer_size = num_pixels * mem::size_of::<[f32;4]>() as u32;
        let buffer_usage = wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ;
        let buffer_desc = wgpu::BufferDescriptor {
            size: buffer_size as wgpu::BufferAddress,
            usage: buffer_usage,
        };
        let buffer = device.create_buffer(&buffer_desc);
        let data = Vec::with_capacity(num_pixels as usize);
        buffer::RawBuffer::from_parts(buffer, data, buffer_usage)
    }
}

@ -0,0 +1,19 @@
[package]
name = "gifs"
version = "0.1.0"
authors = ["Ben Hansen <bhbenjaminhansen@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.22.4"
winit = "0.20.0"
glsl-to-spirv = "0.1.7"
cgmath = "0.17.0"
failure = "0.1"
tobj = "0.1"
wgpu = "0.4.0"
gif = "0.10.3"
framework = { path = "../framework" }

@ -0,0 +1,203 @@
extern crate framework;
use std::mem;
use std::sync::{Arc, Mutex};
fn main() {
    // Headless rendering: request an adapter/device without any window
    // or swap chain.
    let adapter = wgpu::Adapter::request(&Default::default()).unwrap();
    let (device, mut queue) = adapter.request_device(&Default::default());

    // One clear color per GIF frame; the ramp up and back down makes the
    // animation loop smoothly.
    let colors = [
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.2],
        [0.0, 0.2, 0.2],
        [0.2, 0.2, 0.2],
        [0.2, 0.2, 0.2],
        [0.0, 0.2, 0.2],
        [0.0, 0.0, 0.2],
        [0.0, 0.0, 0.0],
    ];

    // create a texture to render to
    let texture_size = 256u32;
    let rt_desc = wgpu::TextureDescriptor {
        size: wgpu::Extent3d {
            width: texture_size,
            height: texture_size,
            depth: 1,
        },
        array_layer_count: colors.len() as u32,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        // COPY_SRC so we can read the pixels back; OUTPUT_ATTACHMENT so we
        // can render to it.
        usage: wgpu::TextureUsage::COPY_SRC
            | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
    };
    let render_target = framework::Texture::from_descriptor(&device, rt_desc);

    // create a buffer to copy the texture to so we can get the data
    let pixel_size = mem::size_of::<[u8;4]>() as u32;
    let buffer_size = (pixel_size * texture_size * texture_size) as wgpu::BufferAddress;
    let buffer_desc = wgpu::BufferDescriptor {
        size: buffer_size,
        usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ,
    };
    let output_buffer = device.create_buffer(&buffer_desc);

    // a simple render pipeline that draws a triangle
    let render_pipeline = create_render_pipeline(&device, &render_target);

    // we need to store this in an arc-mutex so we can pass it to the mapping function
    let frames = Arc::new(Mutex::new(Vec::new()));

    for c in &colors {
        let mut encoder = device.create_command_encoder(&Default::default());

        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            color_attachments: &[
                wgpu::RenderPassColorAttachmentDescriptor {
                    attachment: &render_target.view,
                    resolve_target: None,
                    load_op: wgpu::LoadOp::Clear,
                    store_op: wgpu::StoreOp::Store,
                    // modify the clear color so the gif changes
                    clear_color: wgpu::Color {
                        r: c[0],
                        g: c[1],
                        b: c[2],
                        a: 1.0,
                    }
                }
            ],
            depth_stencil_attachment: None,
        });

        rpass.set_pipeline(&render_pipeline);
        rpass.draw(0..3, 0..1);

        // End the render pass (releases the borrow on `encoder`) before
        // recording the copy below.
        drop(rpass);

        encoder.copy_texture_to_buffer(
            wgpu::TextureCopyView {
                texture: &render_target.texture,
                mip_level: 0,
                array_layer: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            wgpu::BufferCopyView {
                buffer: &output_buffer,
                offset: 0,
                row_pitch: pixel_size * texture_size,
                image_height: texture_size,
            },
            render_target.desc.size
        );

        queue.submit(&[encoder.finish()]);

        // Read the rendered frame back and append it to the shared frame list.
        let frames_clone = frames.clone();
        output_buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[u8]>| {
            match result {
                Ok(mapping) => {
                    let data = Vec::from(mapping.data);
                    let mut f = frames_clone.lock().unwrap();
                    (*f).push(data);
                }
                _ => { eprintln!("Something went wrong") }
            }
        });

        // wait for the GPU to finish
        device.poll(true);
    }

    // All callbacks have run by now, so we hold the only Arc reference and
    // can unwrap the frames back out of the mutex.
    let mut frames = Arc::try_unwrap(frames)
        .unwrap()
        .into_inner()
        .unwrap();

    save_gif("output.gif", &mut frames, 10, texture_size as u16).unwrap();
}
/// Encodes `frames` (raw RGBA bytes, one `Vec<u8>` per frame) into an
/// infinitely looping GIF at `path`. `size` is used for both width and
/// height; `speed` is forwarded to `gif::Frame::from_rgba_speed`.
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
    use gif::{Frame, Encoder, Repeat, SetParameter};

    let mut image = std::fs::File::create(path)?;
    let mut encoder = Encoder::new(&mut image, size, size, &[])?;
    encoder.set(Repeat::Infinite)?;

    for mut frame in frames {
        // from_rgba_speed needs mutable access to quantize the pixel data.
        encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
    }

    Ok(())
}
// The image crate currently doesn't support looping gifs, so I'm not using this
// code. I'm keeping it around in case image adds looping support.
/// Same contract as `save_gif`, but implemented with the `image` crate's
/// GIF encoder (which cannot set a repeat count).
#[allow(unused)]
fn save_gif_old(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
    let output = std::fs::File::create(path)?;
    let mut encoder = image::gif::Encoder::new(output);

    for mut data in frames {
        let frame = image::gif::Frame::from_rgba_speed(size, size, &mut data, speed);
        encoder.encode(&frame)?;
    }

    Ok(())
}
/// Builds the pipeline that draws the showcase triangle: GLSL shaders are
/// compiled to SPIR-V at runtime, no bind groups or vertex buffers are used
/// (the triangle is hard-coded in the vertex shader), and the color target
/// format matches `target`'s descriptor.
fn create_render_pipeline(device: &wgpu::Device, target: &framework::Texture) -> wgpu::RenderPipeline {
    let vs_src = include_str!("res/shader.vert");
    let fs_src = include_str!("res/shader.frag");
    // glsl-to-spirv compiles the GLSL source at runtime.
    let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
    let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
    let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
    let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
    let vs_module = device.create_shader_module(&vs_data);
    let fs_module = device.create_shader_module(&fs_data);

    // Empty layout: the shaders use no bindings.
    let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        bind_group_layouts: &[],
    });
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        layout: &render_pipeline_layout,
        vertex_stage: wgpu::ProgrammableStageDescriptor {
            module: &vs_module,
            entry_point: "main",
        },
        fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
            module: &fs_module,
            entry_point: "main",
        }),
        rasterization_state: Some(wgpu::RasterizationStateDescriptor {
            front_face: wgpu::FrontFace::Ccw,
            cull_mode: wgpu::CullMode::Back,
            depth_bias: 0,
            depth_bias_slope_scale: 0.0,
            depth_bias_clamp: 0.0,
        }),
        primitive_topology: wgpu::PrimitiveTopology::TriangleList,
        color_states: &[
            wgpu::ColorStateDescriptor {
                // Must match the texture this pipeline renders into.
                format: target.desc.format,
                color_blend: wgpu::BlendDescriptor::REPLACE,
                alpha_blend: wgpu::BlendDescriptor::REPLACE,
                write_mask: wgpu::ColorWrite::ALL,
            },
        ],
        depth_stencil_state: None,
        index_format: wgpu::IndexFormat::Uint16,
        vertex_buffers: &[],
        sample_count: 1,
        sample_mask: !0,
        alpha_to_coverage_enabled: false,
    });

    render_pipeline
}

@ -0,0 +1,35 @@
#version 450

layout(location=0) in vec2 v_tex_coords;
layout(location=1) in vec3 v_normal;
layout(location=2) in vec3 v_position;

layout(location=0) out vec4 f_color;

layout(set = 0, binding = 0) uniform texture2D t_diffuse;
layout(set = 0, binding = 1) uniform sampler s_diffuse;

layout(set=1, binding=2)
uniform Lights {
    vec3 u_light;
};

// Blinn-Phong constants: no ambient contribution, white highlights.
const vec3 ambient_color = vec3(0.0, 0.0, 0.0);
const vec3 specular_color = vec3(1.0, 1.0, 1.0);

const float shininess = 32;

void main() {
    vec4 diffuse_color = texture(sampler2D(t_diffuse, s_diffuse), v_tex_coords);
    // Lambertian term, clamped so surfaces facing away get no light.
    float diffuse_term = max(dot(normalize(v_normal), normalize(u_light)), 0);
    vec3 camera_dir = normalize(-v_position);

    // This is an approximation of the actual reflection vector, aka what
    // angle you have to look at the object to be blinded by the light
    vec3 half_direction = normalize(normalize(u_light) + camera_dir);
    float specular_term = pow(max(dot(normalize(v_normal), half_direction), 0.0), shininess);

    f_color = vec4(ambient_color, 1.0) + vec4(specular_term * specular_color, 1.0) + diffuse_term * diffuse_color;
}

@ -0,0 +1,34 @@
#version 450

layout(location=0) in vec3 a_position;
layout(location=1) in vec2 a_tex_coords;
layout(location=2) in vec3 a_normal;

layout(location=0) out vec2 v_tex_coords;
layout(location=1) out vec3 v_normal;
layout(location=2) out vec3 v_position;

layout(set=1, binding=0)
uniform Uniforms {
    mat4 u_view_proj;
};

// Per-instance model matrices, indexed with gl_InstanceIndex below.
layout(set=1, binding=1)
buffer Instances {
    mat4 s_models[];
};

void main() {
    v_tex_coords = a_tex_coords;

    mat4 model = s_models[gl_InstanceIndex];

    // Rotate the normals with respect to the model, ignoring scaling
    mat3 normal_matrix = mat3(transpose(inverse(mat3(model))));
    v_normal = normal_matrix * a_normal;

    gl_Position = u_view_proj * model * vec4(a_position, 1.0);

    // Get the position relative to the view for the lighting calc
    v_position = gl_Position.xyz / gl_Position.w;
}

@ -0,0 +1,7 @@
#version 450

layout(location=0) out vec4 f_color;

// Solid fill color applied to every fragment of the triangle.
const vec4 FILL_COLOR = vec4(0.3, 0.2, 0.1, 1.0);

void main() {
    f_color = FILL_COLOR;
}

@ -0,0 +1,11 @@
#version 450

// Clip-space triangle baked into the shader; no vertex buffer is bound.
const vec2 positions[3] = vec2[3](
    vec2(0.0, -0.5),
    vec2(-0.5, 0.5),
    vec2(0.5, 0.5)
);

void main() {
    // gl_VertexIndex selects one of the three hard-coded vertices.
    gl_Position = vec4(positions[gl_VertexIndex], 0.0, 1.0);
}

@ -37,9 +37,17 @@ module.exports = {
collapsable: false, collapsable: false,
children: [ children: [
'/intermediate/tutorial10-lighting/', '/intermediate/tutorial10-lighting/',
'/intermediate/windowless/',
], ],
}, },
{
title: 'Showcase',
collapsable: true,
children: [
'/showcase/',
'/showcase/windowless/',
'/showcase/gifs/',
]
},
'/news/' '/news/'
] ]
} }

@ -1 +1 @@
Subproject commit bb7e290ca411ae49b3eb2e20b3dcde36af454ae1 Subproject commit 50afb738454fd9a4d69e42bea313f79386300908

@ -0,0 +1,3 @@
# Foreword
The articles in this section are not meant to be tutorials. They are showcases of the various things you can do with `wgpu`. I won't go over specifics of creating `wgpu` resources, as those will be covered elsewhere. The code for these examples is still available however, and will be accessible on Github.

@ -0,0 +1,163 @@
# Creating gifs
Sometimes you've created a nice simulation/animation, and you want to show it off. While you can record a video, breaking out your video recording software might be overkill if you just want something to post on Twitter. That's what [GIF](https://en.wikipedia.org/wiki/GIF)s are for.
Also, GIF is pronounced GHIF, not JIF as JIF is not only [peanut butter](https://en.wikipedia.org/wiki/Jif_%28peanut_butter%29), it is also a [different image format](https://filext.com/file-extension/JIF).
## How are we making the GIF?
We're going to create a function using the [gif crate](https://docs.rs/gif/) to encode the actual image.
```rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
use gif::{Frame, Encoder, Repeat, SetParameter};
let mut image = std::fs::File::create(path)?;
let mut encoder = Encoder::new(&mut image, size, size, &[])?;
encoder.set(Repeat::Infinite)?;
for mut frame in frames {
encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
}
Ok(())
}
```
<!-- image-rs doesn't currently support looping, so I switched to gif -->
<!-- A GIF is a type of image, and fortunately the [image crate](https://docs.rs/image/) supports GIFs natively. It's pretty simple to use. -->
<!-- ```rust
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> Result<(), failure::Error> {
let output = std::fs::File::create(path)?;
let mut encoder = image::gif::Encoder::new(output);
for mut data in frames {
let frame = image::gif::Frame::from_rgba_speed(size, size, &mut data, speed);
encoder.encode(&frame)?;
}
Ok(())
}
``` -->
All we need to use this code is the frames of the GIF, how fast it should run, and the size of the GIF (you could use width and height separately, but I didn't).
## How do we make the frames?
If you checked out the [windowless showcase](../windowless/#a-triangle-without-a-window), you'll know that we render directly to a `wgpu::Texture`. We'll create a texture to render to and a buffer to copy the output to.
```rust
// create a texture to render to
let texture_size = 256u32;
let rt_desc = wgpu::TextureDescriptor {
size: wgpu::Extent3d {
width: texture_size,
height: texture_size,
depth: 1,
},
array_layer_count: colors.len() as u32,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let render_target = framework::Texture::from_descriptor(&device, rt_desc);
// create a buffer to copy the texture to so we can get the data
let pixel_size = mem::size_of::<[u8;4]>() as u32;
let buffer_size = (pixel_size * texture_size * texture_size) as wgpu::BufferAddress;
let buffer_desc = wgpu::BufferDescriptor {
size: buffer_size,
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ,
};
let output_buffer = device.create_buffer(&buffer_desc);
```
With that we can render a frame, and then copy that frame to a `Vec<u8>`.
```rust
// we need to store this in an arc-mutex so we can pass it to the mapping function
let frames = Arc::new(Mutex::new(Vec::new()));
for c in &colors {
let mut encoder = device.create_command_encoder(&Default::default());
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
attachment: &render_target.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
// modify the clear color so the gif changes
clear_color: wgpu::Color {
r: c[0],
g: c[1],
b: c[2],
a: 1.0,
}
}
],
depth_stencil_attachment: None,
});
rpass.set_pipeline(&render_pipeline);
rpass.draw(0..3, 0..1);
drop(rpass);
encoder.copy_texture_to_buffer(
wgpu::TextureCopyView {
texture: &render_target.texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
wgpu::BufferCopyView {
buffer: &output_buffer,
offset: 0,
row_pitch: pixel_size * texture_size,
image_height: texture_size,
},
render_target.desc.size
);
queue.submit(&[encoder.finish()]);
let frames_clone = frames.clone();
output_buffer.map_read_async(0, buffer_size, move |result: wgpu::BufferMapAsyncResult<&[u8]>| {
match result {
Ok(mapping) => {
let data = Vec::from(mapping.data);
let mut f = frames_clone.lock().unwrap();
(*f).push(data);
}
_ => { eprintln!("Something went wrong") }
}
});
// wait for the GPU to finish
device.poll(true);
}
```
Once that's done we can pull the frame data out of the `Arc<Mutex<_>>`, and pass it into `save_gif()`.
```rust
let mut frames = Arc::try_unwrap(frames)
.unwrap()
.into_inner()
.unwrap();
save_gif("output.gif", &mut frames, 10, texture_size as u16).unwrap();
```
That's the gist of it. We can improve things using a texture array, and sending the draw commands all at once, but this gets the idea across. With the shader I wrote we get the following GIF.
![./output.gif](./output.gif)
<AutoGithubLink/>

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.8 KiB

Before

Width:  |  Height:  |  Size: 1.5 KiB

After

Width:  |  Height:  |  Size: 1.5 KiB

Loading…
Cancel
Save