migrated windowless docs

This commit is contained in:
Ben Hansen 2021-02-18 20:13:23 -07:00
parent eff8418d37
commit 361bee3c58
2 changed files with 101 additions and 77 deletions

View File

@ -163,6 +163,8 @@ async fn run() {
queue.submit(Some(encoder.finish())); queue.submit(Some(encoder.finish()));
// We need to scope the mapping variables so that we can
// unmap the buffer
{ {
let buffer_slice = output_buffer.slice(..); let buffer_slice = output_buffer.slice(..);
@ -179,7 +181,6 @@ async fn run() {
ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap(); ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
buffer.save("image.png").unwrap(); buffer.save("image.png").unwrap();
} }
output_buffer.unmap(); output_buffer.unmap();
} }

View File

@ -7,14 +7,17 @@ Sometimes we just want to leverage the gpu. Maybe we want to crunch a large set
It's actually quite simple. We don't *need* a window to create an `Instance`, we don't *need* a window to select an `Adapter`, nor do we *need* a window to create a `Device`. We only needed the window to create a `Surface` which we needed to create the `SwapChain`. Once we have a `Device`, we have all we need to start sending commands to the gpu. It's actually quite simple. We don't *need* a window to create an `Instance`, we don't *need* a window to select an `Adapter`, nor do we *need* a window to create a `Device`. We only needed the window to create a `Surface` which we needed to create the `SwapChain`. Once we have a `Device`, we have all we need to start sending commands to the gpu.
```rust ```rust
let adapter = wgpu::Adapter::request( let adapter = instance
&wgpu::RequestAdapterOptions { .request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(), power_preference: wgpu::PowerPreference::default(),
compatible_surface: None, compatible_surface: None,
}, })
wgpu::BackendBit::PRIMARY, .await
).await.unwrap(); .unwrap();
let (device, queue) = adapter.request_device(&Default::default()).await; let (device, queue) = adapter
.request_device(&Default::default(), None)
.await
.unwrap();
``` ```
## A triangle without a window ## A triangle without a window
@ -30,7 +33,6 @@ let texture_desc = wgpu::TextureDescriptor {
height: texture_size, height: texture_size,
depth: 1, depth: 1,
}, },
array_layer_count: 1,
mip_level_count: 1, mip_level_count: 1,
sample_count: 1, sample_count: 1,
dimension: wgpu::TextureDimension::D2, dimension: wgpu::TextureDimension::D2,
@ -40,9 +42,8 @@ let texture_desc = wgpu::TextureDescriptor {
, ,
label: None, label: None,
}; };
let texture = device.create_texture(&texture_desc); let texture = device.create_texture(&texture_desc);
let texture_view = texture.create_default_view(); let texture_view = texture.create_view(&Default::default());
``` ```
We're using `TextureUsage::OUTPUT_ATTACHMENT` so wgpu can render to our texture. The `TextureUsage::COPY_SRC` is so we can pull data out of the texture so we can save it to a file. We're using `TextureUsage::OUTPUT_ATTACHMENT` so wgpu can render to our texture. The `TextureUsage::COPY_SRC` is so we can pull data out of the texture so we can save it to a file.
@ -60,6 +61,7 @@ let output_buffer_desc = wgpu::BufferDescriptor {
// this tells wgpu that we want to read this buffer from the cpu // this tells wgpu that we want to read this buffer from the cpu
| wgpu::BufferUsage::MAP_READ, | wgpu::BufferUsage::MAP_READ,
label: None, label: None,
mapped_at_creation: false,
}; };
let output_buffer = device.create_buffer(&output_buffer_desc); let output_buffer = device.create_buffer(&output_buffer_desc);
``` ```
@ -98,58 +100,75 @@ Using that we'll create a simple `RenderPipeline`.
let vs_src = include_str!("shader.vert"); let vs_src = include_str!("shader.vert");
let fs_src = include_str!("shader.frag"); let fs_src = include_str!("shader.frag");
let mut compiler = shaderc::Compiler::new().unwrap(); let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(vs_src, shaderc::ShaderKind::Vertex, "shader.vert", "main", None).unwrap(); let vs_spirv = compiler
let fs_spirv = compiler.compile_into_spirv(fs_src, shaderc::ShaderKind::Fragment, "shader.frag", "main", None).unwrap(); .compile_into_spirv(
let vs_data = wgpu::read_spirv(std::io::Cursor::new(vs_spirv.as_binary_u8())).unwrap(); vs_src,
let fs_data = wgpu::read_spirv(std::io::Cursor::new(fs_spirv.as_binary_u8())).unwrap(); shaderc::ShaderKind::Vertex,
let vs_module = device.create_shader_module(&vs_data); "shader.vert",
let fs_module = device.create_shader_module(&fs_data); "main",
None,
)
.unwrap();
let fs_spirv = compiler
.compile_into_spirv(
fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)
.unwrap();
let vs_data = wgpu::util::make_spirv(vs_spirv.as_binary_u8());
let fs_data = wgpu::util::make_spirv(fs_spirv.as_binary_u8());
let vs_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Vertex Shader"),
source: vs_data,
flags: wgpu::ShaderFlags::default(),
});
let fs_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Fragment Shader"),
source: fs_data,
flags: wgpu::ShaderFlags::default(),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[], bind_group_layouts: &[],
push_constant_ranges: &[],
}); });
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout, label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState { vertex: wgpu::VertexState {
module: &vs_module, module: &vs_module,
entry_point: "main", entry_point: "main",
buffers: &[],
}, },
fragment: Some(wgpu::FragmentState { fragment: Some(wgpu::FragmentState {
module: &fs_module, module: &fs_module,
entry_point: "main", entry_point: "main",
}), targets: &[wgpu::ColorTargetState {
rasterization_state: Some(wgpu::RasterizationStateDescriptor { format: texture_desc.format,
front_face: wgpu::FrontFace::Ccw, alpha_blend: wgpu::BlendState::REPLACE,
cull_mode: wgpu::CullMode::Back, color_blend: wgpu::BlendState::REPLACE,
depth_bias: 0, write_mask: wgpu::ColorWrite::ALL,
depth_bias_slope_scale: 0.0, }],
depth_bias_clamp: 0.0,
}), }),
primitive: wgpu::PrimitiveState { primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList, topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None, strip_index_format: None,
front_face: wgpu::FrontFace::Ccw, front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back, cull_mode: wgpu::CullMode::Back,
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill, polygon_mode: wgpu::PolygonMode::Fill,
}, },
color_states: &[ depth_stencil: None,
wgpu::ColorStateDescriptor { multisample: wgpu::MultisampleState {
format: texture_desc.format, count: 1,
color_blend: wgpu::BlendDescriptor::REPLACE, mask: !0,
alpha_blend: wgpu::BlendDescriptor::REPLACE, alpha_to_coverage_enabled: false,
write_mask: wgpu::ColorWrite::ALL,
},
],
depth_stencil: None,
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[],
}, },
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
}); });
``` ```
@ -166,17 +185,19 @@ The `RenderPass` is where things get interesting. A render pass requires at leas
```rust ```rust
{ {
let render_pass_desc = wgpu::RenderPassDescriptor { let render_pass_desc = wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[ color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor { wgpu::RenderPassColorAttachmentDescriptor {
attachment: &texture_view, attachment: &texture_view,
resolve_target: None, resolve_target: None,
load_op: wgpu::LoadOp::Clear, ops: wgpu::Operations {
store_op: wgpu::StoreOp::Store, load: wgpu::LoadOp::Clear(wgpu::Color {
clear_color: wgpu::Color { r: 0.1,
r: 0.1, g: 0.2,
g: 0.2, b: 0.3,
b: 0.3, a: 1.0,
a: 1.0, }),
store: true,
}, },
} }
], ],
@ -196,15 +217,16 @@ encoder.copy_texture_to_buffer(
wgpu::TextureCopyView { wgpu::TextureCopyView {
texture: &texture, texture: &texture,
mip_level: 0, mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO, origin: wgpu::Origin3d::ZERO,
}, },
wgpu::BufferCopyView { wgpu::BufferCopyView {
buffer: &output_buffer, buffer: &output_buffer,
offset: 0, layout: wgpu::TextureDataLayout {
bytes_per_row: u32_size * texture_size, offset: 0,
rows_per_image: texture_size, bytes_per_row: u32_size * texture_size,
}, rows_per_image: texture_size,
},
},
texture_desc.size, texture_desc.size,
); );
``` ```
@ -212,37 +234,38 @@ encoder.copy_texture_to_buffer(
Now that we've made all our commands, let's submit them to the gpu. Now that we've made all our commands, let's submit them to the gpu.
```rust ```rust
device.get_queue().submit(&[encoder.finish()]); queue.submit(Some(encoder.finish()));
``` ```
## Getting data out of a buffer ## Getting data out of a buffer
The `Buffer` struct has two methods to access its contents: `map_read`, and `map_write`. Both of these methods take in a `BufferAddress` specifying the byte to start from, the size in bytes of the chunk we're reading/writing, and a callback lambda where we'll actually access the data. We're going to use `map_read` to save our `output_buffer` to a png file. In order to get the data out of the buffer we need to first map it, then we can get a `BufferView` that we can treat like a `&[u8]`.
The actual mapping code is fairly simple.
```rust ```rust
// NOTE: We have to create the mapping THEN device.poll(). If we don't // We need to scope the mapping variables so that we can
// the application will freeze. // unmap the buffer
let mapping = output_buffer.map_read(0, output_buffer_size); {
device.poll(wgpu::Maintain::Wait); let buffer_slice = output_buffer.slice(..);
let result = mapping.await.unwrap(); // NOTE: We have to create the mapping THEN device.poll() before await
let data = result.as_slice(); // the future. Otherwise the application will freeze.
let mapping = buffer_slice.map_async(wgpu::MapMode::Read);
device.poll(wgpu::Maintain::Wait);
mapping.await.unwrap();
use image::{ImageBuffer, Rgba}; let data = buffer_slice.get_mapped_range();
let buffer = ImageBuffer::<Rgba<u8>, _>::from_raw(
texture_size,
texture_size,
data,
).unwrap();
buffer.save("image.png").unwrap(); use image::{ImageBuffer, Rgba};
let buffer =
ImageBuffer::<Rgba<u8>, _>::from_raw(texture_size, texture_size, data).unwrap();
buffer.save("image.png").unwrap();
}
output_buffer.unmap();
``` ```
## Main is not asyncable ## Main is not asyncable
The `main()` method can't return a future, so we can't use the `async` keyword. We'll get around this by putting our code into a different function so that we can block on it in `main()`. You'll need to use the [futures crate](https://docs.rs/futures). The `main()` method can't return a future, so we can't use the `async` keyword. We'll get around this by putting our code into a different function so that we can block on it in `main()`. You'll need to use a crate that can poll futures such as the [futures crate](https://docs.rs/futures).
```rust ```rust
async fn run() { async fn run() {