world_texture_view: wgpu::TextureView,
// world_texture_sampler: wgpu::Sampler,
world_texture_bind_group: wgpu::BindGroup,
/// Renders from the world texture to screen space within the bounds of the camera
// TODO eventually handle screen space effects
world_render_pipeline: wgpu::RenderPipeline,
// num_world_vertices: u32,
num_world_indices: u32,
world_vertex_buffer: wgpu::Buffer,
world_index_buffer: wgpu::Buffer,
impl Vertex {
fn desc() -> wgpu::VertexBufferLayout<'static> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x4,
},
],
}
}
}
const INDICES: &[u16] = &[0, 2, 3, 0, 1, 2];
/// `w` and `h` are the fractions of the screen, in (0.0, 1.0], used to render the world
fn generate_world_vertices(w: f32, h: f32) -> Vec<Vertex> {
let verts = vec![
Vertex {
position: [-w, -h, 0.0],
tex_coords: [0.0, 1.0],
color: [1.0, 1.0, 1.0, 1.0],
},
Vertex {
position: [w, -h, 0.0],
tex_coords: [1.0, 1.0],
color: [1.0, 1.0, 1.0, 1.0],
},
Vertex {
position: [w, h, 0.0],
tex_coords: [1.0, 0.0],
color: [1.0, 1.0, 1.0, 1.0],
},
Vertex {
position: [-w, h, 0.0],
tex_coords: [0.0, 0.0],
color: [1.0, 1.0, 1.0, 1.0],
},
];
verts
}
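// The four vertices above form a screen-space quad, counter-clockwise from the
// bottom-left corner: 0 = (-w, -h), 1 = (w, -h), 2 = (w, h), 3 = (-w, h). With
// INDICES = [0, 2, 3, 0, 1, 2], both triangles wind counter-clockwise, matching
// the pipeline's `front_face: Ccw` / `cull_mode: Some(Face::Back)` settings
// below. The texture coordinates flip the V axis (vertex 0 samples (0.0, 1.0))
// because texture coordinate (0, 0) is the top-left texel while NDC +y points up.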
fn create_composer(_assets: &AssetCache) -> Result<compose::Composer, compose::ComposerError> {
let mut composer = compose::Composer::default();
// HDR module
let hdr_module_src = include_str!("./hdr.wgsl");
let (name, imports, shader_defs) = compose::get_preprocessor_data(hdr_module_src);
composer.add_composable_module(compose::ComposableModuleDescriptor {
as_name: name,
source: hdr_module_src,
file_path: "hdr.wgsl",
language: compose::ShaderLanguage::Wgsl,
additional_imports: &imports,
shader_defs,
})?;
Ok(composer)
}
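// How the composition step fits together (a sketch; hdr.wgsl's contents are not
// shown here): naga_oil reads the module name, imports, and shader defs from
// preprocessor directives in the WGSL source, which is what
// `get_preprocessor_data` extracts above. Assuming hdr.wgsl declares an import
// path, e.g. `#define_import_path hdr` (hypothetical name), world_shader.wgsl can
// reference it with a matching `#import` directive, and `make_naga_module` below
// resolves that import against the modules registered on this composer.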
let mut composer = Self::create_composer(assets)?;
let world_vertices = Self::generate_world_vertices(1.0, 1.0);
// let num_world_vertices = world_vertices.len() as u32;
let world_vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("world vertex buffer"),
contents: bytemuck::cast_slice(&world_vertices),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
});
let world_index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("world index buffer"),
contents: bytemuck::cast_slice(Self::INDICES),
usage: wgpu::BufferUsages::INDEX,
});
let world_texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d {
width: WORLD_HALF_EXTENTS.x * 2,
height: WORLD_HALF_EXTENTS.y * 2,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: world_texture_format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
label: Some("world texture"),
view_formats: &[],
});
let world_texture_view = world_texture.create_view(&wgpu::TextureViewDescriptor::default());
let world_texture_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let world_texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("world texture bind group layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let world_texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("world texture bind group"),
layout: &world_texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&world_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&world_texture_sampler),
},
],
});
let camera_uniform = CameraUniform::new();
let mut uniform_buffer = UniformBuffer::new(Vec::new());
uniform_buffer.write(&camera_uniform).unwrap();
let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("camera buffer"),
contents: &uniform_buffer.into_inner(),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("camera bind group layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("camera bind group"),
});
let render_options = RenderOptions::default();
let mut render_options_uniform_buffer = UniformBuffer::new(Vec::new());
render_options_uniform_buffer
.write(&render_options)
.unwrap();
let render_options_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("render options buffer"),
contents: &render_options_uniform_buffer.into_inner(),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let render_options_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("render options bind group layout"),
});
let render_options_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("render options bind group"),
layout: &render_options_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: render_options_buffer.as_entire_binding(),
}],
});
// let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
// label: Some("world shader"),
// source: wgpu::ShaderSource::Wgsl(include_str!("world_shader.wgsl").into()),
// });
let composed_world_shader = composer.make_naga_module(compose::NagaModuleDescriptor {
source: include_str!("world_shader.wgsl"),
file_path: "world_shader.wgsl",
shader_type: compose::ShaderType::Wgsl,
shader_defs: HashMap::new(),
additional_imports: &[],
})?;
let mut validator = naga::valid::Validator::new(
naga::valid::ValidationFlags::default(),
naga::valid::Capabilities::default(),
);
let composed_world_shader_module_info = validator.validate(&composed_world_shader)?;
let shader_source = naga::back::wgsl::write_string(
&composed_world_shader,
&composed_world_shader_module_info,
naga::back::wgsl::WriterFlags::EXPLICIT_TYPES,
)?;
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("world shader"),
source: wgpu::ShaderSource::Wgsl(shader_source.into()),
});
let world_render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("world render pipeline layout"),
bind_group_layouts: &[
&world_texture_bind_group_layout,
&camera_bind_group_layout,
&render_options_bind_group_layout,
],
push_constant_ranges: &[],
});
let world_render_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("world render pipeline"),
layout: Some(&world_render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
},
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
multiview: None,
});
let world_renderer =
middleware::WorldRenderer::new(&device, config.format, world_texture_format, assets)?;
// world_texture,
world_texture_view,
// world_texture_sampler,
world_texture_bind_group,
camera_uniform,
camera_buffer,
camera_bind_group,
world_render_pipeline,
render_options_buffer,
render_options_bind_group,
// num_world_vertices,
num_world_indices,
world_vertex_buffer,
world_index_buffer,
world_renderer,
if let Some((w, h)) = input.camera.update_aspect(&self.window) {
// Update the world vertex buffer
let new_world_vertices = Self::generate_world_vertices(w, h);
self.queue.write_buffer(
&self.world_vertex_buffer,
0,
bytemuck::cast_slice(&new_world_vertices),
);
}
// Update the camera and camera buffer
self.camera_uniform.update_view_proj(input.camera);
let mut uniform_buffer = UniformBuffer::new(Vec::new());
uniform_buffer.write(&self.camera_uniform).unwrap();
self.queue
.write_buffer(&self.camera_buffer, 0, &uniform_buffer.into_inner());
// Update render options
let mut uniform_buffer = UniformBuffer::new(Vec::new());
uniform_buffer.write(input.render_options).unwrap();
self.queue
.write_buffer(&self.render_options_buffer, 0, &uniform_buffer.into_inner());
final_render_pass.set_pipeline(&self.world_render_pipeline);
final_render_pass.set_bind_group(0, &self.world_texture_bind_group, &[]);
final_render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
final_render_pass.set_bind_group(2, &self.render_options_bind_group, &[]);
final_render_pass.set_vertex_buffer(0, self.world_vertex_buffer.slice(..));
final_render_pass
.set_index_buffer(self.world_index_buffer.slice(..), wgpu::IndexFormat::Uint16);
final_render_pass.draw_indexed(0..self.num_world_indices, 0, 0..1);
self.world_renderer
.render(&mut final_render_pass, &world_renderer_data);
#[allow(dead_code)]
mod world {
use std::collections::HashMap;
use assets_manager::AssetCache;
use encase::{ShaderType, UniformBuffer};
use naga_oil::compose;
use wgpu::util::DeviceExt;
use winit::window::Window;
use crate::screen::{Camera, RenderOptions, WORLD_HALF_EXTENTS};
/// Information sent to the shader about our camera
#[derive(ShaderType)]
struct CameraUniform {
view_proj: glam::Mat4,
}
impl CameraUniform {
fn new() -> Self {
Self {
view_proj: glam::Mat4::IDENTITY,
}
}
fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = camera.build_view_projection_matrix();
}
}
pub struct WorldRenderer {
// world_texture: wgpu::Texture,
world_texture_view: wgpu::TextureView,
// world_texture_sampler: wgpu::Sampler,
world_texture_bind_group: wgpu::BindGroup,
camera_uniform: CameraUniform,
camera_buffer: wgpu::Buffer,
camera_bind_group: wgpu::BindGroup,
render_options_buffer: wgpu::Buffer,
render_options_bind_group: wgpu::BindGroup,
/// Renders from the world texture to screen space within the bounds of the camera
// TODO eventually handle screen space effects
world_render_pipeline: wgpu::RenderPipeline,
// num_world_vertices: u32,
num_world_indices: u32,
world_vertex_buffer: wgpu::Buffer,
world_index_buffer: wgpu::Buffer,
}
pub struct WorldRenderFrameData {}
/// Most general type of vertex
/// Specifies a position, then tex coordinates, then an associated color (w/ alpha)
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
color: [f32; 4],
}
impl Vertex {
fn desc() -> wgpu::VertexBufferLayout<'static> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x4,
},
],
}
}
}
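// A hypothetical sanity check of the layout above, added for illustration:
// `Vertex` is `#[repr(C)]` with three tightly packed float arrays, so the stride
// is 36 bytes and the attribute offsets are 0, 12, and 20, matching the
// `size_of` expressions in `desc()`.
#[cfg(test)]
mod vertex_layout_tests {
    use super::Vertex;

    #[test]
    fn stride_and_offsets_match_desc() {
        // 3 + 2 + 4 = 9 f32s at 4 bytes each, with no padding under #[repr(C)].
        assert_eq!(std::mem::size_of::<Vertex>(), 36);
        // Offset of `tex_coords`, i.e. the size of `position: [f32; 3]`.
        assert_eq!(std::mem::size_of::<[f32; 3]>(), 12);
        // Offset of `color`, i.e. the size of position + tex_coords (5 f32s).
        assert_eq!(std::mem::size_of::<[f32; 5]>(), 20);
    }
}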
const INDICES: &[u16] = &[0, 2, 3, 0, 1, 2];
/// `w` and `h` are the fractions of the screen, in (0.0, 1.0], used to render the world
fn generate_world_vertices(w: f32, h: f32) -> Vec<Vertex> {
let verts = vec![
Vertex {
position: [-w, -h, 0.0],
tex_coords: [0.0, 1.0],
color: [1.0, 1.0, 1.0, 1.0],
},
Vertex {
position: [w, -h, 0.0],
tex_coords: [1.0, 1.0],
color: [1.0, 1.0, 1.0, 1.0],
},
Vertex {
position: [w, h, 0.0],
tex_coords: [1.0, 0.0],
color: [1.0, 1.0, 1.0, 1.0],
},
Vertex {
position: [-w, h, 0.0],
tex_coords: [0.0, 0.0],
color: [1.0, 1.0, 1.0, 1.0],
},
];
verts
}
fn create_composer(
_assets: &AssetCache,
) -> Result<compose::Composer, compose::ComposerError> {
let mut composer = compose::Composer::default();
// HDR module
let hdr_module_src = include_str!("../hdr.wgsl");
let (name, imports, shader_defs) = compose::get_preprocessor_data(hdr_module_src);
composer.add_composable_module(compose::ComposableModuleDescriptor {
as_name: name,
source: hdr_module_src,
file_path: "hdr.wgsl",
language: compose::ShaderLanguage::Wgsl,
additional_imports: &imports,
shader_defs,
})?;
Ok(composer)
}
pub fn world_texture_view(&self) -> &wgpu::TextureView {
&self.world_texture_view
}
pub fn new(
device: &wgpu::Device,
texture_format: wgpu::TextureFormat,
world_texture_format: wgpu::TextureFormat,
assets: &AssetCache,
) -> color_eyre::Result<Self> {
let num_world_indices = Self::INDICES.len() as u32;
let mut composer = Self::create_composer(assets)?;
let world_vertices = Self::generate_world_vertices(1.0, 1.0);
// let num_world_vertices = world_vertices.len() as u32;
let world_vertex_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("world vertex buffer"),
contents: bytemuck::cast_slice(&world_vertices),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
});
let world_index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("world index buffer"),
contents: bytemuck::cast_slice(Self::INDICES),
usage: wgpu::BufferUsages::INDEX,
});
let world_texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d {
width: WORLD_HALF_EXTENTS.x * 2,
height: WORLD_HALF_EXTENTS.y * 2,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: world_texture_format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::TEXTURE_BINDING,
label: Some("world texture"),
view_formats: &[],
});
let world_texture_view =
world_texture.create_view(&wgpu::TextureViewDescriptor::default());
let world_texture_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let world_texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("world texture bind group layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let world_texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("world texture bind group"),
layout: &world_texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&world_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&world_texture_sampler),
},
],
});
let camera_uniform = CameraUniform::new();
let mut uniform_buffer = UniformBuffer::new(Vec::new());
uniform_buffer.write(&camera_uniform).unwrap();
let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("camera buffer"),
contents: &uniform_buffer.into_inner(),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("camera bind group layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("camera bind group"),
});
let render_options = RenderOptions::default();
let mut render_options_uniform_buffer = UniformBuffer::new(Vec::new());
render_options_uniform_buffer
.write(&render_options)
.unwrap();
let render_options_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("render options buffer"),
contents: &render_options_uniform_buffer.into_inner(),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let render_options_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("render options bind group layout"),
});
let render_options_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("render options bind group"),
layout: &render_options_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: render_options_buffer.as_entire_binding(),
}],
});
// let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
// label: Some("world shader"),
// source: wgpu::ShaderSource::Wgsl(include_str!("world_shader.wgsl").into()),
// });
let composed_world_shader =
composer.make_naga_module(compose::NagaModuleDescriptor {
source: include_str!("../world_shader.wgsl"),
file_path: "world_shader.wgsl",
shader_type: compose::ShaderType::Wgsl,
shader_defs: HashMap::new(),
additional_imports: &[],
})?;
let mut validator = naga::valid::Validator::new(
naga::valid::ValidationFlags::default(),
naga::valid::Capabilities::default(),
);
let composed_world_shader_module_info = validator.validate(&composed_world_shader)?;
let shader_source = naga::back::wgsl::write_string(
&composed_world_shader,
&composed_world_shader_module_info,
naga::back::wgsl::WriterFlags::EXPLICIT_TYPES,
)?;
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("world shader"),
source: wgpu::ShaderSource::Wgsl(shader_source.into()),
});
let world_render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("world render pipeline layout"),
bind_group_layouts: &[
&world_texture_bind_group_layout,
&camera_bind_group_layout,
&render_options_bind_group_layout,
],
push_constant_ranges: &[],
});
let world_render_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("world render pipeline"),
layout: Some(&world_render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
},
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
multiview: None,
});
Ok(Self {
world_texture_view,
world_texture_bind_group,
camera_uniform,
camera_buffer,
camera_bind_group,
render_options_buffer,
render_options_bind_group,
world_render_pipeline,
num_world_indices,
world_vertex_buffer,
world_index_buffer,
})
pub fn prepare(
&mut self,
_device: &wgpu::Device,
queue: &wgpu::Queue,
window: &Window,
camera: &mut Camera,
render_options: &RenderOptions,
) -> WorldRenderFrameData {
if let Some((w, h)) = camera.update_aspect(window) {
// Update the world vertex buffer
let new_world_vertices = Self::generate_world_vertices(w, h);
queue.write_buffer(
&self.world_vertex_buffer,
0,
bytemuck::cast_slice(&new_world_vertices),
);
}
// Update the camera and camera buffer
self.camera_uniform.update_view_proj(camera);
let mut uniform_buffer = UniformBuffer::new(Vec::new());
uniform_buffer.write(&self.camera_uniform).unwrap();
queue.write_buffer(&self.camera_buffer, 0, &uniform_buffer.into_inner());
// Update render options
let mut uniform_buffer = UniformBuffer::new(Vec::new());
uniform_buffer.write(render_options).unwrap();
queue.write_buffer(&self.render_options_buffer, 0, &uniform_buffer.into_inner());
WorldRenderFrameData {}
render_pass.set_pipeline(&self.world_render_pipeline);
render_pass.set_bind_group(0, &self.world_texture_bind_group, &[]);
render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
render_pass.set_bind_group(2, &self.render_options_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.world_vertex_buffer.slice(..));
render_pass
.set_index_buffer(self.world_index_buffer.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(0..self.num_world_indices, 0, 0..1);
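// A minimal driving sketch, inferred from the caller fragments above; variable
// names such as `surface_format`, `final_render_pass`, and `frame_data` are
// placeholders for state owned by the caller:
//
//     let mut world_renderer =
//         WorldRenderer::new(&device, surface_format, world_texture_format, &assets)?;
//     // Each frame:
//     let frame_data =
//         world_renderer.prepare(&device, &queue, &window, &mut camera, &render_options);
//     // ... render the world itself into world_renderer.world_texture_view() ...
//     world_renderer.render(&mut final_render_pass, &frame_data);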