Update project

Werner
2021-11-08 12:17:40 -03:00
parent c5dbf96177
commit 15e8989bef
30 changed files with 873 additions and 851 deletions

66
Source/Render/DrawModel.rs Normal file

@@ -0,0 +1,66 @@
use super::{Material, Mesh, Model};
use std::ops::Range;
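/// Draw-call helpers that let a Mesh or a whole Model be drawn straight from a wgpu::RenderPass.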
pub trait DrawModel<'a> {
fn DrawMesh(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
camera_bind_group: &'a wgpu::BindGroup,
);
fn DrawMeshInstanced(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
fn DrawModel(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
fn DrawModelInstanced(
&mut self,
model: &'a Model,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
}
impl<'a> DrawModel<'a> for wgpu::RenderPass<'a> {
fn DrawMesh(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
camera_bind_group: &'a wgpu::BindGroup,
) {
self.DrawMeshInstanced(mesh, material, 0..1, camera_bind_group);
}
fn DrawMeshInstanced(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
) {
self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, camera_bind_group, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn DrawModel(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup) {
self.DrawModelInstanced(model, 0..1, camera_bind_group);
}
fn DrawModelInstanced(
&mut self,
model: &'a Model,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.DrawMeshInstanced(mesh, material, instances.clone(), camera_bind_group);
}
}
}
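For context, a minimal sketch of how this trait extension might be invoked. The function name, render_pass, obj_model, camera_bind_group, and the crate::Render path are assumptions and not part of this commit; the bind groups must match groups 0 and 1 used above.

use crate::Render::{DrawModel, Model, NUM_INSTANCES};

// Hypothetical call site: the instance buffer is assumed to already be bound to
// vertex slot 1, e.g. render_pass.set_vertex_buffer(1, instance_buffer.slice(..)).
fn RecordDrawCalls<'a>(
    render_pass: &mut wgpu::RenderPass<'a>,
    obj_model: &'a Model,
    camera_bind_group: &'a wgpu::BindGroup,
) {
    // DrawModel(obj_model, camera_bind_group) would draw a single copy; this
    // draws the full grid defined in Instance.rs.
    render_pass.DrawModelInstanced(obj_model, 0..NUM_INSTANCES, camera_bind_group);
}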

71
Source/Render/Instance.rs Normal file

@@ -0,0 +1,71 @@
use bytemuck::{Pod, Zeroable};
use std::mem;
pub const NUM_INSTANCES_PER_ROW: u32 = 10;
pub const NUM_INSTANCES: u32 = NUM_INSTANCES_PER_ROW * NUM_INSTANCES_PER_ROW;
pub const INSTANCE_DISPLACEMENT: cgmath::Vector3<f32> = cgmath::Vector3::new(
NUM_INSTANCES_PER_ROW as f32 * 0.5,
0.0,
NUM_INSTANCES_PER_ROW as f32 * 0.5,
);
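/// Per-instance transform kept on the CPU; ToRaw flattens it into the matrix uploaded to the GPU.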
pub struct Instance {
pub position: cgmath::Vector3<f32>,
pub rotation: cgmath::Quaternion<f32>,
}
impl Instance {
pub fn ToRaw(&self) -> InstanceRaw {
InstanceRaw {
model: (cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation))
.into(),
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
pub struct InstanceRaw {
pub model: [[f32; 4]; 4],
}
impl InstanceRaw {
pub fn GetDescriptor<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
// Switch the step mode from Vertex to Instance so the shader only advances to
// the next entry in this buffer when it starts processing a new instance,
// rather than on every vertex.
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
// While our vertex shader only uses locations 0 and 1 for now, later tutorials
// will use 2, 3, and 4 for per-vertex data, so we start at slot 5 to avoid
// conflicting with them.
shader_location: 5,
format: wgpu::VertexFormat::Float32x4,
},
// A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to
// define a slot for each vec4 and reassemble the mat4 in the shader.
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
shader_location: 6,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 7,
format: wgpu::VertexFormat::Float32x4,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
shader_location: 8,
format: wgpu::VertexFormat::Float32x4,
},
],
}
}
}
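A minimal sketch of how these types might be used to fill and upload the instance buffer that feeds vertex slot 1. The helper name and the 45-degree rotation are illustrative assumptions, not part of this commit.

use cgmath::prelude::*;
use wgpu::util::DeviceExt;

use crate::Render::{Instance, INSTANCE_DISPLACEMENT, NUM_INSTANCES_PER_ROW};

// Hypothetical helper: lay the instances out in a centred grid and upload their
// matrices as the vertex buffer bound to slot 1.
fn CreateInstanceBuffer(device: &wgpu::Device) -> (Vec<Instance>, wgpu::Buffer) {
    let instances = (0..NUM_INSTANCES_PER_ROW)
        .flat_map(|z| {
            (0..NUM_INSTANCES_PER_ROW).map(move |x| {
                let position = cgmath::Vector3::new(x as f32, 0.0, z as f32)
                    - INSTANCE_DISPLACEMENT;
                // A zero-length axis would make the quaternion invalid, so the
                // instance at the origin gets an explicit identity rotation.
                let rotation = if position.is_zero() {
                    cgmath::Quaternion::from_axis_angle(cgmath::Vector3::unit_z(), cgmath::Deg(0.0))
                } else {
                    cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0))
                };
                Instance { position, rotation }
            })
        })
        .collect::<Vec<_>>();
    let instance_data = instances.iter().map(Instance::ToRaw).collect::<Vec<_>>();
    let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Instance Buffer"),
        contents: bytemuck::cast_slice(&instance_data),
        usage: wgpu::BufferUsages::VERTEX,
    });
    (instances, instance_buffer)
}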

7
Source/Render/Material.rs Normal file

@@ -0,0 +1,7 @@
use super::Texture;
pub struct Material {
pub name: String,
pub diffuse_texture: Texture,
pub bind_group: wgpu::BindGroup,
}
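Model::Load (later in this commit) is the only place a Material is built. For illustration, a hypothetical sketch of constructing one by hand, e.g. for a fallback texture; the function name, label, embedded asset path, and the crate::Render path are assumptions.

use crate::Render::{Material, Texture};

// Hypothetical fallback material mirroring the bind group shape used in Model::Load:
// binding 0 = texture view, binding 1 = sampler.
fn CreateFallbackMaterial(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<Material> {
    // The asset path is an assumption for this sketch.
    let bytes = include_bytes!("../Assets/White.png");
    let diffuse_texture = Texture::FromBytes(device, queue, bytes, "Fallback")?;
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
            },
        ],
        label: Some("Fallback Material Bind Group"),
    });
    Ok(Material {
        name: "Fallback".to_string(),
        diffuse_texture,
        bind_group,
    })
}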

7
Source/Render/Mesh.rs Normal file

@@ -0,0 +1,7 @@
pub struct Mesh {
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}

139
Source/Render/Model.rs Normal file

@@ -0,0 +1,139 @@
use super::Texture;
use super::{Material, Mesh, Vertex};
use anyhow::Result;
use bytemuck::{Pod, Zeroable};
use std::path::Path;
use tobj::LoadOptions;
use wgpu::util::DeviceExt;
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
impl Model {
pub fn Load<P: AsRef<Path>>(
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
path: P,
) -> Result<Self> {
let (obj_models, obj_materials) = tobj::load_obj(
path.as_ref(),
&LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
)?;
let obj_materials = obj_materials?;
// We're assuming that the texture files are stored with the obj file
let containing_folder = path.as_ref().parent().expect("OBJ path has no parent directory");
let mut materials = Vec::new();
for mat in obj_materials {
let diffuse_path = mat.diffuse_texture;
let diffuse_texture =
Texture::Load(device, queue, containing_folder.join(diffuse_path))?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(Material {
name: mat.name,
diffuse_texture,
bind_group,
});
}
let mut meshes = Vec::new();
for m in obj_models {
let mut vertices = Vec::new();
for i in 0..m.mesh.positions.len() / 3 {
vertices.push(ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
});
}
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", path.as_ref())),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
meshes.push(Mesh {
name: m.name,
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
});
}
Ok(Self { meshes, materials })
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct ModelVertex {
position: [f32; 3],
tex_coords: [f32; 2],
normal: [f32; 3],
}
impl Vertex for ModelVertex {
fn GetDescriptor<'a>() -> wgpu::VertexBufferLayout<'a> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
],
}
}
}
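A small usage sketch for Model::Load. The helper name, the Resources directory, and Cube.obj are assumptions, not part of this commit; the layout passed in must match the two per-material entries created above (texture view at binding 0, sampler at binding 1).

use crate::Render::Model;

// Hypothetical loader: resolve the asset directory relative to Cargo.toml.
fn LoadSceneModel(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    texture_bind_group_layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<Model> {
    let res_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("Resources");
    Model::Load(device, queue, texture_bind_group_layout, res_dir.join("Cube.obj"))
}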

156
Source/Render/Texture.rs Normal file

@@ -0,0 +1,156 @@
use anyhow::*;
use image::GenericImageView;
use std::path::Path;
/// Texture coordinates.
///
/// ```text
/// (0,0)               (1,0)
/// V1 ----------------- V2
/// |                  /  |
/// |     Q1        /     |
/// |             /       |
/// |           /         |
/// |         /           |
/// |       /       Q2    |
/// |     /               |
/// V3 ----------------- V4
/// (0,1)               (1,1)
/// ```
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn Load<P: AsRef<Path>>(
device: &wgpu::Device,
queue: &wgpu::Queue,
path: P,
) -> Result<Self> {
// Needed to appease the borrow checker
let path_copy = path.as_ref().to_path_buf();
let label = path_copy.to_str();
let img = image::open(path)?;
Self::FromImage(device, queue, &img, label)
}
pub fn FromBytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::FromImage(device, queue, &img, Some(label))
}
pub fn FromImage(
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>,
) -> Result<Self> {
let rgba = img.to_rgba8();
let dimensions = img.dimensions();
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
queue.write_texture(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
size,
);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Ok(Self {
texture,
view,
sampler,
})
}
pub fn CreateDepthTexture(
device: &wgpu::Device,
config: &wgpu::SurfaceConfiguration,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
};
let desc = wgpu::TextureDescriptor {
label: Some(label),
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self {
texture,
view,
sampler,
}
}
}
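A sketch of the setup code these helpers expect: a bind group layout shaped like the binding 0 texture view / binding 1 sampler pair that Model::Load creates per material, plus the depth attachment rebuilt from the surface configuration. The function name and labels are assumptions, and the layout fields follow the wgpu 0.11-era API this commit appears to target.

use crate::Render::Texture;

// Hypothetical setup helper, not part of this commit.
fn CreateTextureResources(
    device: &wgpu::Device,
    config: &wgpu::SurfaceConfiguration,
) -> (wgpu::BindGroupLayout, Texture) {
    let texture_bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Texture Bind Group Layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    // A filtering sampler to match the Linear mag_filter used above.
                    ty: wgpu::BindingType::Sampler {
                        comparison: false,
                        filtering: true,
                    },
                    count: None,
                },
            ],
        });
    let depth_texture = Texture::CreateDepthTexture(device, config, "Depth Texture");
    (texture_bind_group_layout, depth_texture)
}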

3
Source/Render/Vertex.rs Normal file

@@ -0,0 +1,3 @@
pub trait Vertex {
fn GetDescriptor<'a>() -> wgpu::VertexBufferLayout<'a>;
}
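This trait exists so vertex buffer layouts can be handed to the pipeline's vertex state. Below is a hypothetical wiring sketch: the shader module with vs_main/fs_main entry points, the pipeline layout, the surface configuration, and the crate::Render path are all assumptions, and the descriptor fields follow the wgpu version this commit appears to target.

use crate::Render::{InstanceRaw, ModelVertex, Texture, Vertex};

// Hypothetical pipeline creation: slot 0 steps per vertex via ModelVertex
// (locations 0-2) and slot 1 steps per instance via InstanceRaw (locations 5-8).
fn CreateRenderPipeline(
    device: &wgpu::Device,
    render_pipeline_layout: &wgpu::PipelineLayout,
    shader: &wgpu::ShaderModule,
    config: &wgpu::SurfaceConfiguration,
) -> wgpu::RenderPipeline {
    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Render Pipeline"),
        layout: Some(render_pipeline_layout),
        vertex: wgpu::VertexState {
            module: shader,
            entry_point: "vs_main",
            buffers: &[ModelVertex::GetDescriptor(), InstanceRaw::GetDescriptor()],
        },
        fragment: Some(wgpu::FragmentState {
            module: shader,
            entry_point: "fs_main",
            targets: &[wgpu::ColorTargetState {
                format: config.format,
                blend: Some(wgpu::BlendState::REPLACE),
                write_mask: wgpu::ColorWrites::ALL,
            }],
        }),
        primitive: wgpu::PrimitiveState::default(),
        depth_stencil: Some(wgpu::DepthStencilState {
            format: Texture::DEPTH_FORMAT,
            depth_write_enabled: true,
            depth_compare: wgpu::CompareFunction::Less,
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState::default(),
        }),
        multisample: wgpu::MultisampleState::default(),
    })
}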

27
Source/Render/mod.rs Normal file

@@ -0,0 +1,27 @@
#[path = "DrawModel.rs"]
mod _DrawModel;
pub use self::_DrawModel::*;
#[path = "Instance.rs"]
mod _Instance;
pub use self::_Instance::*;
#[path = "Material.rs"]
mod _Material;
pub use self::_Material::*;
#[path = "Model.rs"]
mod _Model;
pub use self::_Model::*;
#[path = "Mesh.rs"]
mod _Mesh;
pub use self::_Mesh::*;
#[path = "Texture.rs"]
mod _Texture;
pub use self::_Texture::*;
#[path = "Vertex.rs"]
mod _Vertex;
pub use self::_Vertex::*;
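The #[path] plus pub use pattern flattens every per-file module into a single Render namespace. A brief sketch of how a crate root might consume it; the file name and the item list are assumptions.

// Hypothetical crate root, e.g. Source/Main.rs (name assumed).
#[allow(non_snake_case)] // the project uses PascalCase module and function names
mod Render;

// Callers name items directly under Render, ignoring the per-file module layout.
use crate::Render::{DrawModel, Instance, InstanceRaw, Model, ModelVertex, Texture, Vertex};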