#![allow(non_snake_case)]

#[path = "Camera.rs"]
mod _Camera;
use _Camera::*;

#[path = "Texture.rs"]
mod _Texture;
use _Texture::*;

use bytemuck::{Pod, Zeroable};
use image::GenericImageView;
use std::mem;
use wgpu::util::DeviceExt;
use winit::dpi::LogicalSize;
use winit::event::*;
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::Window;
use winit::window::WindowBuilder;

#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
struct Vertex {
    position: [f32; 3],
    tex_coords: [f32; 2],
}

impl Vertex {
    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
        wgpu::VertexBufferLayout {
            array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
            step_mode: wgpu::VertexStepMode::Vertex,
            attributes: &[
                wgpu::VertexAttribute {
                    offset: 0,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float32x3,
                },
                wgpu::VertexAttribute {
                    offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float32x2,
                },
            ],
        }
    }
}

/// Texture coordinates.
///
/// ```text
/// (0,0)              (1,0)
///   A ------------------ B
///   |                  / |
///   |      Q1        /   |
///   |              /     |
///   |            /       |
///   |          /         |
///   |        /           |
///   |      /      Q2     |
///   |    /               |
///   C ------------------ D
/// (0,1)              (1,1)
/// ```
#[rustfmt::skip]
const VERTICES: &[Vertex] = &[
    Vertex { position: [-0.5,  0.5, 0.0], tex_coords: [0.0, 0.0] }, // A
    Vertex { position: [ 0.5,  0.5, 0.0], tex_coords: [1.0, 0.0] }, // B
    Vertex { position: [-0.5, -0.5, 0.0], tex_coords: [0.0, 1.0] }, // C
    Vertex { position: [ 0.5, -0.5, 0.0], tex_coords: [1.0, 1.0] }, // D
];

#[rustfmt::skip]
const INDICES: &[u16] = &[
    1, 0, 2,
    1, 2, 3,
];

struct State {
    surface: wgpu::Surface,
    device: wgpu::Device,
    queue: wgpu::Queue,
    config: wgpu::SurfaceConfiguration,
    size: winit::dpi::PhysicalSize<u32>,
    render_pipeline: wgpu::RenderPipeline,
    vertex_buffer: wgpu::Buffer,
    index_buffer: wgpu::Buffer,
    num_indices: u32,
    diffuse_bind_group: wgpu::BindGroup,
    diffuse_texture: Texture,
    camera: Camera,
    camera_uniform: CameraUniform,
    camera_buffer: wgpu::Buffer,
    camera_bind_group: wgpu::BindGroup,
    camera_controller: CameraController,
}

impl State {
    async fn new(window: &Window) -> Self {
        let size = window.inner_size();

        // The instance is a handle to our GPU
        // Backends::all => Vulkan + Metal + DX12 + Browser WebGPU
        let instance = wgpu::Instance::new(wgpu::Backends::all());
        let surface = unsafe { instance.create_surface(window) };
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::HighPerformance,
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            })
            .await
            .unwrap();

        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    features: wgpu::Features::empty(),
                    limits: wgpu::Limits::default(),
                    label: None,
                },
                None, // Trace path
            )
            .await
            .unwrap();

        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface.get_preferred_format(&adapter).unwrap(),
            width: size.width,
            height: size.height,
            present_mode: wgpu::PresentMode::Fifo,
        };
        surface.configure(&device, &config);

        // Texture
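        // NOTE: the manual create_texture/write_texture block below is kept for
        // reference; the Texture::from_bytes helper called after it presumably
        // performs the equivalent steps, and its result is the texture that the
        // bind group actually uses.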
        let diffuse_bytes = include_bytes!("../Resources/Tree.png");
        let diffuse_image = image::load_from_memory(diffuse_bytes).unwrap();
        let diffuse_rgba = diffuse_image.as_rgba8().unwrap();

        let dimensions = diffuse_image.dimensions();

        let texture_size = wgpu::Extent3d {
            width: dimensions.0,
            height: dimensions.1,
            depth_or_array_layers: 1,
        };
        let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
            // All textures are stored as 3D, we represent our 2D texture
            // by setting depth to 1.
            size: texture_size,
            mip_level_count: 1, // We'll talk about this a little later
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            // Most images are stored using sRGB so we need to reflect that here.
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            // TEXTURE_BINDING tells wgpu that we want to use this texture in shaders
            // COPY_DST means that we want to copy data to this texture
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            label: Some("diffuse_texture"),
        });

        queue.write_texture(
            // Tells wgpu where to copy the pixel data
            wgpu::ImageCopyTexture {
                texture: &diffuse_texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            // The actual pixel data
            diffuse_rgba,
            // The layout of the texture
            wgpu::ImageDataLayout {
                offset: 0,
                bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
                rows_per_image: std::num::NonZeroU32::new(dimensions.1),
            },
            texture_size,
        );

        let diffuse_bytes = include_bytes!("../Resources/Tree.png");
        let diffuse_texture =
            Texture::from_bytes(&device, &queue, diffuse_bytes, "Tree.png").unwrap();
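        // The layout below exposes the texture view at binding 0 and its sampler
        // at binding 1, both visible to the fragment stage; these indices are
        // assumed to match the [[group(0), binding(...)]] declarations in
        // Shaders/Texture.wgsl.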
        let texture_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            view_dimension: wgpu::TextureViewDimension::D2,
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Sampler {
                            // This is only for TextureSampleType::Depth
                            comparison: false,
                            // This should be true if the sample_type of the texture is:
                            //     TextureSampleType::Float { filterable: true }
                            // Otherwise you'll get an error.
                            filtering: true,
                        },
                        count: None,
                    },
                ],
                label: Some("texture_bind_group_layout"),
            });

        let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
                },
            ],
            label: Some("diffuse_bind_group"),
        });

        // Camera
        let camera = Camera {
            // position the camera one unit up and 2 units back
            // +z is out of the screen
            eye: (0.0, 1.0, 2.0).into(),
            // have it look at the origin
            target: (0.0, 0.0, 0.0).into(),
            // which way is "up"
            up: cgmath::Vector3::unit_y(),
            aspect: config.width as f32 / config.height as f32,
            fovy: 45.0,
            znear: 0.1,
            zfar: 100.0,
        };
        let camera_controller = CameraController::new(0.2);

        let mut camera_uniform = CameraUniform::new();
        camera_uniform.update_view_proj(&camera);

        let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Camera Buffer"),
            contents: bytemuck::cast_slice(&[camera_uniform]),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });

        let camera_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                }],
                label: Some("camera_bind_group_layout"),
            });

        let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &camera_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: camera_buffer.as_entire_binding(),
            }],
            label: Some("camera_bind_group"),
        });

        // Shader
        let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: Some("Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("../Shaders/Texture.wgsl").into()),
        });

        // Pipeline
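        // The order of bind_group_layouts below (texture at group 0, camera at
        // group 1) has to line up with the set_bind_group(0, ...) and
        // set_bind_group(1, ...) calls in render() and with the group indices
        // used by the shader.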
        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Render Pipeline Layout"),
                bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout],
                push_constant_ranges: &[],
            });

        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "main",
                buffers: &[Vertex::desc()],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "main",
                targets: &[wgpu::ColorTargetState {
                    format: config.format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                }],
            }),
            primitive: wgpu::PrimitiveState {
                // Every three vertices form one triangle
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                // Counter-clockwise triangles are front-facing
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
                polygon_mode: wgpu::PolygonMode::Fill,
                // Requires Features::DEPTH_CLAMPING
                clamp_depth: false,
                // Requires Features::CONSERVATIVE_RASTERIZATION
                conservative: false,
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                // No multisampling
                count: 1,
                // Use all samples
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
        });

        // Buffers
        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: bytemuck::cast_slice(VERTICES),
            usage: wgpu::BufferUsages::VERTEX,
        });
        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Index Buffer"),
            contents: bytemuck::cast_slice(INDICES),
            usage: wgpu::BufferUsages::INDEX,
        });
        let num_indices = INDICES.len() as u32;

        Self {
            surface,
            device,
            queue,
            config,
            size,
            render_pipeline,
            vertex_buffer,
            index_buffer,
            num_indices,
            diffuse_bind_group,
            diffuse_texture,
            camera,
            camera_uniform,
            camera_buffer,
            camera_bind_group,
            camera_controller,
        }
    }

    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        if new_size.width > 0 && new_size.height > 0 {
            self.size = new_size;
            self.config.width = new_size.width;
            self.config.height = new_size.height;
            self.surface.configure(&self.device, &self.config);
        }
    }

    fn input(&mut self, event: &WindowEvent) -> bool {
        self.camera_controller.process_events(event)
    }

    fn update(&mut self) {
        self.camera_controller.update_camera(&mut self.camera);
        self.camera_uniform.update_view_proj(&self.camera);
        self.queue.write_buffer(
            &self.camera_buffer,
            0,
            bytemuck::cast_slice(&[self.camera_uniform]),
        );
    }

    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });

        {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[
                    // This is what [[location(0)]] in the fragment shader targets
                    wgpu::RenderPassColorAttachment {
                        view: &view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color {
                                r: 0.0,
                                g: 0.0,
                                b: 0.0,
                                a: 1.0,
                            }),
                            store: true,
                        },
                    },
                ],
                depth_stencil_attachment: None,
            });

            render_pass.set_pipeline(&self.render_pipeline);
            render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
            render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
            render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
            render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
        }

        // submit will accept anything that implements IntoIter
        self.queue.submit(std::iter::once(encoder.finish()));
        output.present();

        Ok(())
    }
}

fn main() {
    env_logger::init();
    let event_loop = EventLoop::new();
    let window = WindowBuilder::new()
        .with_title("Renderer")
        .with_inner_size(LogicalSize::new(1280, 720))
        .build(&event_loop)
        .unwrap();

    // State::new uses async code, so we're going to wait for it to finish
    let mut state = pollster::block_on(State::new(&window));
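    // Event handling: camera input gets first crack at each window event via
    // State::input; only events it does not consume fall through to the
    // window-management match below (close, resize, scale-factor changes).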
    event_loop.run(move |event, _, control_flow| {
        match event {
            Event::WindowEvent {
                ref event,
                window_id,
            } if window_id == window.id() => {
                if !state.input(event) {
                    match event {
                        WindowEvent::CloseRequested
                        | WindowEvent::KeyboardInput {
                            input:
                                KeyboardInput {
                                    state: ElementState::Pressed,
                                    virtual_keycode: Some(VirtualKeyCode::Escape),
                                    ..
                                },
                            ..
                        } => *control_flow = ControlFlow::Exit,
                        WindowEvent::Resized(physical_size) => {
                            state.resize(*physical_size);
                        }
                        WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                            state.resize(**new_inner_size);
                        }
                        _ => {}
                    }
                }
            }
            Event::RedrawRequested(_) => {
                state.update();
                match state.render() {
                    Ok(_) => {}
                    // Reconfigure the surface if lost
                    Err(wgpu::SurfaceError::Lost) => state.resize(state.size),
                    // The system is out of memory, we should probably quit
                    Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
                    // All other errors (Outdated, Timeout) should be resolved by the next frame
                    Err(e) => eprintln!("{:?}", e),
                }
            }
            Event::MainEventsCleared => {
                // RedrawRequested will only trigger once, unless we manually
                // request it.
                window.request_redraw();
            }
            _ => {}
        }
    });
}