// renderer/webgpu/renderer.cpp
// Last modified: 2026-04-28 19:46:41 +02:00 (281 lines, 11 KiB, C++)
//
// Created by Vicente Ferrari Smith on 06.03.26.
//
#include "renderer.h"
#include <iostream>
#include "webgpu.h"
#include "../graphics.h"
#include <misc.h>
#include <print>
extern WGPUInstance instance;
extern Device wgpu_device;
extern Queue wgpu_queue;
// Sets up the GPU pipelines and the three buffers used by the compute
// demo: an input storage buffer (filled once from the CPU), an output
// storage buffer written by the shader, and a staging buffer used to read
// the results back on the CPU.
// NOTE(review): `window` is currently unused here — presumably reserved for
// surface/swapchain setup in create_render_pipeline; confirm.
Renderer::Renderer(GLFWwindow *window) {
    create_compute_pipeline();
    create_render_pipeline();

    // All three buffers hold `elementCount` floats.
    const uint64_t bufferSize = elementCount * sizeof(float);

    // Input buffer: written via mappedAtCreation, read by the compute shader.
    WGPUBufferDescriptor inputBufferDesc = WGPU_BUFFER_DESCRIPTOR_INIT;
    inputBufferDesc.label = toWgpuStringView("Input Buffer");
    inputBufferDesc.size = bufferSize;
    inputBufferDesc.usage = WGPUBufferUsage_Storage;
    inputBufferDesc.mappedAtCreation = true;
    input_buffer = wgpuDeviceCreateBuffer(wgpu_device.device, &inputBufferDesc);

    // Output buffer: written by the shader, then copied into the staging buffer.
    WGPUBufferDescriptor outputBufferDesc = WGPU_BUFFER_DESCRIPTOR_INIT;
    outputBufferDesc.label = toWgpuStringView("Output Buffer");
    outputBufferDesc.size = bufferSize;
    outputBufferDesc.usage = WGPUBufferUsage_Storage | WGPUBufferUsage_CopySrc;
    output_buffer = wgpuDeviceCreateBuffer(wgpu_device.device, &outputBufferDesc);

    // Staging buffer: copy destination that the CPU can map for reading.
    WGPUBufferDescriptor stagingBufferDesc = WGPU_BUFFER_DESCRIPTOR_INIT;
    stagingBufferDesc.label = toWgpuStringView("Staging Buffer");
    stagingBufferDesc.size = bufferSize;
    stagingBufferDesc.usage = WGPUBufferUsage_CopyDst | WGPUBufferUsage_MapRead;
    staging_buffer = wgpuDeviceCreateBuffer(wgpu_device.device, &stagingBufferDesc);

    // Write 0.0, 0.1, 0.2, 0.3, ... into the input buffer.
    // Bug fix: guard against buffer creation / mapping failure — the previous
    // code dereferenced the mapped pointer unconditionally (UB on nullptr).
    if (input_buffer != nullptr) {
        float* inputBufferData = static_cast<float*>(
            wgpuBufferGetMappedRange(input_buffer, 0, WGPU_WHOLE_MAP_SIZE)
        );
        if (inputBufferData != nullptr) {
            for (size_t i = 0; i < elementCount; ++i) {
                inputBufferData[i] = static_cast<float>(i) * 0.1f;
            }
        } else {
            std::println("Could not map the input buffer at creation");
        }
        // The buffer was created mapped, so it must be unmapped either way.
        wgpuBufferUnmap(input_buffer);
    } else {
        std::println("Could not create the input buffer");
    }
}
// Loads shader/compute.wgsl, builds a shader module from it, and creates
// the compute pipeline (entry point "main"). On file-read failure the
// pipeline is left untouched and an error is printed.
void Renderer::create_compute_pipeline() {
    std::string shader_source = read_entire_file("shader/compute.wgsl");
    if (!shader_source.empty()) {
        // Chain the WGSL source descriptor into the shader module descriptor.
        WGPUShaderSourceWGSL wgslSourceDesc = WGPU_SHADER_SOURCE_WGSL_INIT;
        wgslSourceDesc.code = toWgpuStringView(shader_source);
        WGPUShaderModuleDescriptor moduleDesc = WGPU_SHADER_MODULE_DESCRIPTOR_INIT;
        moduleDesc.nextInChain = &wgslSourceDesc.chain;
        moduleDesc.label = toWgpuStringView("Our first compute shader");
        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(wgpu_device.device, &moduleDesc);

        WGPUComputePipelineDescriptor desc = WGPU_COMPUTE_PIPELINE_DESCRIPTOR_INIT;
        desc.label = toWgpuStringView("Our simple pipeline");
        desc.compute.module = shaderModule;
        desc.compute.entryPoint = toWgpuStringView("main");
        compute_pipeline = wgpuDeviceCreateComputePipeline(wgpu_device.device, &desc);

        // Bug fix: the pipeline holds its own reference to the module, so our
        // reference must be released here — it previously leaked.
        wgpuShaderModuleRelease(shaderModule);
    } else {
        std::println("Couldn't load compute shader source");
    }
}
// TODO: not implemented yet — only the compute pipeline is functional.
// The commented-out render pass in end_frame() is the eventual consumer.
void Renderer::create_render_pipeline() {
}
// Queues a textured-quad render command for `sprite` at world position `pos`.
// The sort key encodes the quad's y position (for back-to-front ordering)
// and the pipeline type; actual drawing happens later in end_frame().
void Renderer::submit_sprite(glm::vec2 pos, const sprite_t &sprite) {
    const Texture &tex = texture_manager.textures[sprite.texture];

    RenderCommand cmd {};
    cmd.pipeline = PipelineType::TexturedQuad;
    cmd.key = {
        static_cast<uint16_t>(pos.y),
        0,
        static_cast<uint8_t>(PipelineType::TexturedQuad),
    };
    cmd.textured_quad = {
        .pos = pos,
        .scale = { sprite.scale.x, sprite.scale.y },
        .uv0 = {0, 0},
        .uv1 = {1, 1},
        .colour = {1, 1, 1, 1},
        .texture = tex.p_texture->texture,
    };

    commands.push_back(cmd);

    // Legacy (Jai) implementation kept for reference while porting:
    // assert(started == true, "You can't submit without having started the renderer first.");
    // renderable : Renderable;
    // renderable.type = .Sprite;
    //
    // if sprite.window_space
    //     renderable.projection_type = .ORTHOGRAPHIC_WINDOW;
    // else
    //     renderable.projection_type = .ORTHOGRAPHIC_WORLD;
    //
    // renderable.pos = pos;
    // renderable.sprite.texture_sheet = sprite.texture_sheet;
    // renderable.sprite.texture_cell = sprite.texture_cell;
    // renderable.sprite.origin = sprite.origin;
    // renderable.sprite.scale = sprite.scale;
    // renderable.sprite.colour = sprite.colour;
    // renderable.sprite.alpha = alpha;
    //
    // array_add(*renderer.renderable_list, renderable);
}
// Starts a new frame: discards the previous frame's queued render commands
// so submit_sprite() can repopulate the list from scratch.
void Renderer::begin_frame() {
commands.clear();
}
// Integer ceiling division: smallest number of q-sized groups covering p.
// Bug fix: the previous `(p + q - 1) / q` wraps around for large p
// (e.g. p = UINT32_MAX, q = 2 returned 0); this form cannot overflow.
// Precondition: q != 0.
uint32_t divideAndCeil(uint32_t p, uint32_t q) {
    return p / q + (p % q != 0 ? 1u : 0u);
}
// Synchronously reads back `bufferB`: maps it for reading, blocks (polling the
// instance's events) until the map operation completes, hands the mapped bytes
// to `processBufferData`, then unmaps the buffer. On mapping failure an error
// is printed and `processBufferData` is not invoked.
void fetchBufferDataSync(
    WGPUInstance instance,
    WGPUBuffer bufferB,
    std::function<void(const void*)> processBufferData
) {
    // Context passed to `onBufferBMapped` through the userdata pointer:
    struct OnBufferBMappedContext {
        bool operationEnded = false;      // Turned true as soon as the callback is invoked
        bool mappingIsSuccessful = false; // Turned true only if mapping succeeded
    };

    // This function has the type WGPUBufferMapCallback as defined in webgpu.h
    auto onBufferBMapped = [](
        WGPUMapAsyncStatus status,
        struct WGPUStringView message,
        void* userdata1,
        void* /* userdata2 */
    ) {
        OnBufferBMappedContext& context = *static_cast<OnBufferBMappedContext*>(userdata1);
        context.operationEnded = true;
        if (status == WGPUMapAsyncStatus_Success) {
            context.mappingIsSuccessful = true;
        } else {
            std::cout << "Could not map buffer B! Status: " << status << ", message: " << toStdStringView(message) << std::endl;
        }
    };

    // Shared state between this function and the callback.
    OnBufferBMappedContext context;
    WGPUBufferMapCallbackInfo callbackInfo = WGPU_BUFFER_MAP_CALLBACK_INFO_INIT;
    callbackInfo.mode = WGPUCallbackMode_AllowProcessEvents;
    callbackInfo.callback = onBufferBMapped;
    callbackInfo.userdata1 = &context;

    // Launch the asynchronous map operation.
    wgpuBufferMapAsync(
        bufferB,
        WGPUMapMode_Read,
        0, // offset
        WGPU_WHOLE_MAP_SIZE,
        callbackInfo
    );

    // Process events until the map operation has ended.
    wgpuInstanceProcessEvents(instance);
    while (!context.operationEnded) {
        sleepForMilliseconds(200);
        wgpuInstanceProcessEvents(instance);
    }

    if (context.mappingIsSuccessful) {
        const void* bufferData = wgpuBufferGetConstMappedRange(bufferB, 0, WGPU_WHOLE_MAP_SIZE);
        if (bufferData != nullptr) {
            processBufferData(bufferData);
        }
        // Bug fix: the buffer must be unmapped after use — it was previously
        // left in the mapped state, which makes any later GPU-side use of it
        // (e.g. next frame's copy into the staging buffer) invalid.
        wgpuBufferUnmap(bufferB);
    }
}
// Ends the frame: runs the compute pipeline over the input/output buffers,
// copies the results into the staging buffer, submits the work, then reads
// the staging buffer back and prints the computed values.
// NOTE(review): `window` and the queued render `commands` are not consumed
// yet — the render path is still the commented-out block below.
void Renderer::end_frame(GLFWwindow *window) {
    // Bind the input/output storage buffers to the compute pipeline's group 0.
    std::vector<WGPUBindGroupEntry> bindGroupEntries(2, WGPU_BIND_GROUP_ENTRY_INIT);
    bindGroupEntries[0].binding = 0;
    bindGroupEntries[0].buffer = input_buffer;
    bindGroupEntries[1].binding = 1;
    bindGroupEntries[1].buffer = output_buffer;

    WGPUBindGroupDescriptor bindGroupDesc = WGPU_BIND_GROUP_DESCRIPTOR_INIT;
    bindGroupDesc.entries = bindGroupEntries.data();
    bindGroupDesc.entryCount = bindGroupEntries.size();
    // We own the layout reference returned here; release it once the bind
    // group has been created.
    bindGroupDesc.layout = wgpuComputePipelineGetBindGroupLayout(compute_pipeline, 0);
    WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(wgpu_device.device, &bindGroupDesc);
    wgpuBindGroupLayoutRelease(bindGroupDesc.layout);

    // Encode one compute pass followed by an output -> staging copy.
    WGPUCommandEncoderDescriptor encoderDesc = WGPU_COMMAND_ENCODER_DESCRIPTOR_INIT;
    encoderDesc.label = toWgpuStringView("My command encoder");
    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(wgpu_device.device, &encoderDesc);

    WGPUComputePassEncoder computePass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
    wgpuComputePassEncoderSetPipeline(computePass, compute_pipeline);
    wgpuComputePassEncoderSetBindGroup(computePass, 0, bindGroup, 0, nullptr);
    uint32_t workgroupSizeX = 32; // the value specified in @workgroup_size(...)
    uint32_t workgroupCountX = divideAndCeil((uint32_t)elementCount, workgroupSizeX);
    wgpuComputePassEncoderDispatchWorkgroups(computePass, workgroupCountX, 1, 1);
    wgpuComputePassEncoderEnd(computePass);
    wgpuComputePassEncoderRelease(computePass);

    // After the end of the compute pass, copy the whole output buffer into the
    // staging buffer so the CPU can map and read it.
    wgpuCommandEncoderCopyBufferToBuffer(encoder, output_buffer, 0, staging_buffer, 0, elementCount * sizeof(float));

    WGPUCommandBufferDescriptor cmdBufferDescriptor = WGPU_COMMAND_BUFFER_DESCRIPTOR_INIT;
    cmdBufferDescriptor.label = toWgpuStringView("Command buffer");
    WGPUCommandBuffer command = wgpuCommandEncoderFinish(encoder, &cmdBufferDescriptor);
    wgpuCommandEncoderRelease(encoder); // release encoder after it's finished

    // Finally submit the command queue
    std::cout << "Submitting command..." << std::endl;
    wgpuQueueSubmit(wgpu_queue.queue, 1, &command);
    wgpuCommandBufferRelease(command);
    // Bug fix: release the per-frame bind group after submission — it was
    // previously never released, leaking one bind group every frame.
    wgpuBindGroupRelease(bindGroup);
    std::cout << "Command submitted." << std::endl;

    // Read the staging buffer back and print the computed values.
    fetchBufferDataSync(instance, staging_buffer, [&](const void* data) {
        const float* floatData = static_cast<const float*>(data);
        std::cout << "Result: [";
        for (size_t i = 0; i < elementCount; ++i) {
            if (i > 0) std::cout << ", ";
            std::cout << floatData[i];
        }
        std::cout << "]" << std::endl;
    });

    // Render path kept for reference until create_render_pipeline is implemented:
    // // Get the next target texture view
    // WGPUTextureView target_view = get_next_surface_view();
    // if (!target_view) return; // no surface texture, we skip this frame
    //
    // WGPUCommandEncoderDescriptor encoderDesc = WGPU_COMMAND_ENCODER_DESCRIPTOR_INIT;
    // encoderDesc.label = toWgpuStringView("My command encoder");
    // WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(wgpu_device.device, &encoderDesc);
    // WGPURenderPassDescriptor renderPassDesc = WGPU_RENDER_PASS_DESCRIPTOR_INIT;
    // WGPURenderPassColorAttachment colorAttachment = WGPU_RENDER_PASS_COLOR_ATTACHMENT_INIT;
    //
    // colorAttachment.view = target_view;
    // colorAttachment.loadOp = WGPULoadOp_Clear;
    // colorAttachment.storeOp = WGPUStoreOp_Store;
    // colorAttachment.clearValue = WGPUColor{ 100.0 / 255.0, 149.0 / 255.0, 237.0 / 255.0, 1.0 };
    //
    // renderPassDesc.colorAttachmentCount = 1;
    // renderPassDesc.colorAttachments = &colorAttachment;
    //
    // WGPURenderPassEncoder renderPass = wgpuCommandEncoderBeginRenderPass(encoder, &renderPassDesc);
    // // Use the render pass here (we do nothing with the render pass for now)
    // wgpuRenderPassEncoderEnd(renderPass);
    // wgpuRenderPassEncoderRelease(renderPass);
    // WGPUCommandBufferDescriptor cmdBufferDescriptor = WGPU_COMMAND_BUFFER_DESCRIPTOR_INIT;
    // cmdBufferDescriptor.label = toWgpuStringView("Command buffer");
    // WGPUCommandBuffer command = wgpuCommandEncoderFinish(encoder, &cmdBufferDescriptor);
    // wgpuCommandEncoderRelease(encoder); // release encoder after it's finished
    //
    // // Finally submit the command queue
    // std::println("Submitting command...");
    // wgpuQueueSubmit(wgpu_queue.queue, 1, &command);
    // wgpuCommandBufferRelease(command);
    // std::println("Command submitted.");
    //
    // // At the end of the frame
    // wgpuTextureViewRelease(target_view);
    // #ifndef __EMSCRIPTEN__
    // wgpuSurfacePresent(wgpu_surface.surface);
    // #endif
}