wg_engine: refactor context handles

The new approach provides:
- instance, adapter, device and default queue
- device capabilities
- command buffer executor
- error handling
This commit is contained in:
Sergii Liebodkin 2023-12-19 23:56:14 +02:00 committed by Hermet Park
parent 1a6571f596
commit 93f0e493ac
4 changed files with 131 additions and 110 deletions

View file

@ -21,6 +21,96 @@
*/ */
#include "tvgWgCommon.h" #include "tvgWgCommon.h"
#include <iostream>
//*****************************************************************************
// context
//*****************************************************************************
void WgContext::initialize()
{
// create instance
WGPUInstanceDescriptor instanceDesc{};
instanceDesc.nextInChain = nullptr;
instance = wgpuCreateInstance(&instanceDesc);
assert(instance);
// request adapter options
WGPURequestAdapterOptions requestAdapterOptions{};
requestAdapterOptions.nextInChain = nullptr;
requestAdapterOptions.compatibleSurface = nullptr;
requestAdapterOptions.powerPreference = WGPUPowerPreference_HighPerformance;
requestAdapterOptions.forceFallbackAdapter = false;
// on adapter request ended function
auto onAdapterRequestEnded = [](WGPURequestAdapterStatus status, WGPUAdapter adapter, char const * message, void * pUserData) {
if (status != WGPURequestAdapterStatus_Success)
TVGERR("WG_RENDERER", "Adapter request: %s", message);
*((WGPUAdapter*)pUserData) = adapter;
};
// request adapter
wgpuInstanceRequestAdapter(instance, &requestAdapterOptions, onAdapterRequestEnded, &adapter);
assert(adapter);
// adapter enumarate fueatures
size_t featuresCount = wgpuAdapterEnumerateFeatures(adapter, featureNames);
wgpuAdapterGetProperties(adapter, &adapterProperties);
wgpuAdapterGetLimits(adapter, &supportedLimits);
// reguest device
WGPUDeviceDescriptor deviceDesc{};
deviceDesc.nextInChain = nullptr;
deviceDesc.label = "The device";
deviceDesc.requiredFeaturesCount = featuresCount;
deviceDesc.requiredFeatures = featureNames;
deviceDesc.requiredLimits = nullptr;
deviceDesc.defaultQueue.nextInChain = nullptr;
deviceDesc.defaultQueue.label = "The default queue";
deviceDesc.deviceLostCallback = nullptr;
deviceDesc.deviceLostUserdata = nullptr;
// on device request ended function
auto onDeviceRequestEnded = [](WGPURequestDeviceStatus status, WGPUDevice device, char const * message, void * pUserData) {
if (status != WGPURequestDeviceStatus_Success)
TVGERR("WG_RENDERER", "Device request: %s", message);
*((WGPUDevice*)pUserData) = device;
};
// request device
wgpuAdapterRequestDevice(adapter, &deviceDesc, onDeviceRequestEnded, &device);
assert(device);
// on device error function
auto onDeviceError = [](WGPUErrorType type, char const* message, void* pUserData) {
TVGERR("WG_RENDERER", "Uncaptured device error: %s", message);
// TODO: remove direct error message
std::cout << message << std::endl;
};
// set device error handling
wgpuDeviceSetUncapturedErrorCallback(device, onDeviceError, nullptr);
queue = wgpuDeviceGetQueue(device);
assert(queue);
}
void WgContext::release()
{
if (device) {
wgpuDeviceDestroy(device);
wgpuDeviceRelease(device);
}
if (adapter) wgpuAdapterRelease(adapter);
if (instance) wgpuInstanceRelease(instance);
}
// Finishes the given encoder into a command buffer, submits it on the
// default queue, and releases the finished buffer.
// The encoder itself is not released here; that is the caller's responsibility.
void WgContext::executeCommandEncoder(WGPUCommandEncoder commandEncoder)
{
    // finish encoding into a single command buffer
    WGPUCommandBufferDescriptor descriptor{};
    descriptor.nextInChain = nullptr;
    descriptor.label = "The command buffer";
    const WGPUCommandBuffer commands = wgpuCommandEncoderFinish(commandEncoder, &descriptor);

    // submit for execution and drop our reference
    wgpuQueueSubmit(queue, 1, &commands);
    wgpuCommandBufferRelease(commands);
}
//***************************************************************************** //*****************************************************************************
// bind group // bind group

View file

@ -28,6 +28,22 @@
#include "tvgCommon.h" #include "tvgCommon.h"
#include "tvgRender.h" #include "tvgRender.h"
// Shared WebGPU context: owns the instance/adapter/device/default-queue
// handles and caches the adapter's features, properties and limits.
// Lifecycle: initialize() acquires everything, release() frees it.
struct WgContext {
    WGPUInstance instance{};                  // root WebGPU instance
    WGPUAdapter adapter{};                    // selected physical adapter
    WGPUDevice device{};                      // logical device created from the adapter
    WGPUQueue queue{};                        // the device's default queue
    WGPUFeatureName featureNames[32]{};       // features enumerated from the adapter
    WGPUAdapterProperties adapterProperties{}; // adapter identity/backend info
    WGPUSupportedLimits supportedLimits{};    // adapter-reported limits
    void initialize();
    void release();
    // finishes the encoder, submits the resulting command buffer on `queue`
    void executeCommandEncoder(WGPUCommandEncoder commandEncoder);
};
struct WgBindGroup struct WgBindGroup
{ {
WGPUBindGroup mBindGroup{}; WGPUBindGroup mBindGroup{};

View file

@ -22,13 +22,10 @@
#include "tvgWgRenderer.h" #include "tvgWgRenderer.h"
#include <iostream>
#ifdef _WIN32 #ifdef _WIN32
// TODO: cross-platform realization // TODO: cross-platform realization
#include <windows.h> #include <windows.h>
#endif #endif
#include "tvgWgRenderData.h"
#include "tvgWgShaderSrc.h"
WgRenderer::WgRenderer() WgRenderer::WgRenderer()
{ {
@ -44,84 +41,18 @@ WgRenderer::~WgRenderer()
void WgRenderer::initialize() void WgRenderer::initialize()
{ {
// create instance mContext.initialize();
WGPUInstanceDescriptor instanceDesc{}; mPipelines.initialize(mContext.device);
instanceDesc.nextInChain = nullptr;
mInstance = wgpuCreateInstance(&instanceDesc);
assert(mInstance);
// request adapter options
WGPURequestAdapterOptions requestAdapterOptions{};
requestAdapterOptions.nextInChain = nullptr;
requestAdapterOptions.compatibleSurface = nullptr;
requestAdapterOptions.powerPreference = WGPUPowerPreference_HighPerformance;
requestAdapterOptions.forceFallbackAdapter = false;
// on adapter request ended function
auto onAdapterRequestEnded = [](WGPURequestAdapterStatus status, WGPUAdapter adapter, char const * message, void * pUserData) {
if (status != WGPURequestAdapterStatus_Success)
TVGERR("WG_RENDERER", "Adapter request: %s", message);
*((WGPUAdapter*)pUserData) = adapter;
};
// request adapter
wgpuInstanceRequestAdapter(mInstance, &requestAdapterOptions, onAdapterRequestEnded, &mAdapter);
assert(mAdapter);
// adapter enumarate fueatures
WGPUFeatureName featureNames[32]{};
size_t featuresCount = wgpuAdapterEnumerateFeatures(mAdapter, featureNames);
WGPUAdapterProperties adapterProperties{};
wgpuAdapterGetProperties(mAdapter, &adapterProperties);
WGPUSupportedLimits supportedLimits{};
wgpuAdapterGetLimits(mAdapter, &supportedLimits);
// reguest device
WGPUDeviceDescriptor deviceDesc{};
deviceDesc.nextInChain = nullptr;
deviceDesc.label = "The device";
deviceDesc.requiredFeaturesCount = featuresCount;
deviceDesc.requiredFeatures = featureNames;
deviceDesc.requiredLimits = nullptr;
deviceDesc.defaultQueue.nextInChain = nullptr;
deviceDesc.defaultQueue.label = "The default queue";
deviceDesc.deviceLostCallback = nullptr;
deviceDesc.deviceLostUserdata = nullptr;
// on device request ended function
auto onDeviceRequestEnded = [](WGPURequestDeviceStatus status, WGPUDevice device, char const * message, void * pUserData) {
if (status != WGPURequestDeviceStatus_Success)
TVGERR("WG_RENDERER", "Device request: %s", message);
*((WGPUDevice*)pUserData) = device;
};
// request device
wgpuAdapterRequestDevice(mAdapter, &deviceDesc, onDeviceRequestEnded, &mDevice);
assert(mDevice);
// on device error function
auto onDeviceError = [](WGPUErrorType type, char const* message, void* pUserData) {
TVGERR("WG_RENDERER", "Uncaptured device error: %s", message);
// TODO: remove direct error message
std::cout << message << std::endl;
};
// set device error handling
wgpuDeviceSetUncapturedErrorCallback(mDevice, onDeviceError, nullptr);
mQueue = wgpuDeviceGetQueue(mDevice);
assert(mQueue);
// create pipelines
mPipelines.initialize(mDevice);
} }
void WgRenderer::release() void WgRenderer::release()
{ {
mRenderTarget.release();
mPipelines.release(); mPipelines.release();
if (mDevice) { if (mSwapChain) wgpuSwapChainRelease(mSwapChain);
wgpuDeviceDestroy(mDevice); if (mSurface) wgpuSurfaceRelease(mSurface);
wgpuDeviceRelease(mDevice); mPipelines.release();
} mContext.release();
if (mAdapter) wgpuAdapterRelease(mAdapter);
if (mInstance) wgpuInstanceRelease(mInstance);
} }
@ -131,27 +62,27 @@ RenderData WgRenderer::prepare(const RenderShape& rshape, RenderData data, const
auto renderDataShape = (WgRenderDataShape*)data; auto renderDataShape = (WgRenderDataShape*)data;
if (!renderDataShape) { if (!renderDataShape) {
renderDataShape = new WgRenderDataShape(); renderDataShape = new WgRenderDataShape();
renderDataShape->initialize(mDevice); renderDataShape->initialize(mContext.device);
} }
// update geometry // update geometry
if (flags & (RenderUpdateFlag::Path | RenderUpdateFlag::Stroke)) { if (flags & (RenderUpdateFlag::Path | RenderUpdateFlag::Stroke)) {
renderDataShape->releaseRenderData(); renderDataShape->releaseRenderData();
renderDataShape->tesselate(mDevice, mQueue, rshape); renderDataShape->tesselate(mContext.device, mContext.queue, rshape);
renderDataShape->stroke(mDevice, mQueue, rshape); renderDataShape->stroke(mContext.device, mContext.queue, rshape);
} }
// update paint settings // update paint settings
if (flags & (RenderUpdateFlag::Transform | RenderUpdateFlag::Blend)) { if (flags & (RenderUpdateFlag::Transform | RenderUpdateFlag::Blend)) {
WgShaderTypeMat4x4f modelMat(transform); WgShaderTypeMat4x4f modelMat(transform);
WgShaderTypeBlendSettings blendSettings(mTargetSurface.cs); WgShaderTypeBlendSettings blendSettings(mTargetSurface.cs);
renderDataShape->mBindGroupPaint.initialize(mDevice, mQueue, modelMat, blendSettings); renderDataShape->mBindGroupPaint.initialize(mContext.device, mContext.queue, modelMat, blendSettings);
} }
// setup fill settings // setup fill settings
renderDataShape->mRenderSettingsShape.update(mDevice, mQueue, rshape.fill, rshape.color, flags); renderDataShape->mRenderSettingsShape.update(mContext.device, mContext.queue, rshape.fill, rshape.color, flags);
if (rshape.stroke) if (rshape.stroke)
renderDataShape->mRenderSettingsStroke.update(mDevice, mQueue, rshape.stroke->fill, rshape.stroke->color, flags); renderDataShape->mRenderSettingsStroke.update(mContext.device, mContext.queue, rshape.stroke->fill, rshape.stroke->color, flags);
return renderDataShape; return renderDataShape;
} }
@ -169,22 +100,22 @@ RenderData WgRenderer::prepare(Surface* surface, const RenderMesh* mesh, RenderD
auto renderDataShape = (WgRenderDataShape*)data; auto renderDataShape = (WgRenderDataShape*)data;
if (!renderDataShape) { if (!renderDataShape) {
renderDataShape = new WgRenderDataShape(); renderDataShape = new WgRenderDataShape();
renderDataShape->initialize(mDevice); renderDataShape->initialize(mContext.device);
} }
// update paint settings // update paint settings
if (flags & (RenderUpdateFlag::Transform | RenderUpdateFlag::Blend)) { if (flags & (RenderUpdateFlag::Transform | RenderUpdateFlag::Blend)) {
WgShaderTypeMat4x4f modelMat(transform); WgShaderTypeMat4x4f modelMat(transform);
WgShaderTypeBlendSettings blendSettings(surface->cs); WgShaderTypeBlendSettings blendSettings(surface->cs);
renderDataShape->mBindGroupPaint.initialize(mDevice, mQueue, modelMat, blendSettings); renderDataShape->mBindGroupPaint.initialize(mContext.device, mContext.queue, modelMat, blendSettings);
} }
// update image data // update image data
if (flags & (RenderUpdateFlag::Path | RenderUpdateFlag::Image)) { if (flags & (RenderUpdateFlag::Path | RenderUpdateFlag::Image)) {
renderDataShape->releaseRenderData(); renderDataShape->releaseRenderData();
renderDataShape->tesselate(mDevice, mQueue, surface, mesh); renderDataShape->tesselate(mContext.device, mContext.queue, surface, mesh);
renderDataShape->mBindGroupPicture.initialize( renderDataShape->mBindGroupPicture.initialize(
mDevice, mQueue, mContext.device, mContext.queue,
renderDataShape->mImageData.mSampler, renderDataShape->mImageData.mSampler,
renderDataShape->mImageData.mTextureView); renderDataShape->mImageData.mTextureView);
} }
@ -258,7 +189,6 @@ ColorSpace WgRenderer::colorSpace()
bool WgRenderer::clear() bool WgRenderer::clear()
{ {
mClearBuffer = true;
return true; return true;
} }
@ -271,7 +201,7 @@ bool WgRenderer::sync()
WGPUCommandEncoderDescriptor commandEncoderDesc{}; WGPUCommandEncoderDescriptor commandEncoderDesc{};
commandEncoderDesc.nextInChain = nullptr; commandEncoderDesc.nextInChain = nullptr;
commandEncoderDesc.label = "The command encoder"; commandEncoderDesc.label = "The command encoder";
WGPUCommandEncoder commandEncoder = wgpuDeviceCreateCommandEncoder(mDevice, &commandEncoderDesc); WGPUCommandEncoder commandEncoder = wgpuDeviceCreateCommandEncoder(mContext.device, &commandEncoderDesc);
// render datas // render datas
mRenderTarget.beginRenderPass(commandEncoder, backBufferView); mRenderTarget.beginRenderPass(commandEncoder, backBufferView);
@ -283,14 +213,7 @@ bool WgRenderer::sync()
} }
mRenderTarget.endRenderPass(); mRenderTarget.endRenderPass();
// execute command encoder mContext.executeCommandEncoder(commandEncoder);
WGPUCommandBufferDescriptor commandBufferDesc{};
commandBufferDesc.nextInChain = nullptr;
commandBufferDesc.label = "The command buffer";
WGPUCommandBuffer commandsBuffer{};
commandsBuffer = wgpuCommandEncoderFinish(commandEncoder, &commandBufferDesc);
wgpuQueueSubmit(mQueue, 1, &commandsBuffer);
wgpuCommandBufferRelease(commandsBuffer);
wgpuCommandEncoderRelease(commandEncoder); wgpuCommandEncoderRelease(commandEncoder);
// go to the next frame // go to the next frame
@ -333,7 +256,7 @@ bool WgRenderer::target(void* window, uint32_t w, uint32_t h)
WGPUSurfaceDescriptor surfaceDesc{}; WGPUSurfaceDescriptor surfaceDesc{};
surfaceDesc.nextInChain = (const WGPUChainedStruct*)&surfaceDescHwnd; surfaceDesc.nextInChain = (const WGPUChainedStruct*)&surfaceDescHwnd;
surfaceDesc.label = "The surface"; surfaceDesc.label = "The surface";
mSurface = wgpuInstanceCreateSurface(mInstance, &surfaceDesc); mSurface = wgpuInstanceCreateSurface(mContext.instance, &surfaceDesc);
assert(mSurface); assert(mSurface);
// get preferred format // get preferred format
@ -347,10 +270,10 @@ bool WgRenderer::target(void* window, uint32_t w, uint32_t h)
swapChainDesc.width = mTargetSurface.w; swapChainDesc.width = mTargetSurface.w;
swapChainDesc.height = mTargetSurface.h; swapChainDesc.height = mTargetSurface.h;
swapChainDesc.presentMode = WGPUPresentMode_Mailbox; swapChainDesc.presentMode = WGPUPresentMode_Mailbox;
mSwapChain = wgpuDeviceCreateSwapChain(mDevice, mSurface, &swapChainDesc); mSwapChain = wgpuDeviceCreateSwapChain(mContext.device, mSurface, &swapChainDesc);
assert(mSwapChain); assert(mSwapChain);
mRenderTarget.initialize(mDevice, mQueue, mPipelines, w, h); mRenderTarget.initialize(mContext.device, mContext.queue, mPipelines, w, h);
return true; return true;
} }

View file

@ -62,20 +62,12 @@ public:
private: private:
Array<RenderData> mRenderDatas{}; Array<RenderData> mRenderDatas{};
WgContext mContext;
Surface mTargetSurface = { nullptr, 0, 0, 0, ColorSpace::Unsupported, true };
// basic webgpu instances (TODO: create separated entity)
WGPUInstance mInstance{};
WGPUAdapter mAdapter{};
WGPUDevice mDevice{};
WGPUQueue mQueue{};
// webgpu surface handles (TODO: create separated entity)
WGPUSurface mSurface{};
WGPUSwapChain mSwapChain{};
WgPipelines mPipelines; WgPipelines mPipelines;
WgRenderTarget mRenderTarget; WgRenderTarget mRenderTarget;
WGPUSurface mSurface{};
bool mClearBuffer; WGPUSwapChain mSwapChain{};
Surface mTargetSurface = { nullptr, 0, 0, 0, ColorSpace::Unsupported, true };
}; };
#endif /* _TVG_WG_RENDERER_H_ */ #endif /* _TVG_WG_RENDERER_H_ */