// LumixEngine/src/renderer/renderer.cpp

#include "renderer.h"
#include "engine/allocators.h"
#include "engine/array.h"
#include "engine/command_line_parser.h"
#include "engine/crc32.h"
#include "engine/debug.h"
#include "engine/engine.h"
#include "engine/log.h"
#include "engine/atomic.h"
#include "engine/job_system.h"
#include "engine/sync.h"
#include "engine/thread.h"
#include "engine/os.h"
#include "engine/profiler.h"
#include "engine/reflection.h"
#include "engine/resource_manager.h"
#include "engine/string.h"
#include "engine/universe.h"
#include "renderer/font.h"
#include "renderer/material.h"
#include "renderer/model.h"
#include "renderer/pipeline.h"
#include "renderer/particle_system.h"
#include "renderer/render_scene.h"
#include "renderer/shader.h"
#include "renderer/terrain.h"
#include "renderer/texture.h"

namespace Lumix
{
static const ComponentType MODEL_INSTANCE_TYPE = reflection::getComponentType("model_instance");
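// Compute shader backing downscale(): each invocation averages a u_scale.x * u_scale.y
// block of source texels into one destination texel (an integer box filter). The 16x16
// local size below must stay in sync with the dispatch arithmetic in downscale().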
static const char* downscale_src = R"#(
layout(local_size_x = 16, local_size_y = 16, local_size_z = 1) in;
layout (rgba8, binding = 0) uniform readonly image2D u_src;
layout (rgba8, binding = 1) uniform writeonly image2D u_dst;
layout(std140, binding = 4) uniform Data {
ivec2 u_scale;
};
void main() {
vec4 accum = vec4(0);
for (int j = 0; j < u_scale.y; ++j) {
for (int i = 0; i < u_scale.x; ++i) {
vec4 v = imageLoad(u_src, ivec2(gl_GlobalInvocationID.xy) * u_scale + ivec2(i, j));
accum += v;
}
}
accum *= 1.0 / (u_scale.x * u_scale.y);
imageStore(u_dst, ivec2(gl_GlobalInvocationID.xy), accum);
}
)#";
template <u32 ALIGN>
struct TransientBuffer {
static constexpr u32 INIT_SIZE = 1024 * 1024;
static constexpr u32 OVERFLOW_BUFFER_SIZE = 512 * 1024 * 1024;
void init(gpu::BufferFlags flags) {
m_flags = flags;
m_buffer = gpu::allocBufferHandle();
m_offset = 0;
gpu::createBuffer(m_buffer, gpu::BufferFlags::MAPPABLE | flags, INIT_SIZE, nullptr);
m_size = INIT_SIZE;
m_ptr = (u8*)gpu::map(m_buffer, INIT_SIZE);
}
Renderer::TransientSlice alloc(u32 size) {
Renderer::TransientSlice slice;
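// round the request up to ALIGN; this bit trick assumes ALIGN is a power of two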
size = (size + (ALIGN - 1)) & ~(ALIGN - 1);
slice.offset = atomicAdd(&m_offset, size);
slice.size = size;
if (slice.offset + size <= m_size) {
slice.buffer = m_buffer;
slice.ptr = m_ptr + slice.offset;
return slice;
}
jobs::MutexGuard lock(m_mutex);
if (!m_overflow.buffer) {
m_overflow.buffer = gpu::allocBufferHandle();
m_overflow.data = (u8*)os::memReserve(OVERFLOW_BUFFER_SIZE);
m_overflow.size = 0;
m_overflow.commit = 0;
}
slice.ptr = m_overflow.data + m_overflow.size;
slice.offset = m_overflow.size;
m_overflow.size += size;
if (m_overflow.size > m_overflow.commit) {
const u32 page_size = os::getMemPageSize();
m_overflow.commit = (m_overflow.size + page_size - 1) & ~(page_size - 1);
os::memCommit(m_overflow.data, m_overflow.commit);
}
slice.buffer = m_overflow.buffer;
return slice;
}
void prepareToRender() {
gpu::unmap(m_buffer);
m_ptr = nullptr;
if (m_overflow.buffer) {
gpu::createBuffer(m_overflow.buffer, gpu::BufferFlags::MAPPABLE | m_flags, nextPow2(m_overflow.size + m_size), nullptr);
void* mem = gpu::map(m_overflow.buffer, m_overflow.size + m_size);
if (mem) {
memcpy(mem, m_overflow.data, m_overflow.size);
gpu::unmap(m_overflow.buffer);
}
os::memRelease(m_overflow.data, OVERFLOW_BUFFER_SIZE);
m_overflow.data = nullptr;
m_overflow.commit = 0;
}
}
void renderDone() {
if (m_overflow.buffer) {
m_size = nextPow2(m_overflow.size + m_size);
gpu::destroy(m_buffer);
m_buffer = m_overflow.buffer;
m_overflow.buffer = gpu::INVALID_BUFFER;
m_overflow.size = 0;
}
ASSERT(!m_ptr);
m_ptr = (u8*)gpu::map(m_buffer, m_size);
m_offset = 0;
}
gpu::BufferHandle m_buffer = gpu::INVALID_BUFFER;
i32 m_offset = 0;
u32 m_size = 0;
u8* m_ptr = nullptr;
jobs::Mutex m_mutex;
gpu::BufferFlags m_flags = gpu::BufferFlags::NONE;
struct {
gpu::BufferHandle buffer = gpu::INVALID_BUFFER;
u8* data = nullptr;
u32 size = 0;
u32 commit = 0;
} m_overflow;
};
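
// One in-flight frame. The renderer owns three of these (m_frames): the main thread
// records RenderJobs into m_cpu_frame while the render thread executes m_gpu_frame,
// and the can_setup / setup_done signals keep a frame from being reused until the
// GPU has finished with it.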
struct FrameData {
FrameData(struct RendererImpl& renderer, IAllocator& allocator)
: jobs(allocator)
, renderer(renderer)
, to_compile_shaders(allocator)
, material_updates(allocator)
, job_allocator(1024 * 1024 * 64)
{}
struct ShaderToCompile {
Shader* shader;
gpu::VertexDecl decl;
u32 defines;
gpu::ProgramHandle program;
Shader::Sources sources;
};
struct MaterialUpdates {
u32 idx;
MaterialConsts value;
};
TransientBuffer<16> transient_buffer;
TransientBuffer<256> uniform_buffer;
u32 gpu_frame = 0xffFFffFF;
LinearAllocator job_allocator;
Array<MaterialUpdates> material_updates;
Array<Renderer::RenderJob*> jobs;
jobs::Mutex shader_mutex;
Array<ShaderToCompile> to_compile_shaders;
RendererImpl& renderer;
jobs::Signal can_setup;
jobs::Signal setup_done;
};
template <typename T>
struct RenderResourceManager : ResourceManager
{
RenderResourceManager(Renderer& renderer, IAllocator& allocator)
: ResourceManager(allocator)
, m_renderer(renderer)
{}
Resource* createResource(const Path& path) override
{
return LUMIX_NEW(m_allocator, T)(path, *this, m_renderer, m_allocator);
}
void destroyResource(Resource& resource) override
{
LUMIX_DELETE(m_allocator, &resource);
}
Renderer& m_renderer;
};
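
// GPU timing built on timestamp queries: begin/endQuery() enqueue query pairs from the
// render thread, and frame() drains completed queries in submission order, converting
// GPU ticks to the CPU timeline via toCPUTimestamp(). One optional (non-nested)
// pipeline-statistics query can wrap a profiled block.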
struct GPUProfiler
{
struct Query
{
StaticString<32> name;
gpu::QueryHandle handle;
gpu::QueryHandle stats = gpu::INVALID_QUERY;
u64 result;
i64 profiler_link;
bool is_end;
bool is_frame;
};
GPUProfiler(IAllocator& allocator)
: m_queries(allocator)
, m_pool(allocator)
, m_stats_pool(allocator)
, m_gpu_to_cpu_offset(0)
{
}
~GPUProfiler()
{
ASSERT(m_pool.empty());
ASSERT(m_queries.empty());
}
u64 toCPUTimestamp(u64 gpu_timestamp) const
{
return u64(gpu_timestamp * (os::Timer::getFrequency() / double(gpu::getQueryFrequency()))) + m_gpu_to_cpu_offset;
}
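// Calibrates the GPU->CPU clock offset: issue a single timestamp query, allow up to
// ten swaps for it to resolve, then anchor the returned GPU tick to the CPU timestamp
// captured when the query was issued.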
void init()
{
gpu::QueryHandle q = gpu::createQuery(gpu::QueryType::TIMESTAMP);
gpu::queryTimestamp(q);
const u64 cpu_timestamp = os::Timer::getRawTimestamp();
u32 try_num = 0;
while (!gpu::isQueryReady(q) && try_num < 10) {
gpu::swapBuffers();
++try_num;
}
if (try_num == 10) {
logError("Failed to get GPU timestamp, timings are unreliable.");
m_gpu_to_cpu_offset = 0;
}
else {
const u64 gpu_timestamp = gpu::getQueryResult(q);
m_gpu_to_cpu_offset = cpu_timestamp - u64(gpu_timestamp * (os::Timer::getFrequency() / double(gpu::getQueryFrequency())));
gpu::destroy(q);
}
}
void clear()
{
for(const Query& q : m_queries) {
if (!q.is_frame) gpu::destroy(q.handle);
}
m_queries.clear();
for(const gpu::QueryHandle h : m_pool) {
gpu::destroy(h);
}
m_pool.clear();
if (m_stats_query) gpu::destroy(m_stats_query);
m_stats_query = gpu::INVALID_QUERY;
for(const gpu::QueryHandle h : m_stats_pool) {
gpu::destroy(h);
}
m_stats_pool.clear();
}
gpu::QueryHandle allocQuery()
{
if(!m_pool.empty()) {
const gpu::QueryHandle res = m_pool.back();
m_pool.pop();
return res;
}
return gpu::createQuery(gpu::QueryType::TIMESTAMP);
}
gpu::QueryHandle allocStatsQuery()
{
if(!m_stats_pool.empty()) {
const gpu::QueryHandle res = m_stats_pool.back();
m_stats_pool.pop();
return res;
}
return gpu::createQuery(gpu::QueryType::STATS);
}
void beginQuery(const char* name, i64 profiler_link, bool stats)
{
jobs::MutexGuard lock(m_mutex);
Query& q = m_queries.emplace();
q.profiler_link = profiler_link;
q.name = name;
q.is_end = false;
q.is_frame = false;
q.handle = allocQuery();
gpu::queryTimestamp(q.handle);
if (stats) {
ASSERT(m_stats_counter == 0); // nested counters are not supported
m_stats_query = allocStatsQuery();
gpu::beginQuery(m_stats_query);
m_stats_counter = 1;
}
else if (m_stats_counter > 0) {
++m_stats_counter;
}
}
void endQuery()
{
jobs::MutexGuard lock(m_mutex);
Query& q = m_queries.emplace();
q.is_end = true;
q.is_frame = false;
q.handle = allocQuery();
gpu::queryTimestamp(q.handle);
if (m_stats_counter > 0) {
--m_stats_counter;
if (m_stats_counter == 0) {
gpu::endQuery(m_stats_query);
q.stats = m_stats_query;
m_stats_query = gpu::INVALID_QUERY;
}
}
}
void frame()
{
PROFILE_FUNCTION();
jobs::MutexGuard lock(m_mutex);
Query frame_query;
frame_query.is_frame = true;
m_queries.push(frame_query);
while (!m_queries.empty()) {
Query q = m_queries[0];
if (q.is_frame) {
profiler::gpuFrame();
m_queries.erase(0);
continue;
}
if (!gpu::isQueryReady(q.handle)) break;
if (q.is_end) {
if (q.stats && !gpu::isQueryReady(q.stats)) break;
const u64 timestamp = toCPUTimestamp(gpu::getQueryResult(q.handle));
if (q.stats) {
profiler::gpuStats(gpu::getQueryResult(q.stats));
m_stats_pool.push(q.stats);
}
profiler::endGPUBlock(timestamp);
}
else {
const u64 timestamp = toCPUTimestamp(gpu::getQueryResult(q.handle));
profiler::beginGPUBlock(q.name, timestamp, q.profiler_link);
}
m_pool.push(q.handle);
m_queries.erase(0);
}
}
Array<Query> m_queries;
Array<gpu::QueryHandle> m_pool;
Array<gpu::QueryHandle> m_stats_pool;
jobs::Mutex m_mutex;
i64 m_gpu_to_cpu_offset;
u32 m_stats_counter = 0;
gpu::QueryHandle m_stats_query = gpu::INVALID_QUERY;
};
struct RendererImpl final : Renderer
{
explicit RendererImpl(Engine& engine)
: m_engine(engine)
, m_allocator(engine.getAllocator())
, m_texture_manager(*this, m_allocator)
, m_pipeline_manager(*this, m_allocator)
, m_model_manager(*this, m_allocator)
, m_particle_emitter_manager(*this, m_allocator)
, m_material_manager(*this, m_allocator)
, m_shader_manager(*this, m_allocator)
, m_font_manager(nullptr)
, m_shader_defines(m_allocator)
, m_profiler(m_allocator)
, m_layers(m_allocator)
, m_material_buffer(m_allocator)
, m_plugins(m_allocator)
, m_free_sort_keys(m_allocator)
, m_sort_key_to_mesh_map(m_allocator)
{
RenderScene::reflect();
LUMIX_GLOBAL_FUNC(Model::getBoneCount);
LUMIX_GLOBAL_FUNC(Model::getBoneName);
LUMIX_GLOBAL_FUNC(Model::getBoneParent);
m_shader_defines.reserve(32);
gpu::preinit(m_allocator, shouldLoadRenderdoc());
m_frames[0].create(*this, m_allocator);
m_frames[1].create(*this, m_allocator);
m_frames[2].create(*this, m_allocator);
}
u32 getVersion() const override { return 0; }
void serialize(OutputMemoryStream& stream) const override {}
bool deserialize(u32 version, InputMemoryStream& stream) override { return version == 0; }
~RendererImpl()
{
m_particle_emitter_manager.destroy();
m_pipeline_manager.destroy();
m_texture_manager.destroy();
m_model_manager.destroy();
m_material_manager.destroy();
m_shader_manager.destroy();
m_font_manager->destroy();
LUMIX_DELETE(m_allocator, m_font_manager);
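// pump the frame loop once per entry in m_frames, presumably so every in-flight
// frame's jobs and buffers drain before the GPU objects below are destroyed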
frame();
frame();
frame();
waitForRender();
jobs::Signal signal;
jobs::runLambda([this]() {
for (const Local<FrameData>& frame : m_frames) {
gpu::destroy(frame->transient_buffer.m_buffer);
gpu::destroy(frame->uniform_buffer.m_buffer);
}
gpu::destroy(m_material_buffer.buffer);
gpu::destroy(m_material_buffer.staging_buffer);
gpu::destroy(m_tmp_uniform_buffer);
gpu::destroy(m_scratch_buffer);
gpu::destroy(m_downscale_program);
m_profiler.clear();
gpu::shutdown();
}, &signal, 1);
jobs::wait(&signal);
}
static bool shouldLoadRenderdoc() {
char cmd_line[4096];
os::getCommandLine(Span(cmd_line));
CommandLineParser cmd_line_parser(cmd_line);
while (cmd_line_parser.next()) {
if (cmd_line_parser.currentEquals("-renderdoc")) {
return true;
}
}
return false;
}
void init() override {
struct InitData {
gpu::InitFlags flags = gpu::InitFlags::VSYNC;
RendererImpl* renderer;
} init_data;
init_data.renderer = this;
char cmd_line[4096];
os::getCommandLine(Span(cmd_line));
CommandLineParser cmd_line_parser(cmd_line);
while (cmd_line_parser.next()) {
if (cmd_line_parser.currentEquals("-no_vsync")) {
init_data.flags = init_data.flags & ~gpu::InitFlags::VSYNC;
}
else if (cmd_line_parser.currentEquals("-debug_opengl")) {
init_data.flags = init_data.flags | gpu::InitFlags::DEBUG_OUTPUT;
}
}
jobs::Signal signal;
jobs::runLambda([&init_data]() {
PROFILE_BLOCK("init_render");
RendererImpl& renderer = *(RendererImpl*)init_data.renderer;
Engine& engine = renderer.getEngine();
void* window_handle = engine.getWindowHandle();
if (!gpu::init(window_handle, init_data.flags)) {
os::messageBox("Failed to initialize renderer. More info in lumix.log.");
fatal(false, "gpu::init()");
}
gpu::MemoryStats mem_stats;
if (gpu::getMemoryStats(mem_stats)) {
logInfo("Initial GPU memory stats:\n",
"total: ", (mem_stats.total_available_mem / (1024.f * 1024.f)), "MB\n"
"currect: ", (mem_stats.current_available_mem / (1024.f * 1024.f)), "MB\n"
"dedicated: ", (mem_stats.dedicated_vidmem/ (1024.f * 1024.f)), "MB\n");
}
for (const Local<FrameData>& frame : renderer.m_frames) {
frame->transient_buffer.init(gpu::BufferFlags::NONE);
frame->uniform_buffer.init(gpu::BufferFlags::UNIFORM_BUFFER);
}
renderer.m_cpu_frame = renderer.m_frames[0].get();
renderer.m_gpu_frame = renderer.m_frames[0].get();
renderer.m_profiler.init();
MaterialBuffer& mb = renderer.m_material_buffer;
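// slot 0 is reserved for the default (magenta) material uploaded below and is never
// recycled; slots 1..399 start out chained into the intrusive free list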
mb.buffer = gpu::allocBufferHandle();
mb.staging_buffer = gpu::allocBufferHandle();
mb.map.insert(0, 0);
mb.data.resize(400);
mb.data[0].hash = 0;
mb.data[0].ref_count = 1;
mb.first_free = 1;
for (int i = 1; i < 400; ++i) {
mb.data[i].ref_count = 0;
mb.data[i].next_free = i + 1;
}
mb.data.back().next_free = -1;
gpu::createBuffer(mb.buffer
, gpu::BufferFlags::UNIFORM_BUFFER
, sizeof(MaterialConsts) * 400
, nullptr
);
gpu::createBuffer(mb.staging_buffer
, gpu::BufferFlags::UNIFORM_BUFFER
, sizeof(MaterialConsts)
, nullptr
);
renderer.m_downscale_program = gpu::allocProgramHandle();
const gpu::ShaderType type = gpu::ShaderType::COMPUTE;
const char* srcs[] = { downscale_src };
gpu::createProgram(renderer.m_downscale_program, {}, srcs, &type, 1, nullptr, 0, "downscale");
renderer.m_tmp_uniform_buffer = gpu::allocBufferHandle();
gpu::createBuffer(renderer.m_tmp_uniform_buffer, gpu::BufferFlags::UNIFORM_BUFFER, 16 * 1024, nullptr);
renderer.m_scratch_buffer = gpu::allocBufferHandle();
gpu::createBuffer(renderer.m_scratch_buffer, gpu::BufferFlags::SHADER_BUFFER | gpu::BufferFlags::COMPUTE_WRITE, SCRATCH_BUFFER_SIZE, nullptr);
MaterialConsts default_mat;
default_mat.color = Vec4(1, 0, 1, 1);
gpu::update(mb.buffer, &default_mat, sizeof(MaterialConsts));
}, &signal, 1);
jobs::wait(&signal);
ResourceManagerHub& manager = m_engine.getResourceManager();
m_pipeline_manager.create(PipelineResource::TYPE, manager);
m_texture_manager.create(Texture::TYPE, manager);
m_model_manager.create(Model::TYPE, manager);
m_material_manager.create(Material::TYPE, manager);
m_particle_emitter_manager.create(ParticleEmitterResource::TYPE, manager);
m_shader_manager.create(Shader::TYPE, manager);
m_font_manager = LUMIX_NEW(m_allocator, FontManager)(*this, m_allocator);
m_font_manager->create(FontResource::TYPE, manager);
RenderScene::registerLuaAPI(m_engine.getState());
m_layers.emplace("default");
}
MemRef copy(const void* data, u32 size) override
{
MemRef mem = allocate(size);
memcpy(mem.data, data, size);
return mem;
}
IAllocator& getAllocator() override
{
return m_allocator;
}
void free(const MemRef& memory) override
{
ASSERT(memory.own);
m_allocator.deallocate(memory.data);
}
MemRef allocate(u32 size) override
{
MemRef ret;
ret.size = size;
ret.own = true;
ret.data = m_allocator.allocate(size);
return ret;
}
gpu::BufferHandle getScratchBuffer() override {
return m_scratch_buffer;
}
void beginProfileBlock(const char* name, i64 link, bool stats) override
{
gpu::pushDebugGroup(name);
m_profiler.beginQuery(name, link, stats);
}
void endProfileBlock() override
{
m_profiler.endQuery();
gpu::popDebugGroup();
}
void getTextureImage(gpu::TextureHandle texture, u32 w, u32 h, gpu::TextureFormat out_format, Span<u8> data) override
{
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::pushDebugGroup("get image data");
gpu::TextureHandle staging = gpu::allocTextureHandle();
const gpu::TextureFlags flags = gpu::TextureFlags::NO_MIPS | gpu::TextureFlags::READBACK;
gpu::createTexture(staging, w, h, 1, out_format, flags, "staging_buffer");
gpu::copy(staging, handle, 0, 0);
gpu::readTexture(staging, 0, buf);
gpu::destroy(staging);
gpu::popDebugGroup();
}
gpu::TextureHandle handle;
gpu::TextureFormat out_format;
u32 w;
u32 h;
Span<u8> buf;
};
Cmd& cmd = createJob<Cmd>();
cmd.handle = texture;
cmd.w = w;
cmd.h = h;
cmd.buf = data;
cmd.out_format = out_format;
queue(cmd, 0);
}
void updateBuffer(gpu::BufferHandle handle, const MemRef& mem) override {
ASSERT(mem.size > 0);
ASSERT(handle);
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::update(handle, mem.data, mem.size);
if (mem.own) {
renderer->free(mem);
}
}
gpu::BufferHandle handle;
MemRef mem;
RendererImpl* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.handle = handle;
cmd.mem = mem;
cmd.renderer = this;
queue(cmd, 0);
}
void updateTexture(gpu::TextureHandle handle, u32 slice, u32 x, u32 y, u32 w, u32 h, gpu::TextureFormat format, const MemRef& mem) override
{
ASSERT(mem.size > 0);
ASSERT(handle);
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::update(handle, 0, x, y, slice, w, h, format, mem.data, mem.size);
if (mem.own) {
renderer->free(mem);
}
}
gpu::TextureHandle handle;
u32 x, y, w, h, slice;
gpu::TextureFormat format;
MemRef mem;
RendererImpl* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.handle = handle;
cmd.x = x;
cmd.y = y;
cmd.w = w;
cmd.h = h;
cmd.slice = slice;
cmd.format = format;
cmd.mem = mem;
cmd.renderer = this;
queue(cmd, 0);
}
gpu::TextureHandle loadTexture(const gpu::TextureDesc& desc, const MemRef& memory, gpu::TextureFlags flags, const char* debug_name) override
{
ASSERT(memory.size > 0);
const gpu::TextureHandle handle = gpu::allocTextureHandle();
if (!handle) return handle;
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
if (!gpu::createTexture(handle, desc.width, desc.height, desc.depth, desc.format, flags, debug_name)) {
if(memory.own) renderer->free(memory);
logError("Failed to create texture ", debug_name);
return;
}
const u8* ptr = (const u8*)memory.data;
for (u32 layer = 0; layer < desc.depth; ++layer) {
for(int side = 0; side < (desc.is_cubemap ? 6 : 1); ++side) {
const u32 z = layer * (desc.is_cubemap ? 6 : 1) + side;
for (u32 mip = 0; mip < desc.mips; ++mip) {
const u32 w = maximum(desc.width >> mip, 1);
const u32 h = maximum(desc.height >> mip, 1);
const u32 mip_size_bytes = gpu::getSize(desc.format, w, h);
gpu::update(handle, mip, 0, 0, z, w, h, desc.format, ptr, mip_size_bytes);
ptr += mip_size_bytes;
}
}
}
if(memory.own) renderer->free(memory);
}
StaticString<LUMIX_MAX_PATH> debug_name;
gpu::TextureHandle handle;
MemRef memory;
gpu::TextureFlags flags;
gpu::TextureDesc desc;
RendererImpl* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.debug_name = debug_name;
cmd.handle = handle;
cmd.memory = memory;
cmd.flags = flags;
if (desc.is_cubemap) cmd.flags = cmd.flags | gpu::TextureFlags::IS_CUBE;
if (desc.mips < 2) cmd.flags = cmd.flags | gpu::TextureFlags::NO_MIPS;
cmd.renderer = this;
cmd.desc = desc;
queue(cmd, 0);
return handle;
}
TransientSlice allocTransient(u32 size) override
{
return m_cpu_frame->transient_buffer.alloc(size);
}
TransientSlice allocUniform(u32 size) override
{
return m_cpu_frame->uniform_buffer.alloc(size);
}
gpu::BufferHandle getMaterialUniformBuffer() override {
return m_material_buffer.buffer;
}
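
// Material constants are deduplicated by content hash: identical MaterialConsts share
// one slot of the big uniform buffer via ref-counting, and freed slots are recycled
// through an intrusive free list (Data::next_free overlaps Data::hash in a union).
// The actual GPU upload is deferred to render() via m_cpu_frame->material_updates.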
u32 createMaterialConstants(const MaterialConsts& data) override {
const u32 hash = crc32(&data, sizeof(data));
auto iter = m_material_buffer.map.find(hash);
u32 idx;
if(iter.isValid()) {
idx = iter.value();
}
else {
if (m_material_buffer.first_free == -1) {
ASSERT(false);
++m_material_buffer.data[0].ref_count;
return 0;
}
idx = m_material_buffer.first_free;
m_material_buffer.first_free = m_material_buffer.data[m_material_buffer.first_free].next_free;
m_material_buffer.data[idx].ref_count = 0;
m_material_buffer.data[idx].hash = crc32(&data, sizeof(data));
m_material_buffer.map.insert(hash, idx);
m_cpu_frame->material_updates.push({idx, data});
}
++m_material_buffer.data[idx].ref_count;
return idx;
}
void destroyMaterialConstants(u32 idx) override {
--m_material_buffer.data[idx].ref_count;
if (m_material_buffer.data[idx].ref_count > 0) return;
const u32 hash = m_material_buffer.data[idx].hash;
m_material_buffer.data[idx].next_free = m_material_buffer.first_free;
m_material_buffer.first_free = idx;
m_material_buffer.map.erase(hash);
}
gpu::BufferHandle createBuffer(const MemRef& memory, gpu::BufferFlags flags) override
{
gpu::BufferHandle handle = gpu::allocBufferHandle();
if(!handle) return handle;
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::createBuffer(handle, flags, memory.size, memory.data);
if (memory.own) {
renderer->free(memory);
}
}
gpu::BufferHandle handle;
MemRef memory;
gpu::BufferFlags flags;
gpu::TextureFormat format;
Renderer* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.handle = handle;
cmd.memory = memory;
cmd.renderer = this;
cmd.flags = flags;
queue(cmd, 0);
return handle;
}
u8 getLayersCount() const override
{
return (u8)m_layers.size();
}
const char* getLayerName(u8 layer) const override
{
return m_layers[layer];
}
u8 getLayerIdx(const char* name) override
{
for(u8 i = 0; i < m_layers.size(); ++i) {
if(m_layers[i] == name) return i;
}
ASSERT(m_layers.size() < 0xff);
m_layers.emplace(name);
return m_layers.size() - 1;
}
void runInRenderThread(void* user_ptr, void (*fnc)(Renderer& renderer, void*)) override
{
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
fnc(*renderer, ptr);
}
void* ptr;
void (*fnc)(Renderer&, void*);
Renderer* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.fnc = fnc;
cmd.ptr = user_ptr;
cmd.renderer = this;
queue(cmd, 0);
}
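
// Sort keys are small dense u32 ids assigned to meshes so render jobs can sort draws
// by key and map back to the Mesh through getSortKeyToMeshMap(). Key 0 is reserved as
// invalid; freed keys are recycled from m_free_sort_keys before the range grows.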
const Mesh** getSortKeyToMeshMap() const override {
return m_sort_key_to_mesh_map.begin();
}
u32 allocSortKey(Mesh* mesh) override {
if (!m_free_sort_keys.empty()) {
const u32 key = m_free_sort_keys.back();
m_free_sort_keys.pop();
ASSERT(key != 0);
if ((u32)m_sort_key_to_mesh_map.size() < key + 1)
m_sort_key_to_mesh_map.resize(key + 1);
m_sort_key_to_mesh_map[key] = mesh;
return key;
}
++m_max_sort_key;
const u32 key = m_max_sort_key;
ASSERT(key != 0);
if ((u32)m_sort_key_to_mesh_map.size() < key + 1)
m_sort_key_to_mesh_map.resize(key + 1);
m_sort_key_to_mesh_map[key] = mesh;
return key;
}
void freeSortKey(u32 key) override {
if (key != 0) {
m_free_sort_keys.push(key);
}
}
u32 getMaxSortKey() const override {
return m_max_sort_key;
}
void destroy(gpu::ProgramHandle program) override
{
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::destroy(program);
}
gpu::ProgramHandle program;
RendererImpl* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.program = program;
cmd.renderer = this;
queue(cmd, 0);
}
void destroy(gpu::BufferHandle buffer) override
{
if (!buffer) return;
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::destroy(buffer);
}
gpu::BufferHandle buffer;
RendererImpl* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.buffer = buffer;
cmd.renderer = this;
queue(cmd, 0);
}
void copy(gpu::TextureHandle dst, gpu::TextureHandle src) override {
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::copy(dst, src, 0, 0);
}
gpu::TextureHandle src;
gpu::TextureHandle dst;
};
Cmd& cmd = createJob<Cmd>();
cmd.src = src;
cmd.dst = dst;
queue(cmd, 0);
}
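
// Integer downscale using the compute shader in downscale_src. Source dimensions must
// be whole multiples of the destination dimensions (asserted below); each thread
// averages one src_w/dst_w x src_h/dst_h block, and the dispatch rounds up to the
// shader's 16x16 workgroup size.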
void downscale(gpu::TextureHandle src, u32 src_w, u32 src_h, gpu::TextureHandle dst, u32 dst_w, u32 dst_h) override {
ASSERT(src_w % dst_w == 0);
ASSERT(src_h % dst_h == 0);
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
const IVec2 scale = src_size / dst_size;
gpu::update(ub, &scale, sizeof(scale));
gpu::bindUniformBuffer(4, ub, 0, sizeof(scale));
gpu::bindImageTexture(src, 0);
gpu::bindImageTexture(dst, 1);
gpu::useProgram(program);
gpu::dispatch((dst_size.x + 15) / 16, (dst_size.y + 15) / 16, 1);
}
gpu::TextureHandle src;
gpu::TextureHandle dst;
gpu::ProgramHandle program;
gpu::BufferHandle ub;
IVec2 src_size;
IVec2 dst_size;
};
Cmd& cmd = createJob<Cmd>();
cmd.src = src;
cmd.dst = dst;
cmd.src_size = {(i32)src_w, (i32)src_h};
cmd.dst_size = {(i32)dst_w, (i32)dst_h};
cmd.program = m_downscale_program;
cmd.ub = m_tmp_uniform_buffer;
queue(cmd, 0);
}
gpu::TextureHandle createTexture(u32 w, u32 h, u32 depth, gpu::TextureFormat format, gpu::TextureFlags flags, const MemRef& memory, const char* debug_name) override
{
gpu::TextureHandle handle = gpu::allocTextureHandle();
if(!handle) return handle;
struct Cmd : RenderJob {
void setup() override {}
void execute() override
{
PROFILE_FUNCTION();
bool res = gpu::createTexture(handle, w, h, depth, format, flags, debug_name);
ASSERT(res);
if (memory.data && memory.size) {
ASSERT(depth == 1);
gpu::update(handle, 0, 0, 0, 0, w, h, format, memory.data, memory.size);
if (u32(flags & gpu::TextureFlags::NO_MIPS) == 0) gpu::generateMipmaps(handle);
}
if (memory.own) renderer->free(memory);
}
StaticString<LUMIX_MAX_PATH> debug_name;
gpu::TextureHandle handle;
MemRef memory;
u32 w;
u32 h;
u32 depth;
gpu::TextureFormat format;
Renderer* renderer;
gpu::TextureFlags flags;
};
Cmd& cmd = createJob<Cmd>();
cmd.debug_name = debug_name;
cmd.handle = handle;
cmd.memory = memory;
cmd.format = format;
cmd.flags = flags;
cmd.w = w;
cmd.h = h;
cmd.depth = depth;
cmd.renderer = this;
queue(cmd, 0);
return handle;
}
void destroy(gpu::TextureHandle tex) override
{
if (!tex) return;
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::destroy(texture);
}
gpu::TextureHandle texture;
RendererImpl* renderer;
};
Cmd& cmd = createJob<Cmd>();
cmd.texture = tex;
cmd.renderer = this;
queue(cmd, 0);
}
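
// Central entry point of the job queue: the job is recorded into the current CPU frame
// immediately, its setup() runs asynchronously on a worker (tracked by setup_done), and
// execute() later runs on the render thread inside render(). A minimal sketch of a
// caller, following the Cmd pattern used throughout this file (MyCmd is hypothetical):
//
//   struct MyCmd : RenderJob {
//     void setup() override { /* gather data on a worker thread */ }
//     void execute() override { /* gpu::* calls on the render thread */ }
//   };
//   MyCmd& cmd = createJob<MyCmd>();
//   queue(cmd, 0);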
void queue(RenderJob& cmd, i64 profiler_link) override
{
cmd.profiler_link = profiler_link;
m_cpu_frame->jobs.push(&cmd);
jobs::runLambda([&cmd](){
PROFILE_BLOCK("setup_render_job");
profiler::blockColor(0x50, 0xff, 0xff);
cmd.setup();
}, &m_cpu_frame->setup_done);
}
void addPlugin(RenderPlugin& plugin) override {
m_plugins.push(&plugin);
}
void removePlugin(RenderPlugin& plugin) override {
m_plugins.eraseItem(&plugin);
}
Span<RenderPlugin*> getPlugins() override { return m_plugins; }
ResourceManager& getTextureManager() override { return m_texture_manager; }
FontManager& getFontManager() override { return *m_font_manager; }
void createScenes(Universe& ctx) override
{
UniquePtr<RenderScene> scene = RenderScene::createInstance(*this, m_engine, ctx, m_allocator);
ctx.addScene(scene.move());
}
void* allocJob(u32 size, u32 align) override {
return m_cpu_frame->job_allocator.allocate_aligned(size, align);
}
void deallocJob(void* job) override {
m_cpu_frame->job_allocator.deallocate_aligned(job);
}
const char* getName() const override { return "renderer"; }
Engine& getEngine() override { return m_engine; }
int getShaderDefinesCount() const override { return m_shader_defines.size(); }
const char* getShaderDefine(int define_idx) const override { return m_shader_defines[define_idx]; }
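
// Called from worker threads while building draw calls: returns a pre-allocated program
// handle immediately and defers compilation to render(). Requests within the same frame
// are deduplicated by (shader, vertex declaration hash, defines).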
gpu::ProgramHandle queueShaderCompile(Shader& shader, gpu::VertexDecl decl, u32 defines) override {
ASSERT(shader.isReady());
jobs::MutexGuard lock(m_cpu_frame->shader_mutex);
for (const auto& i : m_cpu_frame->to_compile_shaders) {
if (i.shader == &shader && decl.hash == i.decl.hash && defines == i.defines) {
return i.program;
}
}
gpu::ProgramHandle program = gpu::allocProgramHandle();
m_cpu_frame->to_compile_shaders.push({&shader, decl, defines, program, shader.m_sources});
return program;
}
void makeScreenshot(const Path& filename) override { }
u8 getShaderDefineIdx(const char* define) override
{
jobs::MutexGuard lock(m_shader_defines_mutex);
for (int i = 0; i < m_shader_defines.size(); ++i)
{
if (m_shader_defines[i] == define)
{
return i;
}
}
if (m_shader_defines.size() >= MAX_SHADER_DEFINES) {
ASSERT(false);
logError("Too many shader defines.");
}
m_shader_defines.emplace(define);
ASSERT(m_shader_defines.size() <= 32); // m_shader_defines is reserve()d in the renderer constructor, so getShaderDefine() never reallocates and remains safe to call from other threads
return u8(m_shader_defines.size() - 1);
}
void startCapture() override
{
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::startCapture();
}
};
Cmd& cmd = createJob<Cmd>();
queue(cmd, 0);
}
void stopCapture() override
{
struct Cmd : RenderJob {
void setup() override {}
void execute() override {
PROFILE_FUNCTION();
gpu::stopCapture();
}
};
Cmd& cmd = createJob<Cmd>();
queue(cmd, 0);
}
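
// Render-thread side of a frame: recycle any FrameData whose GPU work has finished,
// upload this frame's transient/uniform data and material constants, compile shaders
// queued by queueShaderCompile(), execute the recorded RenderJobs inside a "frame"
// GPU profiler block, then swap buffers, advance m_gpu_frame, and wait until the frame
// about to be reused is no longer in flight on the GPU.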
void render() {
FrameData& check_frame = *m_frames[(getFrameIndex(m_gpu_frame) + 1) % lengthOf(m_frames)].get();
if (check_frame.gpu_frame != 0xffFFffFF && gpu::frameFinished(check_frame.gpu_frame)) {
check_frame.gpu_frame = 0xffFFffFF;
check_frame.transient_buffer.renderDone();
check_frame.uniform_buffer.renderDone();
jobs::setGreen(&check_frame.can_setup);
}
FrameData& frame = *m_gpu_frame;
frame.transient_buffer.prepareToRender();
frame.uniform_buffer.prepareToRender();
gpu::MemoryStats mem_stats;
if (gpu::getMemoryStats(mem_stats)) {
profiler::gpuMemStats(mem_stats.total_available_mem, mem_stats.current_available_mem, mem_stats.dedicated_vidmem, mem_stats.buffer_mem, mem_stats.texture_mem, mem_stats.render_target_mem);
}
for (const auto& i : frame.to_compile_shaders) {
Shader::compile(i.program, i.decl, i.defines, i.sources, *this);
}
frame.to_compile_shaders.clear();
for (const auto& i : frame.material_updates) {
gpu::update(m_material_buffer.staging_buffer, &i.value, sizeof(MaterialConsts));
gpu::copy(m_material_buffer.buffer, m_material_buffer.staging_buffer, i.idx * sizeof(MaterialConsts), 0, sizeof(MaterialConsts));
}
frame.material_updates.clear();
m_profiler.beginQuery("frame", 0, false);
gpu::useProgram(gpu::INVALID_PROGRAM);
gpu::bindIndexBuffer(gpu::INVALID_BUFFER);
{
for (RenderJob* job : frame.jobs) {
PROFILE_BLOCK("render job");
profiler::blockColor(0xaa, 0xff, 0xaa);
profiler::link(job->profiler_link);
job->execute();
destroyJob(*job);
}
frame.job_allocator.reset();
}
m_profiler.endQuery();
frame.jobs.clear();
PROFILE_BLOCK("swap buffers");
jobs::enableBackupWorker(true);
frame.gpu_frame = gpu::swapBuffers();
jobs::enableBackupWorker(false);
m_profiler.frame();
m_gpu_frame = m_frames[(getFrameIndex(m_gpu_frame) + 1) % lengthOf(m_frames)].get();
FrameData& check_frame2 = *m_frames[(getFrameIndex(m_gpu_frame) + 1) % lengthOf(m_frames)].get();
if (check_frame2.gpu_frame != 0xffFFffFF && gpu::frameFinished(check_frame2.gpu_frame)) {
check_frame2.gpu_frame = 0xffFFffFF;
check_frame2.transient_buffer.renderDone();
check_frame2.uniform_buffer.renderDone();
jobs::setGreen(&check_frame2.can_setup);
}
if (m_gpu_frame->gpu_frame != 0xffFFffFF) {
gpu::waitFrame(m_gpu_frame->gpu_frame);
m_gpu_frame->gpu_frame = 0xFFffFFff;
m_gpu_frame->transient_buffer.renderDone();
m_gpu_frame->uniform_buffer.renderDone();
jobs::setGreen(&m_gpu_frame->can_setup);
}
}
LinearAllocator& getCurrentFrameAllocator() { return m_cpu_frame->job_allocator; }
void waitForCommandSetup() override
{
jobs::wait(&m_cpu_frame->setup_done);
}
void waitForRender() override {
jobs::wait(&m_last_render);
}
i32 getFrameIndex(FrameData* frame) const {
for (i32 i = 0; i < (i32)lengthOf(m_frames); ++i) {
if (frame == m_frames[i].get()) return i;
}
ASSERT(false);
return -1;
}
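
// Main-thread end of frame: wait for all queued jobs' setup() to finish, publish the
// program handles queued by queueShaderCompile() into their shaders, close the frame
// for further setup, advance m_cpu_frame along the ring, kick render() on a worker,
// and block until the newly selected frame is free to record into.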
void frame() override
{
PROFILE_FUNCTION();
jobs::wait(&m_cpu_frame->setup_done);
for (const auto& i : m_cpu_frame->to_compile_shaders) {
const u64 key = i.defines | ((u64)i.decl.hash << 32);
i.shader->m_programs.insert(key, i.program);
}
jobs::setRed(&m_cpu_frame->can_setup);
m_cpu_frame = m_frames[(getFrameIndex(m_cpu_frame) + 1) % lengthOf(m_frames)].get();
jobs::runLambda([this](){
render();
}, &m_last_render, 1);
jobs::wait(&m_cpu_frame->can_setup);
}
Engine& m_engine;
IAllocator& m_allocator;
Array<StaticString<32>> m_shader_defines;
jobs::Mutex m_shader_defines_mutex;
Array<StaticString<32>> m_layers;
FontManager* m_font_manager;
MaterialManager m_material_manager;
RenderResourceManager<Model> m_model_manager;
RenderResourceManager<ParticleEmitterResource> m_particle_emitter_manager;
RenderResourceManager<PipelineResource> m_pipeline_manager;
RenderResourceManager<Shader> m_shader_manager;
RenderResourceManager<Texture> m_texture_manager;
gpu::ProgramHandle m_downscale_program;
gpu::BufferHandle m_tmp_uniform_buffer;
gpu::BufferHandle m_scratch_buffer;
Array<u32> m_free_sort_keys;
Array<const Mesh*> m_sort_key_to_mesh_map;
u32 m_max_sort_key = 0;
Array<RenderPlugin*> m_plugins;
Local<FrameData> m_frames[3];
FrameData* m_gpu_frame = nullptr;
FrameData* m_cpu_frame = nullptr;
jobs::Signal m_last_render;
GPUProfiler m_profiler;
struct MaterialBuffer {
MaterialBuffer(IAllocator& alloc)
: map(alloc)
, data(alloc)
{}
struct Data {
u32 ref_count;
union {
u32 hash;
u32 next_free;
};
};
gpu::BufferHandle buffer = gpu::INVALID_BUFFER;
gpu::BufferHandle staging_buffer = gpu::INVALID_BUFFER;
Array<Data> data;
int first_free;
HashMap<u32, u32> map;
} m_material_buffer;
};
extern "C"
{
LUMIX_PLUGIN_ENTRY(renderer)
{
return LUMIX_NEW(engine.getAllocator(), RendererImpl)(engine);
}
}
} // namespace Lumix