// LumixEngine/src/renderer/renderer.cpp

1062 lines
29 KiB
C++
Raw Normal View History

2015-07-04 15:22:28 +02:00
#include "renderer.h"
#include "engine/allocators.h"
#include "engine/array.h"
#include "engine/atomic.h"
#include "engine/command_line_parser.h"
#include "engine/engine.h"
#include "engine/hash.h"
#include "engine/log.h"
#include "engine/job_system.h"
#include "engine/page_allocator.h"
#include "engine/sync.h"
#include "engine/thread.h"
#include "engine/os.h"
#include "engine/profiler.h"
#include "engine/reflection.h"
#include "engine/resource_manager.h"
#include "engine/string.h"
#include "engine/world.h"
#include "renderer/draw_stream.h"
#include "renderer/font.h"
#include "renderer/material.h"
#include "renderer/model.h"
#include "renderer/pipeline.h"
#include "renderer/particle_system.h"
#include "renderer/render_module.h"
#include "renderer/shader.h"
#include "renderer/terrain.h"
#include "renderer/texture.h"

namespace Lumix {

static const ComponentType MODEL_INSTANCE_TYPE = reflection::getComponentType("model_instance");
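
// Per-frame transient GPU buffer: lock-free bump allocation from a persistently
// mapped buffer, with a mutex-guarded overflow path that commits reserved
// virtual memory on demand and becomes the (bigger) main buffer next frame.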
template <u32 ALIGN>
struct TransientBuffer {
	static constexpr u32 INIT_SIZE = 1024 * 1024;
	static constexpr u32 OVERFLOW_BUFFER_SIZE = 512 * 1024 * 1024;

	void init(gpu::BufferFlags flags) {
		m_flags = flags;
		m_buffer = gpu::allocBufferHandle();
		m_offset = 0;
		gpu::createBuffer(m_buffer, gpu::BufferFlags::MAPPABLE | flags, INIT_SIZE, nullptr);
		m_size = INIT_SIZE;
		m_ptr = (u8*)gpu::map(m_buffer, INIT_SIZE);
	}

	Renderer::TransientSlice alloc(u32 size) {
		Renderer::TransientSlice slice;
		// round the requested size up to the allocator's alignment
		size = (size + (ALIGN - 1)) & ~(ALIGN - 1);
		slice.offset = m_offset.add(size);
		slice.size = size;
		if (slice.offset + size <= m_size) {
			slice.buffer = m_buffer;
			slice.ptr = m_ptr + slice.offset;
			return slice;
		}

		// the main buffer is full, fall back to the overflow buffer
		jobs::MutexGuard lock(m_mutex);
		if (!m_overflow.buffer) {
			m_overflow.buffer = gpu::allocBufferHandle();
			m_overflow.data = (u8*)os::memReserve(OVERFLOW_BUFFER_SIZE);
			m_overflow.size = 0;
			m_overflow.commit = 0;
		}
		slice.ptr = m_overflow.data + m_overflow.size;
		slice.offset = m_overflow.size;
		m_overflow.size += size;
		if (m_overflow.size > m_overflow.commit) {
			const u32 page_size = os::getMemPageSize();
			m_overflow.commit = (m_overflow.size + page_size - 1) & ~(page_size - 1);
			os::memCommit(m_overflow.data, m_overflow.commit);
		}
		slice.buffer = m_overflow.buffer;
		return slice;
	}

	void prepareToRender() {
		gpu::unmap(m_buffer);
		m_ptr = nullptr;
		if (m_overflow.buffer) {
			gpu::createBuffer(m_overflow.buffer, gpu::BufferFlags::MAPPABLE | m_flags, nextPow2(m_overflow.size + m_size), nullptr);
			void* mem = gpu::map(m_overflow.buffer, m_overflow.size + m_size);
			if (mem) {
				memcpy(mem, m_overflow.data, m_overflow.size);
				gpu::unmap(m_overflow.buffer);
			}
			os::memRelease(m_overflow.data, OVERFLOW_BUFFER_SIZE);
			m_overflow.data = nullptr;
			m_overflow.commit = 0;
		}
	}

	void renderDone() {
		if (m_overflow.buffer) {
			// the overflow buffer replaces the main buffer for the next frame
			m_size = nextPow2(m_overflow.size + m_size);
			gpu::destroy(m_buffer);
			m_buffer = m_overflow.buffer;
			m_overflow.buffer = gpu::INVALID_BUFFER;
			m_overflow.size = 0;
		}
		ASSERT(!m_ptr);
		m_ptr = (u8*)gpu::map(m_buffer, m_size);
		ASSERT(m_ptr);
		m_offset = 0;
	}

	gpu::BufferHandle m_buffer = gpu::INVALID_BUFFER;
	AtomicI32 m_offset = 0;
	u32 m_size = 0;
	u8* m_ptr = nullptr;
	jobs::Mutex m_mutex;
	gpu::BufferFlags m_flags = gpu::BufferFlags::NONE;
	struct {
		gpu::BufferHandle buffer = gpu::INVALID_BUFFER;
		u8* data = nullptr;
		u32 size = 0;
		u32 commit = 0;
	} m_overflow;
};
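
// All state the renderer needs for one frame in flight; RendererImpl keeps
// three of these so the CPU can record one frame while the GPU renders another.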
struct FrameData {
	FrameData(struct RendererImpl& renderer, IAllocator& allocator, PageAllocator& page_allocator);

	struct ShaderToCompile {
		Shader* shader;
		gpu::VertexDecl decl;
		gpu::ProgramHandle program;
		ShaderKey key;
	};

	TransientBuffer<16> transient_buffer;
	TransientBuffer<256> uniform_buffer;
	u32 gpu_frame = 0xffFFffFF;
	LinearAllocator linear_allocator;
	jobs::Mutex shader_mutex;
	Array<ShaderToCompile> to_compile_shaders;
	RendererImpl& renderer;
	jobs::Signal can_setup;
	jobs::Signal setup_done;
	u32 frame_number = 0;
	DrawStream begin_frame_draw_stream;
	DrawStream draw_stream;
	DrawStream end_frame_draw_stream;
};
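
// Resource manager shared by the render resource types (models, shaders,
// textures, particle systems); tags allocations with the resource type name.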
template <typename T>
struct RenderResourceManager : ResourceManager
{
	RenderResourceManager(const char* type_name, Renderer& renderer, IAllocator& allocator)
		: ResourceManager(allocator)
		, m_renderer(renderer)
		, m_allocator(allocator, type_name)
	{}

	Resource* createResource(const Path& path) override
	{
		return LUMIX_NEW(m_allocator, T)(path, *this, m_renderer, m_allocator);
	}

	void destroyResource(Resource& resource) override
	{
		LUMIX_DELETE(m_allocator, &resource);
	}

	Renderer& m_renderer;
	TagAllocator m_allocator;
};
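
// Forwards gpu:: timestamp/stats queries to the CPU-side profiler, translating
// GPU timestamps to the CPU clock via the offset measured in init().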
struct GPUProfiler
{
	struct Query
	{
		StaticString<32> name;
		gpu::QueryHandle handle;
		gpu::QueryHandle stats = gpu::INVALID_QUERY;
		u64 result;
		i64 profiler_link;
		bool is_end;
		bool is_frame;
	};

	GPUProfiler(IAllocator& allocator)
		: m_queries(allocator)
		, m_pool(allocator)
		, m_stats_pool(allocator)
		, m_gpu_to_cpu_offset(0)
	{
	}

	~GPUProfiler()
	{
		ASSERT(m_pool.empty());
		ASSERT(m_queries.empty());
	}

	u64 toCPUTimestamp(u64 gpu_timestamp) const
	{
		return u64(gpu_timestamp * (os::Timer::getFrequency() / double(gpu::getQueryFrequency()))) + m_gpu_to_cpu_offset;
	}

	void init()
	{
		PROFILE_FUNCTION();
		// measure the offset between the GPU and CPU clocks so GPU blocks can be
		// placed on the CPU profiler timeline
		gpu::QueryHandle q = gpu::createQuery(gpu::QueryType::TIMESTAMP);
		gpu::queryTimestamp(q);
		const u64 cpu_timestamp = os::Timer::getRawTimestamp();

		u32 try_num = 0;
		while (!gpu::isQueryReady(q) && try_num < 10) {
			gpu::swapBuffers();
			++try_num;
		}
		if (try_num == 10) {
			logError("Failed to get GPU timestamp, timings are unreliable.");
			m_gpu_to_cpu_offset = 0;
		}
		else {
			const u64 gpu_timestamp = gpu::getQueryResult(q);
			m_gpu_to_cpu_offset = cpu_timestamp - u64(gpu_timestamp * (os::Timer::getFrequency() / double(gpu::getQueryFrequency())));
			gpu::destroy(q);
		}
	}

	void clear()
	{
		for (const Query& q : m_queries) {
			if (!q.is_frame) gpu::destroy(q.handle);
		}
		m_queries.clear();

		for (const gpu::QueryHandle h : m_pool) {
			gpu::destroy(h);
		}
		m_pool.clear();

		if (m_stats_query) gpu::destroy(m_stats_query);
		m_stats_query = gpu::INVALID_QUERY;

		for (const gpu::QueryHandle h : m_stats_pool) {
			gpu::destroy(h);
		}
		m_stats_pool.clear();
	}
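
	// finished query handles are recycled through m_pool / m_stats_pool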
	gpu::QueryHandle allocQuery()
	{
		if (!m_pool.empty()) {
			const gpu::QueryHandle res = m_pool.back();
			m_pool.pop();
			return res;
		}
		return gpu::createQuery(gpu::QueryType::TIMESTAMP);
	}

	gpu::QueryHandle allocStatsQuery()
	{
		if (!m_stats_pool.empty()) {
			const gpu::QueryHandle res = m_stats_pool.back();
			m_stats_pool.pop();
			return res;
		}
		return gpu::createQuery(gpu::QueryType::STATS);
	}

	void beginQuery(const char* name, i64 profiler_link, bool stats)
	{
		jobs::MutexGuard lock(m_mutex);
		Query& q = m_queries.emplace();
		q.profiler_link = profiler_link;
		q.name = name;
		q.is_end = false;
		q.is_frame = false;
		q.handle = allocQuery();
		gpu::queryTimestamp(q.handle);
		if (stats) {
			ASSERT(m_stats_counter == 0); // nested counters are not supported
			m_stats_query = allocStatsQuery();
			gpu::beginQuery(m_stats_query);
			m_stats_counter = 1;
		}
		else if (m_stats_counter > 0) {
			++m_stats_counter;
		}
	}

	void endQuery()
	{
		jobs::MutexGuard lock(m_mutex);
		Query& q = m_queries.emplace();
		q.is_end = true;
		q.is_frame = false;
		q.handle = allocQuery();
		gpu::queryTimestamp(q.handle);
		if (m_stats_counter > 0) {
			--m_stats_counter;
			if (m_stats_counter == 0) {
				gpu::endQuery(m_stats_query);
				q.stats = m_stats_query;
				m_stats_query = gpu::INVALID_QUERY;
			}
		}
	}

	void frame()
	{
		PROFILE_FUNCTION();
		jobs::MutexGuard lock(m_mutex);
		// drain finished queries in submission order and forward them to the profiler
		while (!m_queries.empty()) {
			Query q = m_queries[0];
			if (!gpu::isQueryReady(q.handle)) break;

			if (q.is_end) {
				if (q.stats && !gpu::isQueryReady(q.stats)) break;
				const u64 timestamp = toCPUTimestamp(gpu::getQueryResult(q.handle));
				if (q.stats) {
					profiler::gpuStats(gpu::getQueryResult(q.stats));
					m_stats_pool.push(q.stats);
				}
				profiler::endGPUBlock(timestamp);
			}
			else {
				const u64 timestamp = toCPUTimestamp(gpu::getQueryResult(q.handle));
				profiler::beginGPUBlock(q.name, timestamp, q.profiler_link);
			}
			m_pool.push(q.handle);
			m_queries.erase(0);
		}
	}

	Array<Query> m_queries;
	Array<gpu::QueryHandle> m_pool;
	Array<gpu::QueryHandle> m_stats_pool;
	jobs::Mutex m_mutex;
	i64 m_gpu_to_cpu_offset;
	u32 m_stats_counter = 0;
	gpu::QueryHandle m_stats_query = gpu::INVALID_QUERY;
};
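
// The renderer system: owns the resource managers, the material constants
// buffer, the per-frame data and the GPU profiler. frame() finalizes a frame
// recorded on the CPU side; render() replays the recorded streams on a job.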
struct RendererImpl final : Renderer
{
	explicit RendererImpl(Engine& engine)
		: m_engine(engine)
		, m_allocator(engine.getAllocator(), "renderer")
		, m_texture_manager("textures", *this, m_allocator)
		, m_model_manager("models", *this, m_allocator)
		, m_particle_emitter_manager("particle emitters", *this, m_allocator)
		, m_material_manager(*this, m_allocator)
		, m_shader_manager("shaders", *this, m_allocator)
		, m_font_manager(nullptr)
		, m_shader_defines(m_allocator)
		, m_profiler(m_allocator)
		, m_layers(m_allocator)
		, m_material_buffer(m_allocator)
		, m_plugins(m_allocator)
		, m_free_sort_keys(m_allocator)
		, m_sort_key_to_mesh_map(m_allocator)
		, m_semantic_defines(m_allocator)
	{
		RenderModule::reflect();
		LUMIX_GLOBAL_FUNC(Model::getBoneCount);
		LUMIX_GLOBAL_FUNC(Model::getBoneName);
		LUMIX_GLOBAL_FUNC(Model::getBoneParent);

		m_shader_defines.reserve(32);

		gpu::preinit(m_allocator, shouldLoadRenderdoc());
		for (Local<FrameData>& f : m_frames) f.create(*this, m_allocator, m_engine.getPageAllocator());
	}

	float getLODMultiplier() const override { return m_lod_multiplier; }
	void setLODMultiplier(float value) override { m_lod_multiplier = maximum(0.f, value); }

	void serialize(OutputMemoryStream& stream) const override {}
	bool deserialize(i32 version, InputMemoryStream& stream) override { return version == 0; }

	~RendererImpl()
	{
		m_particle_emitter_manager.destroy();
		m_texture_manager.destroy();
		m_model_manager.destroy();
		m_material_manager.destroy();
		m_shader_manager.destroy();
		m_font_manager->destroy();
		LUMIX_DELETE(m_allocator, m_font_manager);

		// pump the remaining frames in flight so everything is flushed before shutdown
		frame();
		frame();
		frame();
		waitForRender();

		jobs::Signal signal;
		jobs::runLambda([this]() {
			for (const Local<FrameData>& frame : m_frames) {
				gpu::destroy(frame->transient_buffer.m_buffer);
				gpu::destroy(frame->uniform_buffer.m_buffer);
			}
			gpu::destroy(m_material_buffer.buffer);
			m_profiler.clear();
			gpu::shutdown();
		}, &signal, 1);
		jobs::wait(&signal);
	}
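
	// Builds (and caches) a block of #defines mapping vertex attribute semantics
	// to attribute indices; instance data uses the first unused attribute slots.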
	static void add(String& res, const char* a, u32 b) {
		char tmp[32];
		toCString(b, Span(tmp));
		res.append(a, tmp, "\n");
	}

	const char* getSemanticDefines(Span<const AttributeSemantic> attributes) override {
		RuntimeHash hash(attributes.begin(), sizeof(attributes[0]) * attributes.length());
		auto iter = m_semantic_defines.find(hash);
		if (!iter.isValid()) {
			String s(m_allocator);
			u32 first_empty = attributes.length();
			for (u32 i = 0; i < attributes.length(); ++i) {
				switch (attributes[i]) {
					case AttributeSemantic::COUNT: ASSERT(false); break;
					case AttributeSemantic::NONE: first_empty = minimum(first_empty, i); break;
					case AttributeSemantic::POSITION: break;
					case AttributeSemantic::NORMAL: add(s, "#define NORMAL_ATTR ", i); break;
					case AttributeSemantic::TANGENT: add(s, "#define TANGENT_ATTR ", i); break;
					case AttributeSemantic::BITANGENT: add(s, "#define BITANGENT_ATTR ", i); break;
					case AttributeSemantic::COLOR0: add(s, "#define COLOR0_ATTR ", i); break;
					case AttributeSemantic::COLOR1: add(s, "#define COLOR1_ATTR ", i); break;
					case AttributeSemantic::INDICES: add(s, "#define INDICES_ATTR ", i); break;
					case AttributeSemantic::WEIGHTS: add(s, "#define WEIGHTS_ATTR ", i); break;
					case AttributeSemantic::TEXCOORD0: add(s, "#define UV0_ATTR ", i); break;
					case AttributeSemantic::TEXCOORD1: add(s, "#define UV1_ATTR ", i); break;
					case AttributeSemantic::AO: add(s, "#define AO_ATTR ", i); break;
				}
			}
			add(s, "#define INSTANCE0_ATTR ", first_empty + 0);
			add(s, "#define INSTANCE1_ATTR ", first_empty + 1);
			add(s, "#define INSTANCE2_ATTR ", first_empty + 2);
			add(s, "#define INSTANCE3_ATTR ", first_empty + 3);
			add(s, "#define INSTANCE4_ATTR ", first_empty + 4);
			add(s, "#define INSTANCE5_ATTR ", first_empty + 5);
			iter = m_semantic_defines.insert(hash, static_cast<String&&>(s));
		}
		return iter.value().c_str();
	}

	static bool shouldLoadRenderdoc() {
		char cmd_line[4096];
		os::getCommandLine(Span(cmd_line));
		CommandLineParser cmd_line_parser(cmd_line);
		while (cmd_line_parser.next()) {
			if (cmd_line_parser.currentEquals("-renderdoc")) {
				return true;
			}
		}
		return false;
	}
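
	// Initialization is two-phase: initBegin() kicks GPU init on a worker job,
	// initEnd() blocks until that job has finished.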
	void initEnd() override {
		jobs::wait(&m_init_signal);
	}

	void initBegin() override {
		PROFILE_FUNCTION();
		gpu::InitFlags flags = gpu::InitFlags::VSYNC;
		char cmd_line[4096];
		os::getCommandLine(Span(cmd_line));
		CommandLineParser cmd_line_parser(cmd_line);
		while (cmd_line_parser.next()) {
			if (cmd_line_parser.currentEquals("-no_vsync")) {
				flags = flags & ~gpu::InitFlags::VSYNC;
			}
			else if (cmd_line_parser.currentEquals("-debug_opengl")) {
				flags = flags | gpu::InitFlags::DEBUG_OUTPUT;
			}
		}

		jobs::runLambda([this, flags]() {
			PROFILE_BLOCK("init_render");
			void* window_handle = m_engine.getWindowHandle();
			if (!gpu::init(window_handle, flags)) {
				os::messageBox("Failed to initialize renderer. More info in lumix.log.");
			}

			gpu::MemoryStats mem_stats;
			if (gpu::getMemoryStats(mem_stats)) {
				logInfo("Initial GPU memory stats:\n",
					"total: ", (mem_stats.total_available_mem / (1024.f * 1024.f)), "MB\n"
					"current: ", (mem_stats.current_available_mem / (1024.f * 1024.f)), "MB\n"
					"dedicated: ", (mem_stats.dedicated_vidmem / (1024.f * 1024.f)), "MB\n");
			}

			for (const Local<FrameData>& frame : m_frames) {
				frame->transient_buffer.init(gpu::BufferFlags::NONE);
				frame->uniform_buffer.init(gpu::BufferFlags::UNIFORM_BUFFER);
			}
			m_profiler.init();
		}, &m_init_signal, 1);

		m_cpu_frame = m_frames[0].get();
		m_gpu_frame = m_frames[0].get();

		// slot 0 of the material buffer is reserved for default (all-zero) constants
		MaterialBuffer& mb = m_material_buffer;
		const u32 MAX_MATERIAL_CONSTS_COUNT = 400;
		mb.buffer = gpu::allocBufferHandle();
		mb.map.insert(RuntimeHash(), 0);
		mb.data.resize(MAX_MATERIAL_CONSTS_COUNT);
		mb.data[0].hash = RuntimeHash();
		mb.data[0].ref_count = 1;
		mb.first_free = 1;
		for (int i = 1; i < MAX_MATERIAL_CONSTS_COUNT; ++i) {
			mb.data[i].ref_count = 0;
			mb.data[i].next_free = i + 1;
		}
		mb.data.back().next_free = -1;

		DrawStream& stream = m_cpu_frame->draw_stream;
		stream.createBuffer(mb.buffer
			, gpu::BufferFlags::UNIFORM_BUFFER
			, Material::MAX_UNIFORMS_BYTES * MAX_MATERIAL_CONSTS_COUNT
			, nullptr
		);
		float default_mat[Material::MAX_UNIFORMS_FLOATS] = {};
		stream.update(mb.buffer, &default_mat, sizeof(default_mat));

		ResourceManagerHub& manager = m_engine.getResourceManager();
		m_texture_manager.create(Texture::TYPE, manager);
		m_model_manager.create(Model::TYPE, manager);
		m_material_manager.create(Material::TYPE, manager);
		m_particle_emitter_manager.create(ParticleSystemResource::TYPE, manager);
		m_shader_manager.create(Shader::TYPE, manager);
		m_font_manager = LUMIX_NEW(m_allocator, FontManager)(*this, m_allocator);
		m_font_manager->create(FontResource::TYPE, manager);

		RenderModule::registerLuaAPI(m_engine.getState(), *this);

		m_layers.emplace("default");
	}

	MemRef copy(const void* data, u32 size) override
	{
		MemRef mem = allocate(size);
		memcpy(mem.data, data, size);
		return mem;
	}

	IAllocator& getAllocator() override
	{
		return m_allocator;
	}

	void free(const MemRef& memory) override
	{
		ASSERT(memory.own);
		m_allocator.deallocate(memory.data);
	}

	MemRef allocate(u32 size) override
	{
		MemRef ret;
		ret.size = size;
		ret.own = true;
		ret.data = m_allocator.allocate(size, 8);
		return ret;
	}

	void beginProfileBlock(const char* name, i64 link, bool stats) override
	{
		gpu::pushDebugGroup(name);
		m_profiler.beginQuery(name, link, stats);
	}

	void endProfileBlock() override
	{
		m_profiler.endQuery();
		gpu::popDebugGroup();
	}

	TransientSlice allocTransient(u32 size) override
	{
		jobs::wait(&m_cpu_frame->can_setup);
		return m_cpu_frame->transient_buffer.alloc(size);
	}

	TransientSlice allocUniform(const void* data, u32 size) override {
		jobs::wait(&m_cpu_frame->can_setup);
		const TransientSlice slice = m_cpu_frame->uniform_buffer.alloc(size);
		memcpy(slice.ptr, data, size);
		return slice;
	}

	TransientSlice allocUniform(u32 size) override
	{
		jobs::wait(&m_cpu_frame->can_setup);
		return m_cpu_frame->uniform_buffer.alloc(size);
	}
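
	// Material constants live in slots of one big uniform buffer; slots are
	// deduplicated by content hash and reference-counted.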
	gpu::BufferHandle getMaterialUniformBuffer() override {
		return m_material_buffer.buffer;
	}

	u32 createMaterialConstants(Span<const float> data) override {
		const RuntimeHash hash(data.begin(), data.length() * sizeof(float));
		auto iter = m_material_buffer.map.find(hash);
		u32 idx;
		if (iter.isValid()) {
			idx = iter.value();
		}
		else {
			if (m_material_buffer.first_free == -1) {
				ASSERT(false);
				++m_material_buffer.data[0].ref_count;
				return 0;
			}
			idx = m_material_buffer.first_free;
			m_material_buffer.first_free = m_material_buffer.data[m_material_buffer.first_free].next_free;
			m_material_buffer.data[idx].ref_count = 0;
			m_material_buffer.data[idx].hash = RuntimeHash(data.begin(), data.length() * sizeof(float));
			m_material_buffer.map.insert(hash, idx);

			jobs::wait(&m_cpu_frame->can_setup);
			const u32 size = u32(data.length() * sizeof(float));
			const TransientSlice slice = m_cpu_frame->uniform_buffer.alloc(size);
			memcpy(slice.ptr, data.begin(), size);
			m_cpu_frame->draw_stream.copy(m_material_buffer.buffer, slice.buffer, idx * Material::MAX_UNIFORMS_BYTES, slice.offset, size);
		}
		++m_material_buffer.data[idx].ref_count;
		return idx;
	}

	void destroyMaterialConstants(u32 idx) override {
		--m_material_buffer.data[idx].ref_count;
		if (m_material_buffer.data[idx].ref_count > 0) return;

		const RuntimeHash hash = m_material_buffer.data[idx].hash;
		m_material_buffer.data[idx].next_free = m_material_buffer.first_free;
		m_material_buffer.first_free = idx;
		m_material_buffer.map.erase(hash);
	}

	gpu::BufferHandle createBuffer(const MemRef& memory, gpu::BufferFlags flags) override
	{
		gpu::BufferHandle handle = gpu::allocBufferHandle();
		if (!handle) return handle;

		DrawStream& stream = getDrawStream();
		stream.createBuffer(handle, flags, memory.size, memory.data);
		if (memory.own) stream.freeMemory(memory.data, m_allocator);
		return handle;
	}

	u8 getLayersCount() const override
	{
		return (u8)m_layers.size();
	}

	const char* getLayerName(u8 layer) const override
	{
		return m_layers[layer];
	}

	u8 getLayerIdx(const char* name) override
	{
		for (u8 i = 0; i < m_layers.size(); ++i) {
			if (m_layers[i] == name) return i;
		}
		ASSERT(m_layers.size() < 0xff);
		m_layers.emplace(name);
		return m_layers.size() - 1;
	}

	const Mesh** getSortKeyToMeshMap() const override {
		return m_sort_key_to_mesh_map.begin();
	}

	u32 allocSortKey(Mesh* mesh) override {
		// sort key 0 is reserved as invalid; freed keys are recycled first
		if (!m_free_sort_keys.empty()) {
			const u32 key = m_free_sort_keys.back();
			m_free_sort_keys.pop();
			ASSERT(key != 0);
			if ((u32)m_sort_key_to_mesh_map.size() < key + 1)
				m_sort_key_to_mesh_map.resize(key + 1);
			m_sort_key_to_mesh_map[key] = mesh;
			return key;
		}
		++m_max_sort_key;
		const u32 key = m_max_sort_key;
		ASSERT(key != 0);
		if ((u32)m_sort_key_to_mesh_map.size() < key + 1)
			m_sort_key_to_mesh_map.resize(key + 1);
		m_sort_key_to_mesh_map[key] = mesh;
		return key;
	}

	void freeSortKey(u32 key) override {
		if (key != 0) {
			m_free_sort_keys.push(key);
		}
	}

	u32 getMaxSortKey() const override {
		return m_max_sort_key;
	}

	gpu::TextureHandle createTexture(u32 w, u32 h, u32 depth, gpu::TextureFormat format, gpu::TextureFlags flags, const MemRef& memory, const char* debug_name) override
	{
		gpu::TextureHandle handle = gpu::allocTextureHandle();
		if (!handle) return handle;

		DrawStream& stream = getDrawStream();
		stream.createTexture(handle, w, h, depth, format, flags, debug_name);
		if (memory.data && memory.size) {
			ASSERT(depth == 1);
			stream.update(handle, 0, 0, 0, 0, w, h, format, memory.data, memory.size);
			if (u32(flags & gpu::TextureFlags::NO_MIPS) == 0) stream.generateMipmaps(handle);
		}
		if (memory.own) stream.freeMemory(memory.data, m_allocator);
		return handle;
	}

	void setupJob(void* user_ptr, void(*task)(void*)) override {
		jobs::run(user_ptr, task, &m_cpu_frame->setup_done);
	}

	void addPlugin(RenderPlugin& plugin) override {
		m_plugins.push(&plugin);
	}

	void removePlugin(RenderPlugin& plugin) override {
		m_plugins.eraseItem(&plugin);
	}

	Span<RenderPlugin*> getPlugins() override { return m_plugins; }

	ResourceManager& getTextureManager() override { return m_texture_manager; }
	FontManager& getFontManager() override { return *m_font_manager; }

	void createModules(World& world) override
	{
		UniquePtr<RenderModule> module = RenderModule::createInstance(*this, m_engine, world, m_allocator);
		world.addModule(module.move());
	}

	DrawStream& getDrawStream() override {
		wait(&m_cpu_frame->can_setup);
		return m_cpu_frame->draw_stream;
	}

	DrawStream& getEndFrameDrawStream() override {
		wait(&m_cpu_frame->can_setup);
		return m_cpu_frame->end_frame_draw_stream;
	}

	const char* getName() const override { return "renderer"; }
	Engine& getEngine() override { return m_engine; }
	int getShaderDefinesCount() const override { return m_shader_defines.size(); }
	const char* getShaderDefine(int define_idx) const override { return m_shader_defines[define_idx]; }

	gpu::ProgramHandle queueShaderCompile(Shader& shader, const ShaderKey& key, gpu::VertexDecl decl) override {
		ASSERT(shader.isReady());
		jobs::MutexGuard lock(m_cpu_frame->shader_mutex);
		// reuse the program if the same shader/key combination is already queued this frame
		for (const auto& i : m_cpu_frame->to_compile_shaders) {
			if (i.shader == &shader && key == i.key) {
				return i.program;
			}
		}

		gpu::ProgramHandle program = gpu::allocProgramHandle();
		shader.compile(program, key, decl, m_cpu_frame->begin_frame_draw_stream);
		m_cpu_frame->to_compile_shaders.push({&shader, decl, program, key});
		return program;
	}

	u8 getShaderDefineIdx(const char* define) override
	{
		jobs::MutexGuard lock(m_shader_defines_mutex);
		for (int i = 0; i < m_shader_defines.size(); ++i)
		{
			if (m_shader_defines[i] == define)
			{
				return i;
			}
		}
		if (m_shader_defines.size() >= MAX_SHADER_DEFINES) {
			ASSERT(false);
			logError("Too many shader defines.");
		}
		m_shader_defines.emplace(define);
		ASSERT(m_shader_defines.size() <= 32); // m_shader_defines are reserved in the renderer constructor, so getShaderDefine() is MT-safe
		return u8(m_shader_defines.size() - 1);
	}
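
	// Runs on the render job: executes one recorded frame, swaps buffers and
	// releases frames the GPU has finished with.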
	void render() {
		jobs::MutexGuard guard(m_render_mutex);

		FrameData* next_frame = m_frames[(getFrameIndex(m_gpu_frame) + 1) % lengthOf(m_frames)].get();
		if (next_frame->gpu_frame != 0xffFFffFF && gpu::frameFinished(next_frame->gpu_frame)) {
			next_frame->gpu_frame = 0xffFFffFF;
			next_frame->transient_buffer.renderDone();
			next_frame->uniform_buffer.renderDone();
			jobs::setGreen(&next_frame->can_setup);
		}

		FrameData& frame = *m_gpu_frame;
		profiler::pushInt("GPU Frame", getFrameIndex(m_gpu_frame));
		frame.transient_buffer.prepareToRender();
		frame.uniform_buffer.prepareToRender();

		gpu::MemoryStats mem_stats;
		if (gpu::getMemoryStats(mem_stats)) {
			//static u32 total_counter = profiler::createCounter("Total GPU memory (MB)", 0);
			static u32 available_counter = profiler::createCounter("Available GPU memory (MB)", 0);
			//static u32 dedicated_counter = profiler::createCounter("Dedicated Vid memory (MB)", 0);
			static u32 buffer_counter = profiler::createCounter("Buffer memory (MB)", 0);
			static u32 texture_counter = profiler::createCounter("Texture memory (MB)", 0);
			static u32 rt_counter = profiler::createCounter("Render target memory (MB)", 0);
			auto to_MB = [](u64 B){
				return float(double(B) / (1024.0 * 1024.0));
			};
			//profiler::pushCounter(total_counter, to_MB(mem_stats.total_available_mem));
			profiler::pushCounter(available_counter, to_MB(mem_stats.current_available_mem));
			//profiler::pushCounter(dedicated_counter, to_MB(mem_stats.dedicated_vidmem));
			profiler::pushCounter(buffer_counter, to_MB(mem_stats.buffer_mem));
			profiler::pushCounter(texture_counter, to_MB(mem_stats.texture_mem));
			profiler::pushCounter(rt_counter, to_MB(mem_stats.render_target_mem));
		}

		// execute the command streams recorded for this frame
		m_profiler.beginQuery("frame", 0, false);
		frame.begin_frame_draw_stream.run();
		frame.begin_frame_draw_stream.reset();
		frame.draw_stream.run();
		frame.draw_stream.reset();
		frame.end_frame_draw_stream.run();
		frame.end_frame_draw_stream.reset();
		frame.linear_allocator.reset();
		m_profiler.endQuery();

		jobs::enableBackupWorker(true);

		FrameData* prev_frame = m_frames[(getFrameIndex(m_gpu_frame) + lengthOf(m_frames) - 1) % lengthOf(m_frames)].get();
		if (prev_frame->gpu_frame != 0xffFFffFF && gpu::frameFinished(prev_frame->gpu_frame)) {
			prev_frame->gpu_frame = 0xffFFffFF;
			prev_frame->transient_buffer.renderDone();
			prev_frame->uniform_buffer.renderDone();
			jobs::setGreen(&prev_frame->can_setup);
		}

		{
			PROFILE_BLOCK("swap buffers");
			frame.gpu_frame = gpu::swapBuffers();
		}

		if (frame.gpu_frame != 0xffFFffFF && gpu::frameFinished(frame.gpu_frame)) {
			frame.gpu_frame = 0xffFFffFF;
			frame.transient_buffer.renderDone();
			frame.uniform_buffer.renderDone();
			jobs::setGreen(&frame.can_setup);
		}

		jobs::enableBackupWorker(false);
		m_profiler.frame();

		// advance to the next frame; block if the GPU is still using it
		m_gpu_frame = m_frames[(getFrameIndex(m_gpu_frame) + 1) % lengthOf(m_frames)].get();
		if (m_gpu_frame->gpu_frame != 0xffFFffFF) {
			gpu::waitFrame(m_gpu_frame->gpu_frame);
			m_gpu_frame->gpu_frame = 0xffFFffFF;
			m_gpu_frame->transient_buffer.renderDone();
			m_gpu_frame->uniform_buffer.renderDone();
			jobs::setGreen(&m_gpu_frame->can_setup);
		}
	}

	LinearAllocator& getCurrentFrameAllocator() override { return m_cpu_frame->linear_allocator; }

	void waitForCommandSetup() override
	{
		jobs::wait(&m_cpu_frame->setup_done);
	}

	void waitCanSetup() override
	{
		jobs::wait(&m_cpu_frame->can_setup);
	}

	void waitForRender() override {
		jobs::wait(&m_last_render);
	}

	i32 getFrameIndex(FrameData* frame) const {
		for (i32 i = 0; i < (i32)lengthOf(m_frames); ++i) {
			if (frame == m_frames[i].get()) return i;
		}
		ASSERT(false);
		return -1;
	}

	u32 frameNumber() const override { return m_cpu_frame->frame_number; }
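
	// Called once per engine frame: finalizes the current CPU frame, advances
	// to the next one and kicks render() as a job.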
	void frame() override
	{
		PROFILE_FUNCTION();

		// we have to wait for `can_setup` in case somebody calls frame() several times in a row
		jobs::wait(&m_cpu_frame->can_setup);
		jobs::wait(&m_cpu_frame->setup_done);

		// unbind everything so no stale state leaks into the next frame
		m_cpu_frame->draw_stream.useProgram(gpu::INVALID_PROGRAM);
		m_cpu_frame->draw_stream.bindIndexBuffer(gpu::INVALID_BUFFER);
		m_cpu_frame->draw_stream.bindVertexBuffer(0, gpu::INVALID_BUFFER, 0, 0);
		m_cpu_frame->draw_stream.bindVertexBuffer(1, gpu::INVALID_BUFFER, 0, 0);
		for (u32 i = 0; i < (u32)UniformBuffer::COUNT; ++i) {
			m_cpu_frame->draw_stream.bindUniformBuffer(i, gpu::INVALID_BUFFER, 0, 0);
		}

		for (const auto& i : m_cpu_frame->to_compile_shaders) {
			i.shader->m_programs.push({i.key, i.program});
		}
		m_cpu_frame->to_compile_shaders.clear();

		u32 frame_data_mem = 0;
		for (const Local<FrameData>& fd : m_frames) {
			frame_data_mem += fd->linear_allocator.getCommitedBytes();
		}
		static u32 frame_data_counter = profiler::createCounter("Render frame data (kB)", 0);
		profiler::pushCounter(frame_data_counter, float(double(frame_data_mem) / 1024.0));

		jobs::setRed(&m_cpu_frame->can_setup);
		m_cpu_frame = m_frames[(getFrameIndex(m_cpu_frame) + 1) % lengthOf(m_frames)].get();
		++m_frame_number;
		m_cpu_frame->frame_number = m_frame_number;

		for (RenderPlugin* plugin : m_plugins) {
			plugin->frame(*this);
		}

		// kick the render job; it runs in parallel with setup of the next frame
		jobs::runLambda([this](){
			render();
		}, &m_last_render, 1);
	}

	Engine& m_engine;
	TagAllocator m_allocator;
	Array<StaticString<32>> m_shader_defines;
	jobs::Mutex m_render_mutex;
	jobs::Mutex m_shader_defines_mutex;
	Array<StaticString<32>> m_layers;
	FontManager* m_font_manager;
	MaterialManager m_material_manager;
	RenderResourceManager<Model> m_model_manager;
	RenderResourceManager<ParticleSystemResource> m_particle_emitter_manager;
	RenderResourceManager<Shader> m_shader_manager;
	RenderResourceManager<Texture> m_texture_manager;
	Array<u32> m_free_sort_keys;
	Array<const Mesh*> m_sort_key_to_mesh_map;
	u32 m_max_sort_key = 0;
	u32 m_frame_number = 0;
	float m_lod_multiplier = 1;
	jobs::Signal m_init_signal;
	HashMap<RuntimeHash, String> m_semantic_defines;

	Array<RenderPlugin*> m_plugins;
	Local<FrameData> m_frames[3];
	FrameData* m_gpu_frame = nullptr;
	FrameData* m_cpu_frame = nullptr;
	jobs::Signal m_last_render;
	GPUProfiler m_profiler;

	struct MaterialBuffer {
		MaterialBuffer(IAllocator& alloc)
			: map(alloc)
			, data(alloc)
		{}

		struct Data {
			Data() {}
			u32 ref_count;
			// a slot holds either a live entry's hash or the free-list link
			union {
				RuntimeHash hash;
				u32 next_free;
			};
		};

		gpu::BufferHandle buffer = gpu::INVALID_BUFFER;
		Array<Data> data;
		int first_free;
		HashMap<RuntimeHash, u32> map;
	} m_material_buffer;
};

FrameData::FrameData(struct RendererImpl& renderer, IAllocator& allocator, PageAllocator& page_allocator)
	: renderer(renderer)
	, to_compile_shaders(allocator)
	, linear_allocator(1024 * 1024 * 64)
	, draw_stream(renderer)
	, begin_frame_draw_stream(renderer)
	, end_frame_draw_stream(renderer)
{}


LUMIX_PLUGIN_ENTRY(renderer) {
	PROFILE_FUNCTION();
	return LUMIX_NEW(engine.getAllocator(), RendererImpl)(engine);
}


} // namespace Lumix