renderer/scheduler: reduce binary size by 2.2kb

Replace the STL containers with our own lightweight data structures.
Hermet Park 2023-12-12 15:46:13 +09:00
parent 6f19c581e8
commit 79facf3656
2 changed files with 32 additions and 28 deletions
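
The ~2.2 KB saving comes from dropping std::deque and std::vector (and the template machinery they instantiate) in favor of the project's own containers: an intrusive list (Inlist) for the task queue and a flat Array for the threads and queues. Below is a minimal sketch of the intrusive-list idea, assuming list nodes carry their own prev/next links; the method names (empty, back, front) follow the usage visible in the diff, but the real tvgInlist.h may differ in detail.

//Hedged sketch: nodes embed their own links, so queuing a Task allocates nothing.
template<typename T>
struct Inlist
{
    T* head = nullptr;
    T* tail = nullptr;

    bool empty() const { return head == nullptr; }

    //Append an externally owned node (replaces deque::push_back).
    void back(T* element)
    {
        element->prev = tail;
        element->next = nullptr;
        if (tail) tail->next = element;
        else head = element;
        tail = element;
    }

    //Detach and return the head node (replaces front() + pop_front()).
    T* front()
    {
        auto element = head;
        if (!element) return nullptr;
        head = element->next;
        if (head) head->prev = nullptr;
        else tail = nullptr;
        element->prev = element->next = nullptr;
        return element;
    }
};

Because front() unlinks the node it returns, the separate taskDeque.pop_front() calls of the old deque-based queue disappear in the diff below.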

View file

@@ -20,11 +20,11 @@
  * SOFTWARE.
  */
 
-#include <deque>
 #include <thread>
-#include <vector>
 #include <atomic>
 #include <condition_variable>
+#include "tvgArray.h"
+#include "tvgInlist.h"
 #include "tvgTaskScheduler.h"
 
 /************************************************************************/
@@ -34,7 +34,7 @@
 namespace tvg {
 
 struct TaskQueue {
-    deque<Task*>             taskDeque;
+    Inlist<Task>             taskDeque;
     mutex                    mtx;
    condition_variable        ready;
    bool                      done = false;
@@ -44,8 +44,6 @@ struct TaskQueue {
         unique_lock<mutex> lock{mtx, try_to_lock};
         if (!lock || taskDeque.empty()) return false;
         *task = taskDeque.front();
-        taskDeque.pop_front();
         return true;
     }
@@ -54,11 +52,9 @@ struct TaskQueue {
         {
             unique_lock<mutex> lock{mtx, try_to_lock};
             if (!lock) return false;
-            taskDeque.push_back(task);
+            taskDeque.back(task);
         }
         ready.notify_one();
         return true;
     }
@@ -82,8 +78,6 @@ struct TaskQueue {
         if (taskDeque.empty()) return false;
         *task = taskDeque.front();
-        taskDeque.pop_front();
         return true;
     }
@@ -91,12 +85,10 @@ struct TaskQueue {
     {
         {
             unique_lock<mutex> lock{mtx};
-            taskDeque.push_back(task);
+            taskDeque.back(task);
         }
         ready.notify_one();
     }
 };
@@ -105,24 +97,33 @@ static thread_local bool _async = true;    //toggle async tasking for each thread
 struct TaskSchedulerImpl
 {
-    uint32_t                 threadCnt;
-    vector<thread>           threads;
-    vector<TaskQueue>        taskQueues;
+    Array<thread*>           threads;
+    Array<TaskQueue*>        taskQueues;
     atomic<uint32_t>         idx{0};
 
-    TaskSchedulerImpl(unsigned threadCnt) : threadCnt(threadCnt), taskQueues(threadCnt)
+    TaskSchedulerImpl(unsigned threadCnt)
     {
         threads.reserve(threadCnt);
+        taskQueues.reserve(threadCnt);
 
         for (unsigned i = 0; i < threadCnt; ++i) {
-            threads.emplace_back([&, i] { run(i); });
+            taskQueues.push(new TaskQueue);
+            threads.push(new thread([&, i] { run(i); }));
         }
     }
 
     ~TaskSchedulerImpl()
     {
-        for (auto& queue : taskQueues) queue.complete();
-        for (auto& thread : threads) thread.join();
+        for (auto tq = taskQueues.data; tq < taskQueues.end(); ++tq) {
+            (*tq)->complete();
+        }
+        for (auto thread = threads.data; thread < threads.end(); ++thread) {
+            (*thread)->join();
+            delete(*thread);
+        }
+        for (auto tq = taskQueues.data; tq < taskQueues.end(); ++tq) {
+            delete(*tq);
+        }
     }
 
     void run(unsigned i)
@@ -132,14 +133,14 @@ struct TaskSchedulerImpl
         //Thread Loop
         while (true) {
             auto success = false;
-            for (unsigned x = 0; x < threadCnt * 2; ++x) {
-                if (taskQueues[(i + x) % threadCnt].tryPop(&task)) {
+            for (unsigned x = 0; x < threads.count * 2; ++x) {
+                if (taskQueues[(i + x) % threads.count]->tryPop(&task)) {
                     success = true;
                     break;
                 }
             }
-            if (!success && !taskQueues[i].pop(&task)) break;
+            if (!success && !taskQueues[i]->pop(&task)) break;
             (*task)(i + 1);
         }
     }
@@ -147,13 +148,13 @@ struct TaskSchedulerImpl
     void request(Task* task)
     {
         //Async
-        if (threadCnt > 0 && _async) {
+        if (threads.count > 0 && _async) {
             task->prepare();
             auto i = idx++;
-            for (unsigned n = 0; n < threadCnt; ++n) {
-                if (taskQueues[(i + n) % threadCnt].tryPush(task)) return;
+            for (unsigned n = 0; n < threads.count; ++n) {
+                if (taskQueues[(i + n) % threads.count]->tryPush(task)) return;
             }
-            taskQueues[i % threadCnt].push(task);
+            taskQueues[i % threads.count]->push(task);
         //Sync
         } else {
             task->run(0);
@@ -192,7 +193,7 @@ void TaskScheduler::request(Task* task)
 unsigned TaskScheduler::threads()
 {
-    if (inst) return inst->threadCnt;
+    if (inst) return inst->threads.count;
     return 0;
 }
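
The destructor change above follows from the new ownership model: Array stores plain pointers, so the threads and queues are heap-allocated in the constructor and must be joined and deleted explicitly, iterating the raw storage from data to end(). Here is a rough sketch of the Array<T> surface the scheduler relies on (reserve, push, count, data, end, operator[]), assuming a realloc-backed flat buffer of trivially copyable elements; the actual tvgArray.h may be richer.

#include <cstdint>
#include <cstdlib>

//Hedged sketch of a flat, realloc-backed array for POD-like elements
//such as thread* or TaskQueue*.
template<typename T>
struct Array
{
    T* data = nullptr;
    uint32_t count = 0;
    uint32_t reserved = 0;

    void reserve(uint32_t size)
    {
        if (size <= reserved) return;
        reserved = size;
        data = static_cast<T*>(std::realloc(data, sizeof(T) * reserved));
    }

    void push(T element)
    {
        if (count + 1 > reserved) reserve(count + (count >> 1) + 1);
        data[count++] = element;
    }

    T* end() const { return data + count; }
    T& operator[](uint32_t idx) { return data[idx]; }

    ~Array() { std::free(data); }
};

A container like this never runs element destructors, which is consistent with the scheduler's destructor deleting each thread and TaskQueue by hand.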

View file

@@ -26,6 +26,7 @@
 #include <mutex>
 #include <condition_variable>
 #include "tvgCommon.h"
+#include "tvgInlist.h"
 
 namespace tvg
 {
@@ -50,6 +51,8 @@ private:
     bool pending = false;
 
 public:
+    INLIST_ITEM(Task);
+
     virtual ~Task() = default;
 
     void done()
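
INLIST_ITEM(Task) presumably injects the link fields that Inlist<Task> threads through the task objects themselves, so queuing a Task needs no separate node allocation. The real definition lives in tvgInlist.h; a hedged guess at its shape:

//Assumption: the macro simply declares the intrusive links the list needs.
#define INLIST_ITEM(T) \
    T* prev = nullptr; \
    T* next = nullptr

One consequence of the intrusive design is that a Task can sit in only one queue at a time, which holds here because request() pushes each task to exactly one TaskQueue.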