common taskscheduler: revert 4db25db962

There is a report from the thread sanitizer.
It could be a false positive as far as I reviewed,
but we hate to be bothered by it. So let's revert it.

@Issue: https://github.com/thorvg/thorvg/issues/1409
Hermet Park 2023-05-04 13:28:10 +09:00 committed by Hermet Park
parent 51a31e226d
commit e8eef1af1d
2 changed files with 23 additions and 63 deletions
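
For context, here is a minimal standalone sketch (not ThorVG code; all names are illustrative) of the double-checked flag pattern that the reverted Task relied on and that ThreadSanitizer reports as a data race: one thread reads a plain bool on a fast path without the lock while another thread writes it under the lock. Whether the report in #1409 is exactly this access is not confirmed by the commit, which itself calls it a possible false positive.

    // Build: g++ -std=c++14 -g -fsanitize=thread tsan_sketch.cpp -lpthread
    #include <mutex>
    #include <thread>

    static std::mutex mtx;
    static bool finished = false;     //plain flag, written under the lock

    static void worker()
    {
        std::lock_guard<std::mutex> lock(mtx);
        finished = true;              //locked write
    }

    static void waiter()
    {
        if (finished) return;         //unlocked fast-path read: TSan flags this pair
        std::lock_guard<std::mutex> lock(mtx);
        if (finished) return;         //re-check safely under the lock
    }

    int main()
    {
        std::thread t1(worker), t2(waiter);
        t1.join();
        t2.join();
        return 0;
    }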


@@ -173,8 +173,6 @@ struct SwShapeTask : SwTask
         //Clip Path
         for (auto clip = clips.data; clip < (clips.data + clips.count); ++clip) {
             auto clipper = static_cast<SwTask*>(*clip);
-            //Guarantee composition targets get ready.
-            clipper->done(tid);
             //Clip shape rle
             if (shape.rle && !clipper->clip(shape.rle)) goto err;
             //Clip stroke rle
@@ -225,25 +223,17 @@ struct SwSceneTask : SwTask
         if (!sceneRle) sceneRle = static_cast<SwRleData*>(calloc(1, sizeof(SwRleData)));
         else rleReset(sceneRle);

-        //Only one shape
-        if (scene.count == 1) {
-            auto clipper = static_cast<SwTask*>(*scene.data);
-            clipper->done(tid);
         //Merge shapes if it has more than one shapes
-        } else {
+        if (scene.count > 1) {
             //Merge first two clippers
             auto clipper1 = static_cast<SwTask*>(*scene.data);
-            clipper1->done(tid);
             auto clipper2 = static_cast<SwTask*>(*(scene.data + 1));
-            clipper2->done(tid);
             rleMerge(sceneRle, clipper1->rle(), clipper2->rle());

             //Unify the remained clippers
             for (auto rd = scene.data + 2; rd < (scene.data + scene.count); ++rd) {
                 auto clipper = static_cast<SwTask*>(*rd);
-                clipper->done(tid);
                 rleMerge(sceneRle, sceneRle, clipper->rle());
             }
         }
@@ -305,8 +295,6 @@ struct SwImageTask : SwTask
         imageDelOutline(&image, mpool, tid);
         for (auto clip = clips.data; clip < (clips.data + clips.count); ++clip) {
             auto clipper = static_cast<SwTask*>(*clip);
-            //Guarantee composition targets get ready.
-            clipper->done(tid);
             if (!clipper->clip(image.rle)) goto err;
         }
         return;
@@ -697,6 +685,15 @@ void* SwRenderer::prepareCommon(SwTask* task, const RenderTransform* transform,
     //Finish previous task if it has duplicated request.
     task->done();

+    //TODO: Failed threading them. It would be better if it's possible.
+    //See: https://github.com/thorvg/thorvg/issues/1409
+    if (clips.count > 0) {
+        //Guarantee composition targets get ready.
+        for (auto clip = clips.data; clip < (clips.data + clips.count); ++clip) {
+            static_cast<SwTask*>(*clip)->done();
+        }
+    }
+
     task->clips = clips;

     if (transform) {
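
The hunk above is the heart of the revert: instead of letting worker tasks call done() on their clip dependencies, prepareCommon() now finishes every clip target synchronously on the caller thread before the dependent task is queued, so workers never block on one another. A rough sketch of that ordering, with assumed names (Job, enqueue, and the std::async stand-in for the worker pool are illustrative, not ThorVG API):

    #include <functional>
    #include <future>
    #include <vector>

    struct Job
    {
        std::future<void> result;
        void done() { if (result.valid()) result.get(); }  //block until the job finished
    };

    //Finish all dependencies on the caller thread, then launch the dependent work.
    void enqueue(Job& job, std::vector<Job*>& deps, std::function<void()> work)
    {
        for (auto* d : deps) d->done();                                 //dependencies first...
        job.result = std::async(std::launch::async, std::move(work));   //...then the job
    }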


@@ -43,77 +43,40 @@ struct TaskScheduler

 struct Task
 {
 private:
-    mutex finishedMtx;
-    mutex preparedMtx;
+    mutex mtx;
     condition_variable cv;
-    bool finished = true;   //if run() finished
-    bool prepared = false;  //the task is requested
+    bool ready = true;
+    bool pending = false;

 public:
-    virtual ~Task()
-    {
-        if (!prepared) return;
-
-        //Guarantee the task is finished by TaskScheduler.
-        unique_lock<mutex> lock(preparedMtx);
-        while (prepared) {
-            cv.wait(lock);
-        }
-    }
+    virtual ~Task() = default;

     void done(unsigned tid = 0)
     {
-        if (finished) return;
-
-        lock_guard<mutex> lock(finishedMtx);
-        if (finished) return;
-
-        //the job hasn't been launched yet.
-        //set finished so that operator() quickly returns.
-        finished = true;
-        run(tid);
+        if (!pending) return;
+
+        unique_lock<mutex> lock(mtx);
+        while (!ready) cv.wait(lock);
+        pending = false;
     }

 protected:
     virtual void run(unsigned tid) = 0;

 private:
-    void finish()
-    {
-        lock_guard<mutex> lock(preparedMtx);
-        prepared = false;
-        cv.notify_one();
-    }
-
     void operator()(unsigned tid)
     {
-        if (finished) {
-            finish();
-            return;
-        }
-
-        lock_guard<mutex> lock(finishedMtx);
-        if (finished) {
-            finish();
-            return;
-        }
-
         run(tid);
-
-        finished = true;
-        finish();
+
+        lock_guard<mutex> lock(mtx);
+        ready = true;
+        cv.notify_one();
     }

     void prepare()
     {
-        finished = false;
-        prepared = true;
+        ready = false;
+        pending = true;
     }

     friend struct TaskSchedulerImpl;
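
Taken on its own, the restored handshake is simple enough to demonstrate in a self-contained sketch: prepare() marks the task pending, the worker thread runs it and signals ready, and done() blocks until that signal arrives. The single std::thread below stands in for TaskSchedulerImpl, which this hunk does not show; the access specifiers and the friend declaration are dropped so the demo can drive the task directly, and run() is a plain member rather than a virtual.

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    using namespace std;

    struct DemoTask
    {
        mutex mtx;
        condition_variable cv;
        bool ready = true;
        bool pending = false;

        void done()                      //same logic as Task::done() above
        {
            if (!pending) return;
            unique_lock<mutex> lock(mtx);
            while (!ready) cv.wait(lock);
            pending = false;
        }

        void operator()(unsigned tid)    //what the scheduler's worker runs
        {
            run(tid);
            lock_guard<mutex> lock(mtx);
            ready = true;
            cv.notify_one();
        }

        void prepare()                   //called before handing the task out
        {
            ready = false;
            pending = true;
        }

        void run(unsigned tid) { printf("run on worker %u\n", tid); }
    };

    int main()
    {
        DemoTask task;
        task.prepare();                          //request the job
        thread worker([&task] { task(0); });     //TaskSchedulerImpl would do this
        task.done();                             //blocks until run() has finished
        worker.join();
        return 0;
    }

Note that done() still reads pending without holding mtx; in this sketch that is safe because only the requesting thread ever touches pending, while ready, which both threads share, is always accessed under the lock.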