mirror of https://github.com/OpenRCT2/OpenRCT2
Tighten the use of locks in JobPool (#23533)
Should result in less lock contention, and therefore marginally faster loading times.
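The change applies a standard condition-variable pattern: hold the mutex only for the shortest span needed to mutate shared state, and signal the condition variable after the lock has been released, so a woken worker can acquire the mutex without immediately contending for it. Below is a minimal sketch of that pattern, using illustrative names rather than the actual JobPool members:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <utility>

// Minimal sketch of the locking pattern this commit applies (illustrative
// names, not the JobPool code).
class TinyQueue
{
    std::mutex _mutex;
    std::condition_variable _cond;
    std::deque<std::function<void()>> _items;

public:
    void Push(std::function<void()> fn)
    {
        {
            // Hold the lock only while the shared queue is mutated.
            std::unique_lock<std::mutex> lock(_mutex);
            _items.push_back(std::move(fn));
        }
        // Notify after releasing the lock so the woken thread does not
        // immediately block trying to acquire the still-held mutex.
        _cond.notify_one();
    }

    std::function<void()> Pop()
    {
        // Waiting side: the predicate is re-checked under the lock.
        std::unique_lock<std::mutex> lock(_mutex);
        _cond.wait(lock, [this]() { return !_items.empty(); });
        auto fn = std::move(_items.front());
        _items.pop_front();
        return fn;
    }
};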
@@ -12,14 +12,14 @@
 #include <cassert>

 JobPool::TaskData::TaskData(std::function<void()> workFn, std::function<void()> completionFn)
-    : WorkFn(workFn)
-    , CompletionFn(completionFn)
+    : WorkFn(std::move(workFn))
+    , CompletionFn(std::move(completionFn))
 {
 }

 JobPool::JobPool(size_t maxThreads)
 {
-    maxThreads = std::min<size_t>(maxThreads, std::thread::hardware_concurrency());
+    maxThreads = std::min<size_t>(maxThreads, std::max(1u, std::thread::hardware_concurrency()));
     for (size_t n = 0; n < maxThreads; n++)
     {
         _threads.emplace_back(&JobPool::ProcessQueue, this);
@@ -31,8 +31,8 @@ JobPool::~JobPool()
     {
         unique_lock lock(_mutex);
         _shouldStop = true;
-        _condPending.notify_all();
     }
+    _condPending.notify_all();

     for (auto& th : _threads)
     {
@@ -43,8 +43,10 @@ JobPool::~JobPool()

 void JobPool::AddTask(std::function<void()> workFn, std::function<void()> completionFn)
 {
-    unique_lock lock(_mutex);
-    _pending.emplace_back(workFn, completionFn);
+    {
+        unique_lock lock(_mutex);
+        _pending.emplace_back(workFn, completionFn);
+    }
     _condPending.notify_one();
 }

@@ -59,7 +61,7 @@ void JobPool::Join(std::function<void()> reportFn)
         // Dispatch all completion callbacks if there are any.
         while (!_completed.empty())
         {
-            auto taskData = _completed.front();
+            auto taskData = std::move(_completed.front());
             _completed.pop_front();

             if (taskData.CompletionFn)
@@ -89,16 +91,10 @@ void JobPool::Join(std::function<void()> reportFn)
     }
 }

-size_t JobPool::CountPending()
+bool JobPool::IsBusy()
 {
     unique_lock lock(_mutex);
-    return _pending.size();
-}
-
-size_t JobPool::CountProcessing()
-{
-    unique_lock lock(_mutex);
-    return _processing;
+    return _processing != 0 || !_pending.empty();
 }

 void JobPool::ProcessQueue()
@@ -113,7 +109,7 @@ void JobPool::ProcessQueue()
         {
             _processing++;

-            auto taskData = _pending.front();
+            auto taskData = std::move(_pending.front());
             _pending.pop_front();

             lock.unlock();
@@ -9,7 +9,6 @@

 #pragma once

-#include <atomic>
 #include <condition_variable>
 #include <deque>
 #include <functional>
@@ -28,8 +27,8 @@ private:
         TaskData(std::function<void()> workFn, std::function<void()> completionFn);
     };

-    std::atomic_bool _shouldStop = { false };
-    std::atomic<size_t> _processing = { 0 };
+    bool _shouldStop = false;
+    size_t _processing = 0;
     std::vector<std::thread> _threads;
     std::deque<TaskData> _pending;
     std::deque<TaskData> _completed;
@@ -45,8 +44,7 @@ public:

     void AddTask(std::function<void()> workFn, std::function<void()> completionFn = nullptr);
     void Join(std::function<void()> reportFn = nullptr);
-    size_t CountPending();
-    size_t CountProcessing();
+    bool IsBusy();

 private:
     void ProcessQueue();
@@ -54,7 +54,7 @@ void PreloaderScene::Tick()

     gInUpdateCode = false;

-    if (_jobs.CountPending() == 0 && _jobs.CountProcessing() == 0)
+    if (!_jobs.IsBusy())
     {
         // Make sure the job is fully completed.
         _jobs.Join();
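As a design note on the IsBusy() change: folding CountPending() and CountProcessing() into a single query means both values are read under one lock acquisition, so the polling caller takes the mutex once per tick and works from a single consistent snapshot instead of two snapshots taken at different times. A self-contained sketch of the two styles, again with illustrative names rather than the JobPool members:

#include <cstddef>
#include <deque>
#include <mutex>

// Illustrative sketch contrasting two separately locked queries with a
// single combined query, mirroring the CountPending/CountProcessing ->
// IsBusy change above.
class WorkTracker
{
    std::mutex _mutex;
    std::deque<int> _pending;
    std::size_t _processing = 0;

public:
    // Old style: each call locks on its own, so a caller combining the two
    // results works from state sampled at two different points in time.
    std::size_t CountPending()
    {
        std::unique_lock<std::mutex> lock(_mutex);
        return _pending.size();
    }

    std::size_t CountProcessing()
    {
        std::unique_lock<std::mutex> lock(_mutex);
        return _processing;
    }

    // New style: one lock acquisition, one consistent answer.
    bool IsBusy()
    {
        std::unique_lock<std::mutex> lock(_mutex);
        return _processing != 0 || !_pending.empty();
    }
};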