Compare commits


No commits in common. "8fd24a3c1a93a676a3710fcfd9226eeec268a7c2" and "068656ddc2ea80b117d2e6a273783e991b28db90" have entirely different histories.

11 changed files with 116 additions and 79 deletions

View File

@@ -12,11 +12,12 @@
 // Helper for logging location info, e.g. LOG << LOCATION(from)
 #define LOCATION(from) \
-  std::get<0>(from) << "() [" << [](std::string path) -> std::string { \
-    size_t last_slash_pos = path.find_last_of("\\/"); \
+  std::get<0>(from) << "() [" << [](const char* path) -> std::string { \
+    std::string file_name(path); \
+    size_t last_slash_pos = file_name.find_last_of("\\/"); \
     if (last_slash_pos != std::string::npos) \
-      path = path.substr(last_slash_pos + 1); \
-    return path; \
+      file_name = file_name.substr(last_slash_pos + 1); \
+    return file_name; \
   }(std::get<1>(from)) << ":" \
                        << std::get<2>(from) << "]"
@@ -35,7 +36,7 @@ using Closure = std::function<void()>;
 // Provides location info (function name, file name and line number) where of a
 // Closure was constructed.
-using Location = std::tuple<std::string, std::string, int>;
+using Location = std::tuple<const char*, const char*, int>;
 #else
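For reference, a hedged sketch of how the reworked logging pieces fit together: Location is a (function, file, line) tuple and LOCATION(from) streams it as "func() [file.cc:line]" with the directory stripped. Only Location and LOCATION appear in this diff; the EXAMPLE_FROM_HERE helper and main() below are assumptions added for illustration.

#include <cstddef>
#include <iostream>
#include <string>
#include <tuple>

// Same shape as the new base::Location: function name, file path, line number.
using Location = std::tuple<const char*, const char*, int>;

// Assumed convenience macro (not in the diff) that captures the call site.
#define EXAMPLE_FROM_HERE Location{__func__, __FILE__, __LINE__}

// The reworked macro from this diff: takes the path as const char*, strips the
// directory inside an immediately-invoked lambda, and streams
// "func() [file.cc:line]".
#define LOCATION(from) \
  std::get<0>(from) << "() [" << [](const char* path) -> std::string { \
    std::string file_name(path); \
    size_t last_slash_pos = file_name.find_last_of("\\/"); \
    if (last_slash_pos != std::string::npos) \
      file_name = file_name.substr(last_slash_pos + 1); \
    return file_name; \
  }(std::get<1>(from)) << ":" << std::get<2>(from) << "]"

void LogCallSite(const Location& from) {
  std::cout << LOCATION(from) << std::endl;  // e.g. "main() [example.cc:29]"
}

int main() {
  LogCallSite(EXAMPLE_FROM_HERE);
}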

View File

@@ -4,14 +4,12 @@
 #include "base/log.h"
-namespace base {
 namespace {
-void PostTaskAndReplyRelay(Location from,
-                           Closure task_cb,
-                           Closure reply_cb,
-                           std::shared_ptr<TaskRunner> destination) {
+void PostTaskAndReplyRelay(base::Location from,
+                           base::Closure task_cb,
+                           base::Closure reply_cb,
+                           base::TaskRunner* destination) {
   task_cb();
   if (reply_cb)
@@ -20,23 +18,21 @@ void PostTaskAndReplyRelay(Location from,
 } // namespace
-// The task runner that belongs to the thread it's created in. Tasks to be run
-// on a specific thread can be posted to this task runner.
-// TaskRunner::GetThreadLocalTaskRunner()->RunTasks() is expected to be
-// periodically called.
-thread_local std::shared_ptr<TaskRunner> TaskRunner::thread_local_task_runner;
+namespace base {
+thread_local std::unique_ptr<TaskRunner> TaskRunner::thread_local_task_runner;
 void TaskRunner::CreateThreadLocalTaskRunner() {
   DCHECK(!thread_local_task_runner);
-  thread_local_task_runner = std::make_shared<TaskRunner>();
+  thread_local_task_runner = std::make_unique<TaskRunner>();
 }
-std::shared_ptr<TaskRunner> TaskRunner::GetThreadLocalTaskRunner() {
-  return thread_local_task_runner;
+TaskRunner* TaskRunner::GetThreadLocalTaskRunner() {
+  return thread_local_task_runner.get();
 }
-void TaskRunner::PostTask(Location from, Closure task) {
+void TaskRunner::PostTask(const Location& from, Closure task) {
   DCHECK(task) << LOCATION(from);
   task_count_.fetch_add(1, std::memory_order_relaxed);
@@ -44,28 +40,19 @@ void TaskRunner::PostTask(Location from, Closure task) {
   queue_.emplace_back(from, std::move(task));
 }
-void TaskRunner::PostTaskAndReply(Location from, Closure task, Closure reply) {
+void TaskRunner::PostTaskAndReply(const Location& from,
+                                  Closure task,
+                                  Closure reply) {
   DCHECK(task) << LOCATION(from);
   DCHECK(reply) << LOCATION(from);
   DCHECK(thread_local_task_runner) << LOCATION(from);
-  auto relay = std::bind(PostTaskAndReplyRelay, from, std::move(task),
-                         std::move(reply), thread_local_task_runner);
+  auto relay = std::bind(::PostTaskAndReplyRelay, from, std::move(task),
+                         std::move(reply), thread_local_task_runner.get());
   PostTask(from, std::move(relay));
 }
-void TaskRunner::CancelTasks() {
-  std::lock_guard<std::mutex> scoped_lock(lock_);
-  task_count_.fetch_sub(queue_.size(), std::memory_order_release);
-  queue_.clear();
-}
-void TaskRunner::WaitForCompletion() {
-  while (task_count_.load(std::memory_order_acquire) > 0)
-    std::this_thread::yield();
-}
-void TaskRunner::RunTasks() {
+void TaskRunner::MultiConsumerRun() {
   for (;;) {
     Task task;
     {
@@ -84,7 +71,55 @@ void TaskRunner::RunTasks() {
     task_cb();
     task_count_.fetch_sub(1, std::memory_order_release);
+    if (cancel_tasks_.load(std::memory_order_relaxed)) {
+      CancelTasksInternal();
+      break;
+    }
   }
 }
+void TaskRunner::SingleConsumerRun() {
+  std::deque<Task> queue;
+  {
+    std::lock_guard<std::mutex> scoped_lock(lock_);
+    if (queue_.empty())
+      return;
+    queue.swap(queue_);
+  }
+  while (!queue.empty()) {
+    auto [from, task_cb] = queue.front();
+    queue.pop_front();
+#if 0
+    LOG << __func__ << " from: " << LOCATION(from);
+#endif
+    task_cb();
+    task_count_.fetch_sub(1, std::memory_order_release);
+    if (cancel_tasks_.load(std::memory_order_relaxed)) {
+      CancelTasksInternal();
+      break;
+    }
+  }
+}
+void TaskRunner::CancelTasks() {
+  cancel_tasks_.store(true, std::memory_order_relaxed);
+}
+void TaskRunner::WaitForCompletion() {
+  while (task_count_.load(std::memory_order_acquire) > 0)
+    std::this_thread::yield();
+}
+void TaskRunner::CancelTasksInternal() {
+  cancel_tasks_.store(false, std::memory_order_relaxed);
+  task_count_.store(0, std::memory_order_relaxed);
+  std::lock_guard<std::mutex> scoped_lock(lock_);
+  queue_.clear();
+}
 } // namespace base
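Worth noting about the hunks above: CancelTasks() no longer locks and drains the queue itself; it only raises cancel_tasks_, and whichever consumer loop (MultiConsumerRun or SingleConsumerRun) sees the flag after finishing its current task drops the remaining work in CancelTasksInternal(). A minimal standalone sketch of that cooperative-cancel pattern, under illustrative names that are not from the repository:

#include <atomic>
#include <deque>
#include <functional>
#include <mutex>

class ToyQueue {
 public:
  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push_back(std::move(task));
  }

  // Producers only request cancellation; a consumer acts on it later.
  void Cancel() { cancel_.store(true, std::memory_order_relaxed); }

  // Consumer loop: run tasks one at a time, checking the flag between tasks.
  void Drain() {
    for (;;) {
      std::function<void()> task;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (queue_.empty())
          return;
        task = std::move(queue_.front());
        queue_.pop_front();
      }
      task();
      if (cancel_.exchange(false, std::memory_order_relaxed)) {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.clear();  // drop whatever is still pending
        return;
      }
    }
  }

 private:
  std::deque<std::function<void()>> queue_;
  std::mutex mutex_;
  std::atomic<bool> cancel_{false};
};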

View File

@@ -26,7 +26,7 @@ void ReturnAsParamAdapter(std::function<ReturnType()> func,
 template <typename ReturnType>
 void ReplyAdapter(std::function<void(ReturnType)> callback,
                   ReturnType* result) {
-  callback(std::move(*result));
+  callback(*result);
   delete result;
 }
@@ -34,23 +34,20 @@ void ReplyAdapter(std::function<void(ReturnType)> callback,
 // Runs queued tasks (in the form of Closure objects). All methods are
 // thread-safe and can be called on any thread.
-// Tasks run in FIFO order when consumed by a single thread. When consumed
-// concurrently by multiple threads, it doesn't guarantee whether tasks overlap,
-// or whether they run on a particular thread.
+// Tasks run in FIFO order. When consumed concurrently by multiple threads, it
+// doesn't guarantee whether tasks overlap, or whether they run on a particular
+// thread.
 class TaskRunner {
  public:
   TaskRunner() = default;
   ~TaskRunner() = default;
-  static void CreateThreadLocalTaskRunner();
-  static std::shared_ptr<TaskRunner> GetThreadLocalTaskRunner();
-  void PostTask(Location from, Closure task);
-  void PostTaskAndReply(Location from, Closure task, Closure reply);
+  void PostTask(const Location& from, Closure task);
+  void PostTaskAndReply(const Location& from, Closure task, Closure reply);
   template <typename ReturnType>
-  void PostTaskAndReplyWithResult(Location from,
+  void PostTaskAndReplyWithResult(const Location& from,
                                   std::function<ReturnType()> task,
                                   std::function<void(ReturnType)> reply) {
     auto* result = new ReturnType;
@@ -62,11 +59,14 @@ class TaskRunner {
                                 result));
   }
-  void CancelTasks();
+  void MultiConsumerRun();
+  void SingleConsumerRun();
+  void CancelTasks();
   void WaitForCompletion();
-  void RunTasks();
+  static void CreateThreadLocalTaskRunner();
+  static TaskRunner* GetThreadLocalTaskRunner();
  private:
   using Task = std::tuple<Location, Closure>;
@@ -74,8 +74,11 @@ class TaskRunner {
   std::deque<Task> queue_;
   mutable std::mutex lock_;
   std::atomic<size_t> task_count_{0};
-  static thread_local std::shared_ptr<TaskRunner> thread_local_task_runner;
+  std::atomic<bool> cancel_tasks_{false};
+  static thread_local std::unique_ptr<TaskRunner> thread_local_task_runner;
+  void CancelTasksInternal();
   TaskRunner(TaskRunner const&) = delete;
   TaskRunner& operator=(TaskRunner const&) = delete;
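A hedged usage sketch for the reshuffled API above, assuming only the declarations shown in this diff are visible; the worker pointer and lambdas are illustrative:

#include <string>

void Example(base::TaskRunner* worker) {
  base::Location here{__func__, __FILE__, __LINE__};

  // Fire-and-forget work on the worker.
  worker->PostTask(here, [] { /* heavy work */ });

  // Run a task on the worker, then deliver its result to the thread-local
  // task runner captured at call time; that thread must pump it with
  // SingleConsumerRun() for the reply to fire. ReturnType is spelled out
  // because std::function parameters do not deduce it from a lambda.
  worker->PostTaskAndReplyWithResult<std::string>(
      here,
      [] { return std::string("computed on worker"); },
      [](std::string result) { /* consume result on the posting thread */ });
}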

View File

@@ -40,31 +40,30 @@ void ThreadPool::Shutdown() {
   threads_.clear();
 }
-void ThreadPool::PostTask(Location from, Closure task) {
+void ThreadPool::PostTask(const Location& from, Closure task) {
   DCHECK((!threads_.empty()));
   task_runner_.PostTask(from, std::move(task));
   semaphore_.release();
 }
-void ThreadPool::PostTaskAndReply(Location from, Closure task, Closure reply) {
+void ThreadPool::PostTaskAndReply(const Location& from,
+                                  Closure task,
+                                  Closure reply) {
   DCHECK((!threads_.empty()));
   task_runner_.PostTaskAndReply(from, std::move(task), std::move(reply));
   semaphore_.release();
 }
-void ThreadPool::CancelTasks() {
-  task_runner_.CancelTasks();
-}
 void ThreadPool::WorkerMain() {
   for (;;) {
     semaphore_.acquire();
     if (quit_.load(std::memory_order_relaxed))
       return;
-    task_runner_.RunTasks();
+    task_runner_.MultiConsumerRun();
   }
 }
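The worker loop above pairs a counting semaphore with the shared TaskRunner: every post releases one permit, every worker blocks on acquire() before draining, and shutdown presumably sets quit_ and releases once per worker so they wake and return (Shutdown() itself is outside this hunk). A standalone toy of that handshake, not the repository's ThreadPool:

#include <atomic>
#include <deque>
#include <functional>
#include <mutex>
#include <semaphore>
#include <thread>
#include <vector>

class ToyPool {
 public:
  explicit ToyPool(unsigned num_threads) {
    for (unsigned i = 0; i < num_threads; ++i)
      threads_.emplace_back([this] { WorkerMain(); });
  }

  ~ToyPool() {
    quit_.store(true, std::memory_order_relaxed);
    for (size_t i = 0; i < threads_.size(); ++i)
      semaphore_.release();  // wake every worker so it can observe quit_
    for (auto& thread : threads_)
      thread.join();
  }

  void Post(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      queue_.push_back(std::move(task));
    }
    semaphore_.release();  // one permit per queued task
  }

 private:
  void WorkerMain() {
    for (;;) {
      semaphore_.acquire();  // sleep until there is work (or quit)
      if (quit_.load(std::memory_order_relaxed))
        return;
      std::function<void()> task;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (queue_.empty())
          continue;
        task = std::move(queue_.front());
        queue_.pop_front();
      }
      task();
    }
  }

  std::vector<std::thread> threads_;
  std::deque<std::function<void()>> queue_;
  std::mutex mutex_;
  std::counting_semaphore<> semaphore_{0};
  std::atomic<bool> quit_{false};
};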

View File

@@ -11,6 +11,8 @@
 namespace base {
+class TaskRunner;
 // Feed the ThreadPool tasks (in the form of Closure objects) and they will be
 // called on any thread from the pool.
 class ThreadPool {
@@ -24,12 +26,12 @@ class ThreadPool {
   void Shutdown();
-  void PostTask(Location from, Closure task);
-  void PostTaskAndReply(Location from, Closure task, Closure reply);
+  void PostTask(const Location& from, Closure task);
+  void PostTaskAndReply(const Location& from, Closure task, Closure reply);
   template <typename ReturnType>
-  void PostTaskAndReplyWithResult(Location from,
+  void PostTaskAndReplyWithResult(const Location& from,
                                   std::function<ReturnType()> task,
                                   std::function<void(ReturnType)> reply) {
     task_runner_.PostTaskAndReplyWithResult(from, std::move(task),
@@ -37,8 +39,6 @@ class ThreadPool {
     semaphore_.release();
   }
-  void CancelTasks();
  private:
   std::vector<std::thread> threads_;

View File

@@ -201,6 +201,7 @@ bool Menu::Initialize() {
               : RendererType::kOpenGL);
         renderer_type_.SetEnabled(
             (Engine::Get().GetRendererType() == RendererType::kVulkan));
+        Engine::Get().ConsumeInputEvents();
       },
       true, Engine::Get().GetRendererType() == RendererType::kVulkan,
       kColorFadeOut, {Vector4f{1, 1, 1, 1}, Vector4f{1, 1, 1, 1}});

View File

@@ -16,6 +16,7 @@ class TaskRunner;
 namespace eng {
+class AudioSink;
 class AudioBus;
 // Mix and render audio with low overhead. A platform specific AudioSink
@@ -84,13 +85,13 @@ class AudioMixer : public AudioSink::Delegate {
   std::list<std::shared_ptr<Resource>> end_list_;
-  std::shared_ptr<base::TaskRunner> main_thread_task_runner_;
+  base::TaskRunner* main_thread_task_runner_;
   std::unique_ptr<AudioSink> audio_sink_;
   bool audio_enabled_ = true;
-  // AudioSink::Delegate interface
+  // AudioSink::Delegate implementation
   int GetChannelCount() final { return kChannelCount; }
   void RenderAudio(float* output_buffer, size_t num_frames) final;

View File

@@ -50,9 +50,6 @@ Engine::Engine(Platform* platform)
 Engine::~Engine() {
   LOG << "Shutting down engine.";
-  thread_pool_.CancelTasks();
-  thread_pool_.Shutdown();
   game_.reset();
   stats_.reset();
   textures_.clear();
@@ -76,7 +73,7 @@ void Engine::Run() {
   float frame_frac = 0.0f;
   for (;;) {
-    TaskRunner::GetThreadLocalTaskRunner()->RunTasks();
+    TaskRunner::GetThreadLocalTaskRunner()->SingleConsumerRun();
     platform_->Update();
     if (platform_->should_exit())
@@ -293,14 +290,12 @@ void Engine::RefreshImage(const std::string& asset_name) {
     return;
   }
-  if (it->second.persistent || it->second.use_count > 0) {
-    auto image = it->second.create_image();
-    if (image)
-      it->second.texture->Update(std::move(image));
-    else
-      it->second.texture->Destroy();
-  }
+  auto image = it->second.create_image();
+  if (image)
+    it->second.texture->Update(std::move(image));
+  else
+    it->second.texture->Destroy();
 }
 Texture* Engine::AcquireTexture(const std::string& asset_name) {
   auto it = textures_.find(asset_name);
@@ -309,9 +304,9 @@ Texture* Engine::AcquireTexture(const std::string& asset_name) {
     return nullptr;
   }
+  it->second.use_count++;
   if (!it->second.texture->IsValid())
     RefreshImage(it->first);
-  it->second.use_count++;
   return it->second.texture.get();
 }
@@ -401,6 +396,10 @@ std::unique_ptr<InputEvent> Engine::GetNextInputEvent() {
   return event;
 }
+void Engine::ConsumeInputEvents() {
+  input_queue_.clear();
+}
 void Engine::StartRecording(const Json::Value& payload) {
   if (!replaying_ && !recording_) {
     recording_ = true;
@@ -636,8 +635,6 @@ void Engine::ContextLost() {
   if (game_)
     game_->ContextLost();
-  input_queue_.clear();
 }
 void Engine::SetStatsVisible(bool visible) {

View File

@@ -84,6 +84,7 @@ class Engine : public PlatformObserver {
   void RemoveCustomShader(const std::string& asset_name);
   std::unique_ptr<InputEvent> GetNextInputEvent();
+  void ConsumeInputEvents();
   void StartRecording(const Json::Value& payload);
   void EndRecording(const std::string file_name);

View File

@@ -144,7 +144,7 @@ class RendererOpenGL final : public Renderer {
   std::counting_semaphore<> draw_complete_semaphore_{0};
-  std::shared_ptr<base::TaskRunner> main_thread_task_runner_;
+  base::TaskRunner* main_thread_task_runner_;
 #endif // THREADED_RENDERING
   // Stats.

View File

@@ -987,7 +987,6 @@ void RendererVulkan::Shutdown() {
     return;
   LOG << "Shutting down renderer.";
-  task_runner_.CancelTasks();
   quit_.store(true, std::memory_order_relaxed);
   semaphore_.release();
   setup_thread_.join();
@@ -2006,7 +2005,7 @@ void RendererVulkan::SetupThreadMain(int preallocate) {
     if (quit_.load(std::memory_order_relaxed))
       break;
-    task_runner_.RunTasks();
+    task_runner_.SingleConsumerRun();
   }
   for (size_t i = 0; i < staging_buffers_.size(); i++) {