Gpu: use an std mutex instead of a spin_lock to guard syncpoints

Author: Fernando Sahmkow, 2019-06-08 16:45:25 -04:00 (committed by FernandoS27)
Parent: 78add28aab
Commit: c13433aee4
2 changed files with 6 additions and 6 deletions

src/video_core/gpu.cpp

@@ -69,7 +69,7 @@ const DmaPusher& GPU::DmaPusher() const {
 
 void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
     syncpoints[syncpoint_id]++;
-    sync_guard.lock();
+    sync_mutex.lock();
     if (!events[syncpoint_id].empty()) {
         u32 value = syncpoints[syncpoint_id].load();
         auto it = events[syncpoint_id].begin();
@@ -82,7 +82,7 @@ void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
             it++;
         }
     }
-    sync_guard.unlock();
+    sync_mutex.unlock();
 }
 
 u32 GPU::GetSyncpointValue(const u32 syncpoint_id) const {
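
For readers outside the codebase, here is a minimal, self-contained sketch of the pattern in the hunk above; it is an illustration, not yuzu's actual code. The syncpoint counters stay atomic while the new std::mutex guards the per-syncpoint event lists, and the lock is taken through std::scoped_lock instead of the manual lock()/unlock() calls shown in the diff. The Event fields (expected_value, on_signal), the SyncpointTracker name, and MaxSyncPoints = 192 are assumptions made for the example.

// Sketch only: a std::mutex guarding per-syncpoint event lists while the
// counters themselves remain std::atomic. Not yuzu code.
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <list>
#include <mutex>

using u32 = std::uint32_t;
constexpr std::size_t MaxSyncPoints = 192; // assumed value

struct Event {
    u32 expected_value;                 // syncpoint value the waiter is blocked on
    std::function<void(u32)> on_signal; // callback fired once the value is reached
};

class SyncpointTracker {
public:
    void IncrementSyncPoint(u32 syncpoint_id) {
        syncpoints[syncpoint_id]++;        // the counter itself is atomic
        std::scoped_lock lock{sync_mutex}; // scoped form of lock()/unlock()
        const u32 value = syncpoints[syncpoint_id].load();
        auto& list = events[syncpoint_id];
        for (auto it = list.begin(); it != list.end();) {
            if (value >= it->expected_value) {
                it->on_signal(value);      // signal the satisfied waiter
                it = list.erase(it);       // and drop its event
            } else {
                ++it;
            }
        }
    }

private:
    std::array<std::atomic<u32>, MaxSyncPoints> syncpoints{};
    std::array<std::list<Event>, MaxSyncPoints> events;
    std::mutex sync_mutex;
};

In general, a blocking std::mutex fits a critical section like this one, which walks a list and may run callbacks, better than a busy-waiting spin lock, since a contending thread sleeps instead of burning CPU time.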

src/video_core/gpu.h

@@ -8,11 +8,11 @@
 #include <atomic>
 #include <list>
 #include <memory>
+#include <mutex>
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 #include "video_core/dma_pusher.h"
-#include "common/spin_lock.h"
 
 using CacheAddr = std::uintptr_t;
 inline CacheAddr ToCacheAddr(const void* host_ptr) {
@@ -178,9 +178,9 @@ public:
 
     void Guard(bool guard_set) {
         if (guard_set) {
-            sync_guard.lock();
+            sync_mutex.lock();
         } else {
-            sync_guard.unlock();
+            sync_mutex.unlock();
         }
     }
 
@@ -297,7 +297,7 @@ private:
 
     std::array<std::list<Event>, Service::Nvidia::MaxSyncPoints> events;
 
-    Common::SpinLock sync_guard{};
+    std::mutex sync_mutex;
 };
 
 #define ASSERT_REG_POSITION(field_name, position) \
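
The Guard(bool) helper changed in the header above simply forwards to sync_mutex.lock() and sync_mutex.unlock(), letting callers hold the mutex across a region they manage themselves. Below is a hypothetical, standalone reduction of that idiom together with a usage sketch; the Gpu class here, DoWorkWhileGuarded, and the call-site comments are illustrative assumptions, not lines from this commit.

// Illustration only: the Guard(bool) idiom reduced to a standalone class.
#include <mutex>

class Gpu {
public:
    // Lets callers take or release sync_mutex around a region they control.
    void Guard(bool guard_set) {
        if (guard_set) {
            sync_mutex.lock();
        } else {
            sync_mutex.unlock();
        }
    }

private:
    std::mutex sync_mutex;
};

void DoWorkWhileGuarded(Gpu& gpu) {
    gpu.Guard(true);  // acquire sync_mutex
    // ... read or update syncpoint/event state shared with the increment path ...
    gpu.Guard(false); // release sync_mutex
}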