Added Signals; more runtime fixes

Hamish Milne authored on 2020-01-12 00:24:44 +00:00; committed by zhupengfei
parent e4f05884c3
commit 8abc5525be
17 changed files with 118 additions and 56 deletions

@@ -1504,19 +1504,12 @@ void GMainWindow::OnCheats() {
}
void GMainWindow::OnSave() {
Core::System& system{Core::System::GetInstance()};
auto fs = std::ofstream("save0.citrasave");
emu_thread->SetRunning(false);
Core::System::GetInstance().Save(fs);
emu_thread->SetRunning(true);
Core::System::GetInstance().SendSignal(Core::System::Signal::Save);
}
void GMainWindow::OnLoad() {
if (QFileInfo("save0.citrasave").exists()) {
auto fs = std::ifstream("save0.citrasave");
emu_thread->SetRunning(false);
Core::System::GetInstance().Load(fs);
emu_thread->SetRunning(true);
Core::System::GetInstance().SendSignal(Core::System::Signal::Load);
}
}
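Note: both slots now just post a request; the actual file I/O and (de)serialization happen on the emu thread inside Core::System::RunLoop (see the core.cpp hunk below), so the frontend no longer stops and restarts the emu thread around a save. SendSignal returns false when another request is still pending, which these slots currently ignore. A minimal sketch of how a frontend could surface that, assuming the Qt context of GMainWindow (hypothetical variant, not part of this commit; the message text is made up):

#include <QMessageBox>

void GMainWindow::OnSave() {
    if (!Core::System::GetInstance().SendSignal(Core::System::Signal::Save)) {
        // Another request (reset/shutdown/load) has not been serviced yet.
        QMessageBox::warning(this, tr("Citra"),
                             tr("Another core request is still pending, try again."));
    }
}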

@@ -160,15 +160,33 @@ private:
// The priority level queues of thread ids.
std::array<Queue, NUM_QUEUES> queues;
s32 ToIndex(Queue* q) const {
if (q == nullptr) {
return -2;
} else if (q == UnlinkedTag()) {
return -1;
} else {
return static_cast<s32>(q - &queues[0]);
}
}
Queue* ToPointer(s32 idx) {
if (idx == -1) {
return UnlinkedTag();
} else if (idx < 0) {
return nullptr;
} else {
return &queues[idx];
}
}
friend class boost::serialization::access;
template <class Archive>
void save(Archive& ar, const unsigned int file_version) const {
s32 idx = first == UnlinkedTag() ? -1 : static_cast<s32>(first - &queues[0]);
s32 idx = ToIndex(first);
ar << idx;
for (auto i = 0; i < NUM_QUEUES; i++) {
s32 idx1 = first == UnlinkedTag()
? -1
: static_cast<s32>(queues[i].next_nonempty - &queues[0]);
s32 idx1 = ToIndex(queues[i].next_nonempty);
ar << idx1;
ar << queues[i].data;
}
@@ -178,10 +196,10 @@ private:
void load(Archive& ar, const unsigned int file_version) {
s32 idx;
ar >> idx;
first = idx < 0 ? UnlinkedTag() : &queues[idx];
first = ToPointer(idx);
for (auto i = 0; i < NUM_QUEUES; i++) {
ar >> idx;
queues[i].next_nonempty = idx < 0 ? UnlinkedTag() : &queues[idx];
queues[i].next_nonempty = ToPointer(idx);
ar >> queues[i].data;
}
}
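Note: ToIndex/ToPointer replace the ad-hoc pointer arithmetic in save()/load() and give the intrusive next_nonempty links a stable on-disk form: -2 encodes a null pointer, -1 the UnlinkedTag() sentinel, and anything else an offset into the queues array. A standalone round-trip sketch of that encoding (simplified types, not Citra's actual classes):

#include <array>
#include <cassert>

struct Queue {
    Queue* next_nonempty = nullptr;
};

constexpr int NUM_QUEUES = 4;
std::array<Queue, NUM_QUEUES> queues{};
Queue unlinked_sentinel; // stands in for the real UnlinkedTag() tag pointer

Queue* UnlinkedTag() {
    return &unlinked_sentinel;
}

int ToIndex(Queue* q) {
    if (q == nullptr)
        return -2;                           // no queue linked at all
    if (q == UnlinkedTag())
        return -1;                           // "unlinked" sentinel
    return static_cast<int>(q - &queues[0]); // position inside the array
}

Queue* ToPointer(int idx) {
    if (idx == -1)
        return UnlinkedTag();
    if (idx < 0)
        return nullptr;
    return &queues[idx];
}

int main() {
    assert(ToPointer(ToIndex(&queues[2])) == &queues[2]);
    assert(ToPointer(ToIndex(UnlinkedTag())) == UnlinkedTag());
    assert(ToPointer(ToIndex(nullptr)) == nullptr);
}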

@@ -264,7 +264,7 @@ private:
ar >> r;
SetCP15Register(static_cast<CP15Register>(i), r);
}
// TODO: Clear caches etc?
ClearInstructionCache();
}
BOOST_SERIALIZATION_SPLIT_MEMBER()

@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <fstream>
#include <memory>
#include <utility>
#include <boost/serialization/array.hpp>
@@ -103,15 +104,38 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
HW::Update();
Reschedule();
if (reset_requested.exchange(false)) {
auto signal = current_signal.exchange(Signal::None);
switch (signal) {
case Signal::Reset:
Reset();
} else if (shutdown_requested.exchange(false)) {
break;
case Signal::Shutdown:
return ResultStatus::ShutdownRequested;
break;
case Signal::Load: {
auto stream = std::ifstream("save0.citrasave", std::fstream::binary);
System::Load(stream);
} break;
case Signal::Save: {
auto stream = std::ofstream("save0.citrasave", std::fstream::binary);
System::Save(stream);
} break;
default:
break;
}
return status;
}
bool System::SendSignal(System::Signal signal) {
auto prev = System::Signal::None;
if (!current_signal.compare_exchange_strong(prev, signal)) {
LOG_ERROR(Core, "Unable to {} as {} is ongoing", signal, prev);
return false;
}
return true;
}
System::ResultStatus System::SingleStep() {
return RunLoop(false);
}
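Note: the separate reset_requested/shutdown_requested flags are folded into one std::atomic<Signal> (see the core.h hunk below). SendSignal's compare_exchange_strong admits at most one pending request at a time, and RunLoop drains it with exchange(Signal::None) so each request is serviced exactly once. A standalone sketch of that pattern (names mirror the code above, types simplified):

#include <atomic>
#include <cstdio>

enum class Signal { None, Shutdown, Reset, Save, Load };
std::atomic<Signal> current_signal{Signal::None};

bool SendSignal(Signal signal) {
    Signal expected = Signal::None;
    // Succeeds only while nothing else is pending; otherwise the caller must retry later.
    return current_signal.compare_exchange_strong(expected, signal);
}

Signal ConsumeSignal() {
    // What the run loop does each iteration: take the pending request and clear the slot atomically.
    return current_signal.exchange(Signal::None);
}

int main() {
    std::printf("save accepted:  %d\n", SendSignal(Signal::Save));          // 1
    std::printf("reset accepted: %d\n", SendSignal(Signal::Reset));         // 0, Save still pending
    std::printf("consumed:       %d\n", static_cast<int>(ConsumeSignal())); // 3 == Save
    std::printf("reset accepted: %d\n", SendSignal(Signal::Reset));         // 1, slot is free again
}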
@@ -216,8 +240,8 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mode) {
timing = std::make_unique<Timing>();
kernel = std::make_unique<Kernel::KernelSystem>(
*memory, *timing, [this] { PrepareReschedule(); }, system_mode);
kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing,
[this] { PrepareReschedule(); }, system_mode);
if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
@@ -409,6 +433,7 @@ void System::Reset() {
template <class Archive>
void System::serialize(Archive& ar, const unsigned int file_version) {
Memory::RasterizerFlushAndInvalidateRegion(0, 0xFFFFFFFF);
ar&* cpu_core.get();
ar&* service_manager.get();
ar& GPU::g_regs;
@@ -436,11 +461,20 @@ void System::Save(std::ostream& stream) const {
}
void System::Load(std::istream& stream) {
{
iarchive ia{stream};
ia&* this;
try {
{
iarchive ia{stream};
ia&* this;
}
VideoCore::Load(stream);
// Flush state through:
Kernel().SetCurrentProcess(Kernel().GetCurrentProcess());
} catch (const std::exception& e) {
LOG_ERROR(Core, "Error loading: {}", e.what());
}
VideoCore::Load(stream);
}
} // namespace Core
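Note: Load now catches and logs deserialization failures instead of letting the exception escape into the run loop, and the VideoCore::Load call plus the SetCurrentProcess flush move inside the try block. Because Save and Load take plain streams, a state can also be round-tripped in memory; a hedged fragment assuming an already-initialized system (illustrative only, not part of this commit):

#include <sstream>

std::stringstream state(std::ios::in | std::ios::out | std::ios::binary);
Core::System::GetInstance().Save(state);
state.seekg(0);
Core::System::GetInstance().Load(state); // a corrupt state is now logged, not fatal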

@@ -116,14 +116,18 @@ public:
/// Shutdown and then load again
void Reset();
enum class Signal : u32 { None, Shutdown, Reset, Save, Load };
bool SendSignal(Signal signal);
/// Request reset of the system
void RequestReset() {
reset_requested = true;
SendSignal(Signal::Reset);
}
/// Request shutdown of the system
void RequestShutdown() {
shutdown_requested = true;
SendSignal(Signal::Shutdown);
}
/**
@@ -341,8 +345,7 @@ private:
Frontend::EmuWindow* m_emu_window;
std::string m_filepath;
std::atomic<bool> reset_requested;
std::atomic<bool> shutdown_requested;
std::atomic<Signal> current_signal;
friend class boost::serialization::access;
template <typename Archive>

@@ -23,14 +23,10 @@ bool Timing::Event::operator<(const Event& right) const {
TimingEventType* Timing::RegisterEvent(const std::string& name, TimedCallback callback) {
// check for existing type with same name.
// we want event type names to remain unique so that we can use them for serialization.
ASSERT_MSG(event_types.find(name) == event_types.end(),
"CoreTiming Event \"{}\" is already registered. Events should only be registered "
"during Init to avoid breaking save states.",
name);
auto info = event_types.emplace(name, TimingEventType{callback, nullptr});
auto info = event_types.emplace(name, TimingEventType{});
TimingEventType* event_type = &info.first->second;
event_type->name = &info.first->first;
event_type->callback = callback;
return event_type;
}
@@ -129,6 +125,10 @@ void Timing::Advance() {
Event evt = std::move(event_queue.front());
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
if (event_types.find(*evt.type->name) == event_types.end()) {
LOG_ERROR(Core, "Unknown queued event");
continue;
}
evt.type->callback(evt.userdata, global_timer - evt.time);
}
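Note: RegisterEvent used to assert on duplicate names; presumably save-state loading re-registers the same event types when subsystems are reconstructed, so the assert is dropped and the existing map slot is reused (emplace keeps the existing entry, and the callback is simply refreshed). Advance now skips queued events whose type is no longer registered, logging an error instead of invoking a stale callback. A standalone sketch of the reuse behaviour (simplified types, not Citra's):

#include <cstdio>
#include <string>
#include <unordered_map>

struct EventType {
    int callback = 0;                 // stands in for the real TimedCallback
    const std::string* name = nullptr;
};
std::unordered_map<std::string, EventType> event_types;

EventType* RegisterEvent(const std::string& name, int callback) {
    // emplace() is a no-op when the key already exists, so the second
    // registration returns the same slot instead of tripping an assert.
    auto info = event_types.emplace(name, EventType{});
    EventType* event_type = &info.first->second;
    event_type->name = &info.first->first;
    event_type->callback = callback;  // refreshed either way
    return event_type;
}

int main() {
    EventType* first = RegisterEvent("VBlank", 1);
    EventType* second = RegisterEvent("VBlank", 2);
    std::printf("same slot: %d, callback: %d\n", first == second, first->callback);
}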

@@ -14,6 +14,7 @@
// FileSys namespace
SERIALIZE_EXPORT_IMPL(FileSys::IVFCFile)
SERIALIZE_EXPORT_IMPL(FileSys::IVFCFileInMemory)
SERIALIZE_EXPORT_IMPL(FileSys::IVFCDelayGenerator)
SERIALIZE_EXPORT_IMPL(FileSys::RomFSDelayGenerator)
SERIALIZE_EXPORT_IMPL(FileSys::ExeFSDelayGenerator)

@@ -176,11 +176,23 @@ private:
std::vector<u8> romfs_file;
u64 data_offset;
u64 data_size;
IVFCFileInMemory() = default;
template <class Archive>
void serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<FileBackend>(*this);
ar& romfs_file;
ar& data_offset;
ar& data_size;
}
friend class boost::serialization::access;
};
} // namespace FileSys
BOOST_CLASS_EXPORT_KEY(FileSys::IVFCFile)
BOOST_CLASS_EXPORT_KEY(FileSys::IVFCFileInMemory)
BOOST_CLASS_EXPORT_KEY(FileSys::IVFCDelayGenerator)
BOOST_CLASS_EXPORT_KEY(FileSys::RomFSDelayGenerator)
BOOST_CLASS_EXPORT_KEY(FileSys::ExeFSDelayGenerator)
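Note: these two hunks follow the standard Boost.Serialization recipe for restoring a derived object through a base-class pointer: a (private) default constructor, a serialize() that chains through base_object, friend access, and an export key in the header paired with SERIALIZE_EXPORT_IMPL in the .cpp. A self-contained sketch of that recipe with hypothetical types (BOOST_CLASS_EXPORT combines key and implementation here for brevity):

#include <iostream>
#include <sstream>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>

class FileLike {
public:
    virtual ~FileLike() = default;
    virtual unsigned Size() const { return 0; }
private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive&, const unsigned int) {}
};

class MemoryFile : public FileLike {
public:
    explicit MemoryFile(unsigned size) : size(size) {}
    unsigned Size() const override { return size; }
private:
    MemoryFile() = default; // the archive default-constructs, then fills in members
    unsigned size = 0;
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        ar& boost::serialization::base_object<FileLike>(*this);
        ar& size;
    }
};
BOOST_CLASS_EXPORT(MemoryFile) // without this, saving through a FileLike* fails with "unregistered class"

int main() {
    std::stringstream ss;
    {
        boost::archive::text_oarchive oa{ss};
        const FileLike* const file = new MemoryFile(64);
        oa << file; // serialized polymorphically via the export entry
        delete file;
    }
    FileLike* restored = nullptr;
    boost::archive::text_iarchive ia{ss};
    ia >> restored;
    std::cout << restored->Size() << '\n'; // 64
    delete restored;
}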

@@ -23,6 +23,9 @@ KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
std::function<void()> prepare_reschedule_callback, u32 system_mode)
: memory(memory), timing(timing),
prepare_reschedule_callback(std::move(prepare_reschedule_callback)) {
for (auto i = 0; i < memory_regions.size(); i++) {
memory_regions[i] = std::make_shared<MemoryRegionInfo>();
}
MemoryInit(system_mode);
resource_limits = std::make_unique<ResourceLimitList>(*this);
@@ -107,7 +110,7 @@ template <class Archive>
void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
ar& memory_regions;
ar& named_ports;
ar&* current_cpu.get();
// current_cpu set externally
// NB: subsystem references and prepare_reschedule_callback are constant
ar&* resource_limits.get();
ar& next_object_id;

@@ -134,7 +134,8 @@ public:
*/
ResultVal<std::shared_ptr<Thread>> CreateThread(std::string name, VAddr entry_point,
u32 priority, u32 arg, s32 processor_id,
VAddr stack_top, std::shared_ptr<Process> owner_process);
VAddr stack_top,
std::shared_ptr<Process> owner_process);
/**
* Creates a semaphore.
@@ -232,11 +233,11 @@ public:
IPCDebugger::Recorder& GetIPCRecorder();
const IPCDebugger::Recorder& GetIPCRecorder() const;
MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);
std::shared_ptr<MemoryRegionInfo> GetMemoryRegion(MemoryRegion region);
void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping);
std::array<MemoryRegionInfo, 3> memory_regions;
std::array<std::shared_ptr<MemoryRegionInfo>, 3> memory_regions{};
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
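Note: the regions move from an array of values to an array of shared_ptr, presumably so that holders such as Process::memory_region, shared memory blocks and TLS allocation (see the following hunks) keep owning references that Boost.Serialization restores to the very same objects after a load, instead of raw pointers into a kernel-owned array that would need manual fix-up. A standalone sketch of the shared-ownership round trip, using a hypothetical Region type:

#include <iostream>
#include <memory>
#include <sstream>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/shared_ptr.hpp>

struct Region {
    unsigned size = 0;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        ar& size;
    }
};

int main() {
    auto region = std::make_shared<Region>();
    region->size = 0x06000000;
    const std::shared_ptr<Region> kernel_slot = region;    // e.g. KernelSystem::memory_regions[0]
    const std::shared_ptr<Region> process_region = region; // e.g. Process::memory_region

    std::stringstream ss;
    {
        boost::archive::text_oarchive oa{ss};
        oa << kernel_slot << process_region;
    }

    std::shared_ptr<Region> restored_slot, restored_region;
    boost::archive::text_iarchive ia{ss};
    ia >> restored_slot >> restored_region;
    // Both restored pointers refer to a single shared object, just as before the save.
    std::cout << (restored_slot == restored_region) << ' '
              << std::hex << restored_region->size << '\n';
}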

@@ -49,9 +49,9 @@ void KernelSystem::MemoryInit(u32 mem_type) {
// the sizes specified in the memory_region_sizes table.
VAddr base = 0;
for (int i = 0; i < 3; ++i) {
memory_regions[i].Reset(base, memory_region_sizes[mem_type][i]);
memory_regions[i]->Reset(base, memory_region_sizes[mem_type][i]);
base += memory_regions[i].size;
base += memory_regions[i]->size;
}
// We must've allocated the entire FCRAM by the end
@@ -63,20 +63,20 @@ void KernelSystem::MemoryInit(u32 mem_type) {
// app_mem_malloc does not always match the configured size for memory_region[0]: in case the
// n3DS type override is in effect it reports the size the game expects, not the real one.
config_mem.app_mem_alloc = memory_region_sizes[mem_type][0];
config_mem.sys_mem_alloc = memory_regions[1].size;
config_mem.base_mem_alloc = memory_regions[2].size;
config_mem.sys_mem_alloc = memory_regions[1]->size;
config_mem.base_mem_alloc = memory_regions[2]->size;
shared_page_handler = std::make_shared<SharedPage::Handler>(timing);
}
MemoryRegionInfo* KernelSystem::GetMemoryRegion(MemoryRegion region) {
std::shared_ptr<MemoryRegionInfo> KernelSystem::GetMemoryRegion(MemoryRegion region) {
switch (region) {
case MemoryRegion::APPLICATION:
return &memory_regions[0];
return memory_regions[0];
case MemoryRegion::SYSTEM:
return &memory_regions[1];
return memory_regions[1];
case MemoryRegion::BASE:
return &memory_regions[2];
return memory_regions[2];
default:
UNREACHABLE();
}

@@ -200,7 +200,7 @@ public:
u32 memory_used = 0;
MemoryRegionInfo* memory_region = nullptr;
std::shared_ptr<MemoryRegionInfo> memory_region = nullptr;
/// The Thread Local Storage area is allocated as processes create threads,
/// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part

@@ -42,7 +42,7 @@ ResultVal<std::shared_ptr<SharedMemory>> KernelSystem::CreateSharedMemory(
if (address == 0) {
// We need to allocate a block from the Linear Heap ourselves.
// We'll manually allocate some memory from the linear heap in the specified region.
MemoryRegionInfo* memory_region = GetMemoryRegion(region);
auto memory_region = GetMemoryRegion(region);
auto offset = memory_region->LinearAllocate(size);
ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
@@ -79,7 +79,7 @@ std::shared_ptr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
auto shared_memory{std::make_shared<SharedMemory>(*this)};
// Allocate memory in heap
MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::SYSTEM);
auto memory_region = GetMemoryRegion(MemoryRegion::SYSTEM);
auto backing_blocks = memory_region->HeapAllocate(size);
ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!");
shared_memory->holding_memory = backing_blocks;

@@ -32,7 +32,7 @@ namespace Kernel {
template <class Archive>
void Thread::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<Object>(*this);
ar& boost::serialization::base_object<WaitObject>(*this);
ar&* context.get();
ar& thread_id;
ar& status;
@@ -363,7 +363,7 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
if (needs_allocation) {
// There are no already-allocated pages with free slots, lets allocate a new one.
// TLS pages are allocated from the BASE region in the linear heap.
MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::BASE);
auto memory_region = GetMemoryRegion(MemoryRegion::BASE);
// Allocate some memory from the end of the linear heap for this region.
auto offset = memory_region->LinearAllocate(Memory::PAGE_SIZE);

@@ -824,10 +824,6 @@ std::unique_ptr<Kernel::SessionRequestHandler::SessionDataBase> GSP_GPU::MakeSes
return std::make_unique<SessionData>(this);
}
SessionData::SessionData() {
UNREACHABLE();
}
SessionData::SessionData(GSP_GPU* gsp) : gsp(gsp) {
// Assign a new thread id to this session when it connects. Note: In the real GSP service this
// is done through a real thread (svcCreateThread) but we have to simulate it since our HLE

@@ -187,7 +187,7 @@ class GSP_GPU;
class SessionData : public Kernel::SessionRequestHandler::SessionDataBase {
public:
SessionData();
SessionData() = default;
SessionData(GSP_GPU* gsp);
~SessionData();

@@ -15,6 +15,7 @@
SERVICE_CONSTRUCT_IMPL(Service::LDR::RO)
SERIALIZE_EXPORT_IMPL(Service::LDR::RO)
SERIALIZE_EXPORT_IMPL(Service::LDR::ClientSlot)
namespace Service::LDR {