// game/system/IOP_Kernel.cpp

#include "IOP_Kernel.h"
#include <cstring>
#include "common/util/Assert.h"
#include "common/util/FileUtil.h"

#include "game/sce/iop.h"

/*!
* Create a new thread. Will not run the thread.
*/
s32 IOP_Kernel::CreateThread(std::string name, u32 (*func)()) {
  ASSERT(_currentThread == -1);  // can only create a thread from the kernel thread.
  u32 ID = (u32)_nextThID++;
  ASSERT(ID == threads.size());

  // add entry
  threads.emplace_back(name, func, ID, this);

  // set up the thread!
  // printf("[IOP Kernel] SetupThread %s...\n", name.c_str());
  // allow creating a "null thread" which doesn't/can't run but occupies slot 0.
  if (func) {
    _currentThread = ID;
    // create the OS thread, which will run the setupThread wrapper
    threads.back().thread = new std::thread(&IOP_Kernel::setupThread, this, ID);
    // wait for the thread to finish setup.
    threads.back().waitForReturnToKernel();
    // ensure we are back in the kernel.
    _currentThread = -1;
  }
  return ID;
}

/*!
* Start a thread. Runs it once, then marks it to run on each dispatch of the IOP kernel.
*/
void IOP_Kernel::StartThread(s32 id) {
  threads.at(id).started = true;  // mark for run
  runThread(id);                  // run now
}
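
// Usage sketch (illustrative, not from the original file): creating and
// starting a worker from the kernel thread. `iop` and `worker_entry` are
// hypothetical names.
//
//   u32 worker_entry();  // thread entry function, returns a status code
//   IOP_Kernel iop;
//   s32 id = iop.CreateThread("worker", worker_entry);
//   iop.StartThread(id);  // runs once now, then again on every dispatchAll()
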
/*!
* Wrapper around entry for a thread.
*/
void IOP_Kernel::setupThread(s32 id) {
  // printf("\tthread %s has started!\n", threads.at(id).name.c_str());
  returnToKernel();
  threads.at(id).waitForDispatch();
  // printf("[IOP Kernel] Thread %s first dispatch!\n", threads.at(id).name.c_str());
  ASSERT(_currentThread == id);  // should run in the thread.
  (threads.at(id).function)();
  // printf("Thread %s has returned!\n", threads.at(id).name.c_str());
  threads.at(id).done = true;
  returnToKernel();
}

/*!
* Run a thread (call from kernel)
*/
void IOP_Kernel::runThread(s32 id) {
  ASSERT(_currentThread == -1);  // should run in the kernel thread
  _currentThread = id;
  threads.at(id).dispatch();
  threads.at(id).waitForReturnToKernel();
  _currentThread = -1;
}

/*!
 * Suspend a thread (call from a user thread). Simply allows other threads to
 * run, like a yield. If the thread is sleeping, this returns only once the
 * thread has been woken up.
 */
void IOP_Kernel::SuspendThread() {
  s32 oldThread = getCurrentThread();
  threads.at(oldThread).returnToKernel();
  threads.at(oldThread).waitForDispatch();
  // check that the kernel resumed us correctly
  ASSERT(_currentThread == oldThread);
}
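
// Yield sketch (illustrative): a thread body that does a unit of work, then
// yields back to the kernel each iteration. Assumes the thread can reach its
// kernel through a pointer `gKernel` (hypothetical, as are the helpers).
//
//   u32 worker_entry() {
//     while (!should_exit()) {     // should_exit() is a hypothetical helper
//       do_some_work();            // do_some_work() likewise
//       gKernel->SuspendThread();  // let the kernel run the other threads
//     }
//     return 0;
//   }
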
/*!
* Sleep a thread. Must be explicitly woken up.
*/
void IOP_Kernel::SleepThread() {
  if (getCurrentThread() == -1) {
    // the kernel thread "sleeps" by dispatching user threads until it is woken.
    mainThreadSleep = true;
    while (mainThreadSleep) {
      dispatchAll();
    }
  } else {
    threads.at(getCurrentThread()).started = false;
    SuspendThread();
  }
}
/*!
* Wake up a thread. Doesn't run it immediately though.
*/
void IOP_Kernel::WakeupThread(s32 id) {
  if (id == -1) {
    // waking the kernel thread just clears the flag set in SleepThread.
    mainThreadSleep = false;
  } else {
    threads.at(id).started = true;
  }
  // todo, should we ever switch directly to that thread?
}
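
// Sleep/wake sketch (illustrative): a thread sleeps until another context
// wakes it, e.g. when a command arrives. `gKernel` and `tid` are hypothetical
// names.
//
//   gKernel->SleepThread();      // (from thread tid) block until woken
//   gKernel->WakeupThread(tid);  // (from elsewhere) mark tid runnable again;
//                                // it actually runs on the next dispatch.

/*!
 * Check if the given thread is the only started, not-yet-finished thread.
 */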
bool IOP_Kernel::OnlyThreadAlive(s32 thid) {
  bool yes = false;
  for (u64 i = 0; i < threads.size(); i++) {
    if (threads[i].started && !threads[i].done) {
      if ((s32)i != thid) {
        return false;
      }
      yes = true;
    }
  }
  return yes;
}

/*!
 * Dispatch all IOP threads.
 */
void IOP_Kernel::dispatchAll() {
  for (u64 i = 0; i < threads.size(); i++) {
    if (threads[i].started && !threads[i].done) {
      // printf("[IOP Kernel] Dispatch %s (%ld)\n", threads[i].name.c_str(), i);
      _currentThread = (s32)i;
      threads[i].dispatch();
      threads[i].waitForReturnToKernel();
      _currentThread = -1;
      // printf("[IOP Kernel] back to kernel!\n");
    }
  }
}
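
// Scheduler sketch (illustrative): the kernel thread drives all IOP threads
// cooperatively by calling dispatchAll() in a loop; each started thread runs
// until it yields via SuspendThread/SleepThread. `running` is hypothetical.
//
//   while (running) {
//     iop.dispatchAll();  // each pass gives every live thread one time slice
//   }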

/*!
 * Return control to the kernel and let it start running. Called from a user thread.
 */
void IopThreadRecord::returnToKernel() {
  runThreadReady = false;
  // should be called from the correct thread
  ASSERT(kernel->getCurrentThread() == thID);
  {
    std::lock_guard<std::mutex> lck(*threadToKernelMutex);
    syscallReady = true;
  }
  threadToKernelCV->notify_one();
}

/*!
 * Start the thread running. Called from the kernel.
 */
void IopThreadRecord::dispatch() {
  syscallReady = false;
  ASSERT(kernel->getCurrentThread() == thID);
  {
    std::lock_guard<std::mutex> lck(*kernelToThreadMutex);
    runThreadReady = true;
  }
  kernelToThreadCV->notify_one();
}

/*!
 * Kernel waits for the thread to return.
 */
void IopThreadRecord::waitForReturnToKernel() {
  std::unique_lock<std::mutex> lck(*threadToKernelMutex);
  threadToKernelCV->wait(lck, [this] { return syscallReady; });
}

/*!
 * Thread waits for the kernel to dispatch it.
 */
void IopThreadRecord::waitForDispatch() {
  std::unique_lock<std::mutex> lck(*kernelToThreadMutex);
  kernelToThreadCV->wait(lck, [this] { return runThreadReady; });
}
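
// Handshake summary (describing the four functions above): each context
// switch is one signal on one of two mutex/condition-variable channels, so
// exactly one side (kernel or thread) is running at any time:
//
//   kernel side                       thread side
//   -----------                       -----------
//   dispatch()              --->      waitForDispatch()      (runThreadReady)
//   waitForReturnToKernel() <---      returnToKernel()       (syscallReady)
//
// Because the two sides strictly alternate, fields like _currentThread can be
// accessed without additional locking.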

void IOP_Kernel::set_rpc_queue(iop::sceSifQueueData* qd, u32 thread) {
  // each queue and each server thread may only be registered once.
  for (const auto& r : sif_records) {
    ASSERT(!(r.qd == qd || r.thread_to_wake == thread));
  }
  SifRecord rec;
  rec.thread_to_wake = thread;
  rec.qd = qd;
  sif_records.push_back(rec);
}
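
// Registration sketch (illustrative): an RPC server thread registers its
// queue data with the kernel before serving it. The exact queue setup follows
// the emulated sce SIF API; `qd` and `server_tid` are hypothetical names.
//
//   static iop::sceSifQueueData qd;
//   // ... point qd.serve_data at a descriptor holding the command id,
//   //     handler function, and receive buffer ...
//   iop.set_rpc_queue(&qd, server_tid);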

typedef void* (*sif_rpc_handler)(unsigned int, void*, int);

bool IOP_Kernel::sif_busy(u32 id) {
  sif_mtx.lock();
  bool rv = false;
  bool found = false;
  for (auto& r : sif_records) {
    if (r.qd->serve_data->command == id) {
      rv = !r.cmd.finished;
      found = true;
      break;
    }
  }
  ASSERT(found);
  sif_mtx.unlock();
  return rv;
}
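
// Note: the id matched here is qd->serve_data->command, i.e. the RPC channel
// number, so sif_busy(channel) reports whether the last sif_rpc on that
// channel has finished (see sif_rpc below).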

void IOP_Kernel::sif_rpc(s32 rpcChannel,
                         u32 fno,
                         bool async,
                         void* sendBuff,
                         s32 sendSize,
                         void* recvBuff,
                         s32 recvSize) {
  ASSERT(async);  // only async RPCs are supported
  sif_mtx.lock();
  // step 1 - find the record for this channel
  SifRecord* rec = nullptr;
  for (auto& e : sif_records) {
    if (e.qd->serve_data->command == (u32)rpcChannel) {
      rec = &e;
    }
  }
  if (!rec) {
    printf("Failed to find handler for sif channel 0x%x\n", rpcChannel);
  }
  ASSERT(rec);
  // step 2 - check the entry is safe to give a command to
  ASSERT(rec->cmd.finished && rec->cmd.started);
  // step 3 - copy the send buffer into the server's buffer
  memcpy(rec->qd->serve_data->buff, sendBuff, sendSize);
  // step 4 - set up the command
  rec->cmd.buff = rec->qd->serve_data->buff;
  rec->cmd.size = sendSize;
  rec->cmd.fno = fno;
  rec->cmd.copy_back_buff = recvBuff;
  rec->cmd.copy_back_size = recvSize;
  rec->cmd.started = false;
  rec->cmd.finished = false;
  sif_mtx.unlock();
}
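
// Call sketch (illustrative): issuing an async RPC from the kernel thread and
// waiting for completion. `channel`, `fno`, and the buffers are hypothetical.
//
//   u8 send[16], recv[16];
//   iop.sif_rpc(channel, fno, /*async*/ true, send, sizeof(send), recv, sizeof(recv));
//   while (iop.sif_busy(channel)) {
//     iop.dispatchAll();  // run the server thread until it finishes the command
//   }
//   // recv now holds the handler's copied-back data.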

void IOP_Kernel::rpc_loop(iop::sceSifQueueData* qd) {
  while (true) {
    bool got_cmd = false;
    SifRpcCommand cmd;
    sif_rpc_handler func = nullptr;

    // get the current command and mark it as started, if there is one
    sif_mtx.lock();
    for (auto& r : sif_records) {
      if (r.qd == qd) {
        cmd = r.cmd;
        got_cmd = true;
        r.cmd.started = true;
        func = r.qd->serve_data->func;
      }
    }
    sif_mtx.unlock();

    // handle the command
    if (got_cmd) {
      if (cmd.shutdown_now) {
        return;
      }
      if (!cmd.started) {
        // run the handler and copy its result back, if requested
        ASSERT(func);
        auto data = func(cmd.fno, cmd.buff, cmd.size);
        if (cmd.copy_back_buff && cmd.copy_back_size) {
          memcpy(cmd.copy_back_buff, data, cmd.copy_back_size);
        }
        // mark the command as finished
        sif_mtx.lock();
        for (auto& r : sif_records) {
          if (r.qd == qd) {
            ASSERT(r.cmd.started);
            r.cmd.finished = true;
          }
        }
        sif_mtx.unlock();
      }
    }
    SuspendThread();
  }
}
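
// Server sketch (illustrative): the registered server thread simply serves
// its queue forever; rpc_loop yields between polls and returns when
// shutdown() sets shutdown_now. `gKernel` and `qd` are hypothetical names.
//
//   u32 rpc_server_entry() {
//     gKernel->rpc_loop(&qd);
//     return 0;
//   }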

void IOP_Kernel::read_disc_sectors(u32 sector, u32 sectors, void* buffer) {
  if (!iso_disc_file) {
    iso_disc_file = file_util::open_file("./disc.iso", "rb");
  }
  ASSERT(iso_disc_file);
  // 0x800 = 2048 bytes per sector
  if (fseek(iso_disc_file, sector * 0x800, SEEK_SET)) {
    ASSERT(false);
  }
  auto rv = fread(buffer, sectors * 0x800, 1, iso_disc_file);
  ASSERT(rv == 1);
}
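
// Read sketch (illustrative): ISO 9660 uses 0x800 (2048) byte sectors, so
// sector N lives at byte offset N * 0x800. For example, reading the primary
// volume descriptor at sector 16:
//
//   u8 pvd[0x800];
//   iop.read_disc_sectors(16, 1, pvd);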

void IOP_Kernel::shutdown() {
  // tell the RPC loops to exit
  for (auto& r : sif_records) {
    r.cmd.shutdown_now = true;
  }
  // ask all threads to exit
  for (auto& t : threads) {
    t.wantExit = true;
  }
  // dispatch until each thread (other than the null thread) finishes, then join it
  for (auto& t : threads) {
    if (t.thID == 0) {
      continue;
    }
    while (!t.done) {
      dispatchAll();
    }
    t.thread->join();
  }
}

IOP_Kernel::~IOP_Kernel() {
  if (iso_disc_file) {
    fclose(iso_disc_file);
  }
}