#include "IOP_Kernel.h"

#include <cstring>

#include "common/util/Assert.h"
#include "common/util/FileUtil.h"

#include "game/sce/iop.h"

using namespace std::chrono;

/*
** wrap thread entry points to ensure they don't return into libco
*/
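// The wrapper below captures the pending entry point into a stack local, switches straight
// back to its creator (CreateThread), and only calls the entry function once the kernel
// later resumes this cothread. If the entry function ever returns, the wrapper spins on
// iop::ExitThread() so control never falls off the end of a libco cothread.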
static void (*thread_entry)() = nullptr;
static cothread_t wrap_return;

void IopThread::functionWrapper() {
  void (*f)() = thread_entry;
  co_switch(wrap_return);
  if (f != nullptr) {
    f();
  }
  // libco threads must not return
  while (true) {
    iop::ExitThread();
  }
}

/*
** -----------------------------------------------------------------------------
** Functions callable by threads
** -----------------------------------------------------------------------------
*/

/*!
 * Create a new thread. Will not run the thread.
 */
s32 IOP_Kernel::CreateThread(std::string name, void (*func)(), u32 priority) {
  u32 ID = (u32)_nextThID++;
  ASSERT(ID == threads.size());

  // add entry
  threads.emplace_back(name, func, ID, priority);

  // enter the function wrapper so it can put the actual thread entry on its stack
  // to call it when the thread is eventually started
  thread_entry = func;
  wrap_return = co_active();
  co_switch(threads.at(ID).thread);

  return ID;
}

/*!
 * Start a thread, marking it as ready to run on each dispatch of the IOP kernel.
 */
void IOP_Kernel::StartThread(s32 id) {
  threads.at(id).state = IopThread::State::Ready;
}

s32 IOP_Kernel::ExitThread() {
  ASSERT(_currentThread);
  _currentThread->state = IopThread::State::Dormant;

  return 0;
}

/*!
 * Put a thread in the Wait state for the desired number of microseconds.
 */
void IOP_Kernel::DelayThread(u32 usec) {
  ASSERT(_currentThread);

  _currentThread->state = IopThread::State::Wait;
  _currentThread->waitType = IopThread::Wait::Delay;
  _currentThread->resumeTime =
      time_point_cast<microseconds>(steady_clock::now()) + microseconds(usec);
  leaveThread();
}
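
// Note: the delay is a lower bound. The thread only becomes Ready again on the first
// updateDelay() call after resumeTime has passed, i.e. on a later kernel dispatch.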

/*!
 * Sleep a thread. Must be explicitly woken up.
 */
void IOP_Kernel::SleepThread() {
  ASSERT(_currentThread);

  _currentThread->state = IopThread::State::Suspend;
  leaveThread();
}

/*!
 * Wake up a thread. Doesn't run it immediately though.
 */
void IOP_Kernel::WakeupThread(s32 id) {
  ASSERT(id > 0);
  threads.at(id).state = IopThread::State::Ready;
}

void IOP_Kernel::iWakeupThread(s32 id) {
  ASSERT(id > 0);
  std::scoped_lock lock(wakeup_mtx);
  wakeup_queue.push(id);
}
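
// Unlike WakeupThread, iWakeupThread only queues the wakeup under wakeup_mtx; the queue is
// drained by processWakeups() on the kernel thread, so it is safe to call from outside the
// cooperatively-scheduled IOP threads (as sif_rpc below does, presumably from another host
// thread).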

s32 IOP_Kernel::WaitSema(s32 id) {
  auto& sema = semas.at(id);
  if (sema.count > 0) {
    sema.count--;
    return KE_OK;
  }

  sema.wait_list.push_back(_currentThread);
  _currentThread->state = IopThread::State::Wait;
  _currentThread->waitType = IopThread::Wait::Semaphore;
  leaveThread();

  return KE_OK;
}
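
// When no count is available the caller is parked on the wait list and yields to the
// kernel. SignalSema hands the signal directly to a waiting thread (the count is not
// incremented and re-decremented), so a resumed waiter simply returns KE_OK.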

s32 IOP_Kernel::SignalSema(s32 id) {
  auto& sema = semas.at(id);

  if (sema.count >= sema.maxCount) {
    return KE_SEMA_OVF;
  }

  if (sema.wait_list.empty()) {
    sema.count++;
    return KE_OK;
  }

  IopThread* to_run = nullptr;

  if (sema.attr == Semaphore::attribute::fifo) {
    to_run = sema.wait_list.front();
    sema.wait_list.pop_front();
  } else {
    auto it =
        std::max_element(sema.wait_list.begin(), sema.wait_list.end(),
                         [](IopThread*& a, IopThread*& b) { return a->priority < b->priority; });
    to_run = *it;
    sema.wait_list.erase(it);
  }

  to_run->waitType = IopThread::Wait::None;
  to_run->state = IopThread::State::Ready;
  return KE_OK;
}

s32 IOP_Kernel::PollSema(s32 id) {
  auto& sema = semas.at(id);
  if (sema.count > 0) {
    sema.count--;
    ASSERT(sema.count >= 0);
    return KE_OK;
  }

  return KE_SEMA_ZERO;
}

/*!
 * Return to kernel from a thread, not to be called from the kernel thread.
 */
void IOP_Kernel::leaveThread() {
  IopThread* oldThread = _currentThread;
  co_switch(kernel_thread);

  // check kernel resumed us correctly
  ASSERT(_currentThread == oldThread);
}

/*
** -----------------------------------------------------------------------------
** Kernel functions.
** -----------------------------------------------------------------------------
*/

/*!
 * Run a thread (call from kernel)
 */
void IOP_Kernel::runThread(IopThread* thread) {
  ASSERT(_currentThread == nullptr);  // should run in the kernel thread
  _currentThread = thread;
  thread->state = IopThread::State::Run;
  co_switch(thread->thread);
  _currentThread = nullptr;
}
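
// _currentThread is only non-null while a thread's cothread is executing; the functions
// callable by threads (ExitThread, DelayThread, SleepThread, WaitSema) rely on it to
// identify the calling thread.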

/*!
** Update wait states for delayed threads
*/
void IOP_Kernel::updateDelay() {
  for (auto& t : threads) {
    if (t.waitType == IopThread::Wait::Delay) {
      if (steady_clock::now() > t.resumeTime) {
        t.waitType = IopThread::Wait::None;
        t.state = IopThread::State::Ready;
      }
    }
  }
}

std::optional<time_stamp> IOP_Kernel::nextWakeup() {
  bool found_ready = false;
  time_stamp lowest = time_point_cast<microseconds>(steady_clock::now()) + microseconds(1000);

  for (auto& t : threads) {
    if (t.waitType == IopThread::Wait::Delay) {
      if (t.resumeTime < lowest) {
        lowest = t.resumeTime;
      }
    }

    if (t.state == IopThread::State::Ready) {
      found_ready = true;
    }
  }

  if (found_ready) {
    return {};
  } else {
    return lowest;
  }
}
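
// An empty optional means at least one thread is already Ready, so the caller should
// dispatch again without sleeping; otherwise the result is the earliest Delay expiry,
// capped at 1000 microseconds from now.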

/*!
** Get next thread to run.
** i.e. Highest prio in ready state.
*/
IopThread* IOP_Kernel::schedNext() {
  IopThread* highest_prio = nullptr;

  for (auto& t : threads) {
    if (t.state == IopThread::State::Ready) {
      if (highest_prio == nullptr) {
        highest_prio = &t;
      }

      // Lower number = higher priority
      if (t.priority < highest_prio->priority) {
        highest_prio = &t;
      }
    }
  }

  return highest_prio;
}

void IOP_Kernel::processWakeups() {
  std::scoped_lock lock(wakeup_mtx);
  while (!wakeup_queue.empty()) {
    WakeupThread(wakeup_queue.front());
    wakeup_queue.pop();
  }
}

/*!
 * Run ready IOP threads until all are idle, then report when the next one should wake up.
 */
std::optional<time_stamp> IOP_Kernel::dispatch() {
  // Update thread states
  updateDelay();
  processWakeups();

  // Run until all threads are idle
  IopThread* next = schedNext();
  while (next != nullptr) {
    // Check vblank interrupt
    if (vblank_handler != nullptr && vblank_recieved) {
      vblank_handler(nullptr);
      vblank_recieved = false;
    }
    // printf("[IOP Kernel] Dispatch %s (%d)\n", next->name.c_str(), next->thID);
    runThread(next);
    updateDelay();
    processWakeups();
    next = schedNext();
    // printf("[IOP Kernel] back to kernel!\n");
  }

  // printf("[IOP Kernel] No runnable threads\n");
  return nextWakeup();
}
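
// The host main loop is expected to call dispatch() repeatedly. A minimal sketch, assuming
// external setup of the kernel cothread and a hypothetical `iop_kernel` instance:
//
//   while (running) {
//     auto wakeup = iop_kernel.dispatch();
//     if (wakeup) {
//       // nothing is ready; sleep until the next delayed thread is due
//       std::this_thread::sleep_until(*wakeup);
//     }
//   }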

void IOP_Kernel::set_rpc_queue(iop::sceSifQueueData* qd, u32 thread) {
  sif_mtx.lock();
  for (const auto& r : sif_records) {
    ASSERT(!(r.qd == qd || r.thread_to_wake == thread));
  }
  SifRecord rec;
  rec.thread_to_wake = thread;
  rec.qd = qd;
  sif_records.push_back(rec);
  sif_mtx.unlock();
}

typedef void* (*sif_rpc_handler)(unsigned int, void*, int);

bool IOP_Kernel::sif_busy(u32 id) {
  sif_mtx.lock();
  bool rv = false;
  bool found = false;
  for (auto& r : sif_records) {
    if (r.qd->serve_data->command == id) {
      rv = !r.cmd.finished;
      found = true;
      break;
    }
  }
  ASSERT(found);
  sif_mtx.unlock();
  return rv;
}
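
// sif_busy reports whether the most recent RPC posted on the given channel is still being
// served, i.e. rpc_loop has not yet marked its command finished.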

void IOP_Kernel::sif_rpc(s32 rpcChannel,
                         u32 fno,
                         bool async,
                         void* sendBuff,
                         s32 sendSize,
                         void* recvBuff,
                         s32 recvSize) {
  ASSERT(async);
  sif_mtx.lock();
  // step 1 - find entry
  SifRecord* rec = nullptr;
  for (auto& e : sif_records) {
    if (e.qd->serve_data->command == (u32)rpcChannel) {
      rec = &e;
    }
  }
  if (!rec) {
    printf("Failed to find handler for sif channel 0x%x\n", rpcChannel);
  }
  ASSERT(rec);

  // step 2 - check entry is safe to give command to
  ASSERT(rec->cmd.finished && rec->cmd.started);

  // step 3 - memcpy!
  memcpy(rec->qd->serve_data->buff, sendBuff, sendSize);

  // step 4 - setup command
  rec->cmd.buff = rec->qd->serve_data->buff;
  rec->cmd.size = sendSize;
  rec->cmd.fno = fno;
  rec->cmd.copy_back_buff = recvBuff;
  rec->cmd.copy_back_size = recvSize;
  rec->cmd.started = false;
  rec->cmd.finished = false;

  iWakeupThread(rec->thread_to_wake);

  sif_mtx.unlock();
}
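
// RPC handshake: sif_rpc copies the caller's arguments into the channel's serve buffer,
// posts the command as not-started/not-finished, and wakes the serving thread. rpc_loop
// (below) snapshots the command under sif_mtx, marks the shared record as started, runs the
// registered handler for a freshly posted command, copies the reply back to the caller's
// buffer, marks the record finished, and sleeps until the next wakeup.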

void IOP_Kernel::rpc_loop(iop::sceSifQueueData* qd) {
  while (true) {
    bool got_cmd = false;
    SifRpcCommand cmd;
    sif_rpc_handler func = nullptr;

    // get command and mark it as started if we get it
    sif_mtx.lock();
    for (auto& r : sif_records) {
      if (r.qd == qd) {
        cmd = r.cmd;
        got_cmd = true;
        r.cmd.started = true;
        func = r.qd->serve_data->func;
      }
    }
    sif_mtx.unlock();

    // handle command
    if (got_cmd) {
      if (!cmd.started) {
        // cf
        ASSERT(func);
        auto data = func(cmd.fno, cmd.buff, cmd.size);
        if (cmd.copy_back_buff && cmd.copy_back_size) {
          memcpy(cmd.copy_back_buff, data, cmd.copy_back_size);
        }

        sif_mtx.lock();
        for (auto& r : sif_records) {
          if (r.qd == qd) {
            ASSERT(r.cmd.started);
            r.cmd.finished = true;
          }
        }
        sif_mtx.unlock();
      }
    }

    SleepThread();
  }
}