rp2: Migrate to the new mp_thread_recursive_mutex_t.

Necessary for GC support; also refactors pendsv usage.

This work was funded through GitHub Sponsors.

Signed-off-by: Angus Gratton <angus@redyak.com.au>
This commit is contained in:
Angus Gratton
2024-12-10 14:51:10 +11:00
committed by Damien George
parent 4bcbe88e74
commit 3bfedd0f4a
3 changed files with 28 additions and 10 deletions

View File

@@ -84,10 +84,10 @@ void mp_thread_deinit(void) {
assert(get_core_num() == 0);
// Must ensure that core1 is not currently holding the GC lock, otherwise
// it will be terminated while holding the lock.
mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1);
mp_thread_recursive_mutex_lock(&MP_STATE_MEM(gc_mutex), 1);
multicore_reset_core1();
core1_entry = NULL;
mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex));
mp_thread_recursive_mutex_unlock(&MP_STATE_MEM(gc_mutex));
}
void mp_thread_gc_others(void) {

View File

@@ -26,9 +26,10 @@
#ifndef MICROPY_INCLUDED_RP2_MPTHREADPORT_H
#define MICROPY_INCLUDED_RP2_MPTHREADPORT_H
#include "pico/mutex.h"
#include "mutex_extra.h"
typedef struct mutex mp_thread_mutex_t;
typedef recursive_mutex_nowait_t mp_thread_recursive_mutex_t;
extern void *core_state[2];
@@ -65,4 +66,21 @@ static inline void mp_thread_mutex_unlock(mp_thread_mutex_t *m) {
mutex_exit(m);
}
// Initialise a recursive mutex. Thin wrapper over the pico-sdk
// "nowait" recursive mutex variant (see mutex_extra.h), which does not
// set the CPU event bit on unlock — required by the pendsv code.
static inline void mp_thread_recursive_mutex_init(mp_thread_recursive_mutex_t *m) {
recursive_mutex_nowait_init(m);
}
// Acquire a recursive mutex.
//
// wait != 0: block until the mutex is acquired, always returns 1.
// wait == 0: try once; returns nonzero on success, 0 if the other
//            core currently holds the mutex.
static inline int mp_thread_recursive_mutex_lock(mp_thread_recursive_mutex_t *m, int wait) {
    if (!wait) {
        // Non-blocking attempt; owner token is not needed, pass NULL.
        return recursive_mutex_nowait_try_enter(m, NULL);
    }
    recursive_mutex_nowait_enter_blocking(m);
    return 1;
}
// Release one level of a recursive mutex previously acquired with
// mp_thread_recursive_mutex_lock(). Must be called by the owning core.
static inline void mp_thread_recursive_mutex_unlock(mp_thread_recursive_mutex_t *m) {
recursive_mutex_nowait_exit(m);
}
#endif // MICROPY_INCLUDED_RP2_MPTHREADPORT_H

View File

@@ -26,7 +26,7 @@
#include <assert.h>
#include "py/mpconfig.h"
#include "mutex_extra.h"
#include "py/mpthread.h"
#include "pendsv.h"
#if PICO_RP2040
@@ -47,21 +47,21 @@ void PendSV_Handler(void);
// Using the nowait variant here as softtimer updates PendSV from the loop of mp_wfe_or_timeout(),
// where we don't want the CPU event bit to be set.
static recursive_mutex_nowait_t pendsv_mutex;
static mp_thread_recursive_mutex_t pendsv_mutex;
void pendsv_init(void) {
recursive_mutex_nowait_init(&pendsv_mutex);
mp_thread_recursive_mutex_init(&pendsv_mutex);
}
void pendsv_suspend(void) {
// Recursive Mutex here as either core may call pendsv_suspend() and expect
// both mutual exclusion (other core can't enter pendsv_suspend() at the
// same time), and that no PendSV handler will run.
recursive_mutex_nowait_enter_blocking(&pendsv_mutex);
mp_thread_recursive_mutex_lock(&pendsv_mutex, 1);
}
void pendsv_resume(void) {
recursive_mutex_nowait_exit(&pendsv_mutex);
mp_thread_recursive_mutex_unlock(&pendsv_mutex);
// Run pendsv if needed. Find an entry with a dispatch and call pendsv dispatch
// with it. If pendsv runs it will service all slots.
@@ -97,7 +97,7 @@ void pendsv_schedule_dispatch(size_t slot, pendsv_dispatch_t f) {
// PendSV interrupt handler to perform background processing.
void PendSV_Handler(void) {
if (!recursive_mutex_nowait_try_enter(&pendsv_mutex, NULL)) {
if (!mp_thread_recursive_mutex_lock(&pendsv_mutex, 0)) {
// Failure here means core 1 holds pendsv_mutex. ISR will
// run again after core 1 calls pendsv_resume().
return;
@@ -117,5 +117,5 @@ void PendSV_Handler(void) {
}
}
recursive_mutex_nowait_exit(&pendsv_mutex);
mp_thread_recursive_mutex_unlock(&pendsv_mutex);
}