From 21d3a0836a24e063a0560d6bd979a97ccdf5ae2f Mon Sep 17 00:00:00 2001
From: Daniel Rossier
Date: Fri, 17 Apr 2020 10:43:45 +0200
Subject: [PATCH] Added timed-down control on semaphore

---
 so3/apps/main_thread.c   | 26 ++++++++++++++++----
 so3/arch/arm/exception.S |  8 +++++++
 so3/arch/arm/fault.c     | 11 +++++----
 so3/include/delay.h      |  3 +++
 so3/include/schedule.h   |  7 ++++--
 so3/include/semaphore.h  |  3 +++
 so3/include/thread.h     |  4 +++-
 so3/include/timer.h      |  5 +++-
 so3/ipc/semaphore.c      | 51 ++++++++++++++++++++++++++++++++++++++--
 so3/kernel/delay.c       | 28 ++++++++++++++++++----
 so3/kernel/mutex.c       |  2 +-
 so3/kernel/schedule.c    | 24 +++++++++++++++----
 so3/kernel/timer.c       |  1 -
 13 files changed, 148 insertions(+), 25 deletions(-)

diff --git a/so3/apps/main_thread.c b/so3/apps/main_thread.c
index 983e141c6..9ff07f658 100644
--- a/so3/apps/main_thread.c
+++ b/so3/apps/main_thread.c
@@ -100,22 +100,33 @@ int thread_example(void *arg)
 }
 
 int fn1(void *args) {
-	int i = 0;
+	//int i = 0;
 
 	printk("Thread #1\n");
 
+	//sem_down(&sem);
 	while (1) {
-		printk("--> th 1: %d\n", i++);
+		sem_down(&sem);
+		msleep(499);
+		sem_up(&sem);
+
+//		printk("--> th 1: %d\n", i++);
 	}
 
 	return 0;
 }
 
 int fn2(void *args) {
-	int i = 0;
+	//int i = 0;
+	int ret;
 
 	printk("Thread #2\n");
 
 	while (1) {
-		printk("--> th 2: %d\n", i++);
+		ret = sem_timeddown(&sem, MILLISECS(500));
+
+		printk("## ret = %d\n", ret);
+		if (ret == 0)
+			sem_up(&sem);
+//		printk("--> th 2: %d\n", i++);
 	}
 
 	return 0;
@@ -170,10 +181,14 @@ int main_kernel(void *args)
 	}
 #endif
 
-#if 0
+#if 1
+	sem_init(&sem);
+
 	kernel_thread(fn1, "fn1", NULL, 0);
 	kernel_thread(fn2, "fn2", NULL, 0);
 #endif
+
+#if 0
 	/* Another test code */
 	{
 		int id[50], i;
@@ -186,6 +201,7 @@ int main_kernel(void *args)
 
 	while (true);
 	}
+#endif
 
 	return 0;
 }
diff --git a/so3/arch/arm/exception.S b/so3/arch/arm/exception.S
index 2478960b8..6cd721b88 100644
--- a/so3/arch/arm/exception.S
+++ b/so3/arch/arm/exception.S
@@ -283,16 +283,24 @@ __get_syscall_args_ext:
 
 .align 5
 prefetch_abort:
+	@ Call the C prefetch abort handler with the following args:
+	@ r0 = IFAR, r1 = IFSR, r2 = LR
+
 	mrc	p15, 0, r0, c6, c0, 2	@ get IFAR
 	mrc	p15, 0, r1, c5, c0, 1	@ get IFSR
+	mov	r2, lr
 
 	b	__prefetch_abort
 
 .align 5
 data_abort:
+	@ Call the C data abort handler with the following args:
+	@ r0 = FAR, r1 = FSR, r2 = LR
+
 	mrc	p15, 0, r1, c5, c0, 0	@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
+	mov	r2, lr
 
 	b	__data_abort
 
diff --git a/so3/arch/arm/fault.c b/so3/arch/arm/fault.c
index 2fc0e1fe9..9ae3f1a9d 100644
--- a/so3/arch/arm/fault.c
+++ b/so3/arch/arm/fault.c
@@ -30,16 +30,19 @@ void dump_backtrace_entry(unsigned long where, unsigned long from)
 	lprintk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 }
 
-void __prefetch_abort(uint32_t ifar, uint32_t ifsr) {
-	lprintk("### prefetch abort exception ifar: %x ifsr: %x ###\n", ifar, ifsr);
+void __prefetch_abort(uint32_t ifar, uint32_t ifsr, uint32_t lr) {
+	lprintk("### prefetch abort exception ifar: %x ifsr: %x lr(r14)-8: %x ###\n", ifar, ifsr, lr-8);
 
 	__backtrace();
 
 	kernel_panic();
 }
 
-void __data_abort(uint32_t far, uint32_t fsr) {
-	lprintk("### abort exception far: %x fsr: %x ###\n", far, fsr);
+void __data_abort(uint32_t far, uint32_t fsr, uint32_t lr) {
+	lprintk("### abort exception far: %x fsr: %x lr(r14)-8: %x ###\n", far, fsr, lr-8);
+
+	__backtrace();
+
 	kernel_panic();
 }
 
diff --git a/so3/include/delay.h b/so3/include/delay.h
index c6026ac96..a923a949f 100644
--- a/so3/include/delay.h
+++ b/so3/include/delay.h
@@ -20,11 +20,14 @@
 #define DELAY_H
 
 #include
+#include
 
 /**
  * Active wait based on the jiffy_usec
  */
 void udelay(u64 us);
+
+void sleep(u64 ns);
 void msleep(uint32_t);
 void usleep(u64 us);
 
diff --git a/so3/include/schedule.h b/so3/include/schedule.h
index fb73cf3b5..178aa9d33 100644
--- a/so3/include/schedule.h
+++ b/so3/include/schedule.h
@@ -28,8 +28,6 @@
 extern u64 jiffies;
 extern u64 jiffies_ref;
 
-struct tcb;
-
 extern struct tcb *tcb_idle;
 
 void scheduler_init(void);
@@ -62,6 +60,11 @@ static inline struct tcb *current(void) {
 }
 
 struct tcb *current(void);
+
+static inline void reset_thread_timeout(void) {
+	current_thread->timeout = 0ull;
+}
+
 void remove_ready(struct tcb *tcb);
 
 void schedule_isr(void);
diff --git a/so3/include/semaphore.h b/so3/include/semaphore.h
index 735c4cc04..f53e5fbf7 100644
--- a/so3/include/semaphore.h
+++ b/so3/include/semaphore.h
@@ -21,6 +21,7 @@
 
 #include
 #include
+#include
 
 #include
 
@@ -43,6 +44,8 @@ typedef struct {
 
 void sem_up(sem_t *sem);
 void sem_down(sem_t *sem);
+int sem_timeddown(sem_t *sem, uint64_t timeout);
+
 void sem_init(sem_t *sem);
 
 #endif /* SEMAPHORE_H */
diff --git a/so3/include/thread.h b/so3/include/thread.h
index 9f76a39c1..e88a9fe10 100644
--- a/so3/include/thread.h
+++ b/so3/include/thread.h
@@ -35,7 +35,6 @@
 
 #include
 #include
-#include
 
 typedef enum { THREAD_STATE_NEW, THREAD_STATE_READY, THREAD_STATE_RUNNING, THREAD_STATE_WAITING, THREAD_STATE_ZOMBIE } thread_state_t;
 
 typedef unsigned int thread_t;
@@ -60,6 +59,9 @@ struct tcb {
 	 * the default priority is used. */
 	uint32_t prio;
 
+	/* Timeout value to keep track of possible scheduling after a timeout. */
+	int64_t timeout;
+
 	/* Threaded function */
 	int (*th_fn)(void *);
 	void *th_arg;
diff --git a/so3/include/timer.h b/so3/include/timer.h
index 6e2dca8ed..394c275ec 100644
--- a/so3/include/timer.h
+++ b/so3/include/timer.h
@@ -21,8 +21,10 @@
 
 #include
 
-#define NSECS		1000000000ull
+#ifndef TIMER_H
+#define TIMER_H
 
+#define NSECS		1000000000ull
 #define NOW()		((u64) get_s_time())
 
 #define SECONDS(_s)	((u64)((_s) * 1000000000ull))
@@ -115,3 +117,4 @@ void clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
 
 int do_nanosleep(const struct timespec *req, struct timespec *rem);
 
+#endif /* TIMER_H */
diff --git a/so3/ipc/semaphore.c b/so3/ipc/semaphore.c
index b80d152bb..091b7cf1a 100644
--- a/so3/ipc/semaphore.c
+++ b/so3/ipc/semaphore.c
@@ -19,19 +19,26 @@
 #include
 #include
 #include
+#include
+#include
 
 /*
  * Semaphore down operation - Prepare to enter a critical section
  * by means of the semaphore paradigm.
+ * The timeout is a delay after which the thread is woken up even if the semaphore has not been acquired.
+ * The timeout is expressed in nanoseconds.
+ * Returns the value 0 in case of successful semaphore acquisition, -1 in case of timeout.
  */
-void sem_down(sem_t *sem) {
+int sem_timeddown(sem_t *sem, uint64_t timeout) {
 	queue_thread_t q_tcb;
+	struct list_head *pos;
 
 	for (;;) {
 
 		mutex_lock(&sem->lock);
 
 		if (sem->val <= 0) {
+
 			q_tcb.tcb = current();
 
 			/*
 			 * We only attempt the xchg if the count is non-negative in order
@@ -46,17 +53,57 @@ void sem_down(sem_t *sem) {
 
 			mutex_unlock(&sem->lock);
 
-			waiting();
+			if (!timeout)
+				waiting();
+			else {
+				sleep(timeout);
+
+				/* If the semaphore became available, we will have a positive timeout and
+				 * we can go ahead. However, it might be the case that the semaphore
+				 * gets busy again, and we start a new suspend with an up-to-date timeout.
+				 */
+
+				if (current()->timeout <= 0) {
+
+					/* We have to remove ourselves from the waiting list.
+					 */
+
+					mutex_lock(&sem->lock);
+
+					/* Make sure the entry has not been deleted right before the timeout... it might happen! */
+					list_for_each(pos, &sem->tcb_list)
+						if (pos == &q_tcb.list)
+							break;
+
+					if (pos == &q_tcb.list)
+						list_del(&q_tcb.list);
+
+					mutex_unlock(&sem->lock);
+
+					/* This is possible only if we were woken up by the timer deadline. */
+					return -1;
+				}
+
+				timeout = current()->timeout;
+			}
 
 		} else {
 			atomic_set(&sem->count, 0);
 			sem->val--;
+
 			mutex_unlock(&sem->lock);
 			break;
 		}
 	}
+
+	return 0;
+}
+
+/*
+ * Semaphore down operation - Prepare to enter a critical section
+ * by means of the semaphore paradigm.
+ */
+void sem_down(sem_t *sem) {
+	sem_timeddown(sem, 0ull);
 }
 
 void sem_up(sem_t *sem) {
diff --git a/so3/kernel/delay.c b/so3/kernel/delay.c
index 1da0b3992..6871536c1 100644
--- a/so3/kernel/delay.c
+++ b/so3/kernel/delay.c
@@ -40,12 +40,13 @@ void udelay(u64 us) {
 
 /*
  * Timer callback which will awake the thread.
+ * IRQs are off.
  */
 void delay_handler(void *arg) {
 	tcb_t *tcb = (tcb_t *) arg;
 
 	/*
 	 * delay_handler may be called in two different ways; the first one (and more standard way)
-	 * is from an interrupt context during the softirq action processing. In this case,
+	 * is right after an interrupt context during the softirq action processing. In this case,
 	 * it is *sure* that the thread is in waiting state (issued from a previous sleep function which
 	 * is set with IRQs off). The second case corresponds to a call along the msleep() path during the set_timer()
@@ -53,11 +54,17 @@ void delay_handler(void *arg) {
 	 * initialization. In this case, the handler can be called if the deadline already expired and the thread will
 	 * not be in waiting state yet.
 	 */
 
 	if (tcb->state == THREAD_STATE_WAITING) {
+
+		/* If the thread is submitted to a waiting timeout,
+		 * the value is re-adjusted here.
+		 */
+
+		tcb->timeout = tcb->timeout - NOW();
+
 		ready(tcb);
 
 		/* Trigger a schedule to give a chance to the waiter */
 		raise_softirq(SCHEDULE_SOFTIRQ);
-
 	}
 }
@@ -70,12 +77,21 @@ static void __sleep(u64 ns) {
 
 	/* Create a specific timer attached to this thread */
 	init_timer(&__timer, delay_handler, current());
-	set_timer(&__timer, NOW() + ns);
+
+	current()->timeout = NOW() + ns;
+	set_timer(&__timer, current()->timeout);
 
 	/* Put the thread in waiting state *only* if the timer still makes sense. */
-	if (__timer.status == TIMER_STATUS_in_list)
+	if (__timer.status == TIMER_STATUS_in_list) {
 		waiting();
 
+		/* We are resumed, but not necessarily by the timer handler (in case of a semaphore
+		 * timeout-based synchronization mechanism, we might get the lock *before* the timeout).
+		 * In this case, we have to clean the timer.
+ */ + stop_timer(&__timer); + + } + local_irq_restore(flags); } @@ -94,6 +110,10 @@ void usleep(u64 us) { __sleep(MICROSECS(us)); } +void sleep(u64 ns) { + __sleep(ns); +} + int do_nanosleep(const struct timespec *req, struct timespec *rem) { if (req->tv_nsec != 0) diff --git a/so3/kernel/mutex.c b/so3/kernel/mutex.c index e88ddb3a7..076feea33 100644 --- a/so3/kernel/mutex.c +++ b/so3/kernel/mutex.c @@ -138,7 +138,7 @@ void mutex_unlock(struct mutex *lock) { list_del(&curr->list); - wake_up(curr->tcb); + ready(curr->tcb); } diff --git a/so3/kernel/schedule.c b/so3/kernel/schedule.c index a504a696d..de6630166 100644 --- a/so3/kernel/schedule.c +++ b/so3/kernel/schedule.c @@ -96,8 +96,16 @@ void preempt_enable(void) { void ready(tcb_t *tcb) { uint32_t flags; queue_thread_t *cur; + bool already_locked; - spin_lock_irqsave(&schedule_lock, flags); + /* We check if we are in a call path where the lock was already acquired. + * These are rare cases where consistency check is performed before calling + * ready() like waking up threads (see wake_up()). + */ + already_locked = (spin_is_locked(&schedule_lock) ? true : false); + + if (!already_locked) + spin_lock_irqsave(&schedule_lock, flags); tcb->state = THREAD_STATE_READY; @@ -109,7 +117,8 @@ void ready(tcb_t *tcb) { /* Insert the thread at the end of the list */ list_add_tail(&cur->list, &readyThreads); - spin_unlock_irqrestore(&schedule_lock, flags); + if (!already_locked) + spin_unlock_irqrestore(&schedule_lock, flags); } /* @@ -191,10 +200,17 @@ void remove_zombie(struct tcb *tcb) { /* * Wake up a thread which is in waiting state. - * If the thread passed as argument is not sleeping, we just call schedule(). + * If the thread passed as argument is not sleeping, we just skip it. */ void wake_up(struct tcb *tcb) { - ready(tcb); + uint32_t flags; + + spin_lock_irqsave(&schedule_lock, flags); + + if (tcb->state == THREAD_STATE_WAITING) + ready(tcb); + + spin_unlock_irqrestore(&schedule_lock, flags); } /* diff --git a/so3/kernel/timer.c b/so3/kernel/timer.c index b88445fa5..85f0b59d9 100644 --- a/so3/kernel/timer.c +++ b/so3/kernel/timer.c @@ -22,7 +22,6 @@ #include #include -#include #include #include #include -- GitLab