From fcebe4c657300a19bf6288b643b06ac8ae07c81c Mon Sep 17 00:00:00 2001
From: yueling hu <502966985@qq.com>
Date: Tue, 27 May 2025 19:12:27 +0800
Subject: [PATCH 1/2] Fix the cmake tuple error and the llvm-arm build errors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 tools/cmake.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tools/cmake.py b/tools/cmake.py
index e998a48e8cd..6e8b9dcfc8d 100644
--- a/tools/cmake.py
+++ b/tools/cmake.py
@@ -45,7 +45,7 @@ def GenerateCFiles(env, project, project_name):
     tool_path_conv["CMAKE_ASM_COMPILER"] = tool_path_conv_helper(rtconfig.AS)
     tool_path_conv["CMAKE_AR"] = tool_path_conv_helper(rtconfig.AR)
     tool_path_conv["CMAKE_LINKER"] = tool_path_conv_helper(rtconfig.LINK)
-    if rtconfig.PLATFORM in ['gcc']:
+    if rtconfig.PLATFORM in ['gcc','llvm-arm']:
         tool_path_conv["CMAKE_SIZE"] = tool_path_conv_helper(rtconfig.SIZE)
         tool_path_conv["CMAKE_OBJDUMP"] = tool_path_conv_helper(rtconfig.OBJDUMP)
         tool_path_conv["CMAKE_OBJCOPY"] = tool_path_conv_helper(rtconfig.OBJCPY)
@@ -99,7 +99,7 @@ def GenerateCFiles(env, project, project_name):
             AS += ".exe"
             AR += ".exe"
             LINK += ".exe"
-            if rtconfig.PLATFORM in ['gcc']:
+            if rtconfig.PLATFORM in ['gcc','llvm-arm']:
                 SIZE += ".exe"
                 OBJDUMP += ".exe"
                 OBJCOPY += ".exe"
@@ -129,7 +129,7 @@ def GenerateCFiles(env, project, project_name):
         cm_file.write("SET(CMAKE_CXX_FLAGS \""+ CXXFLAGS + "\")\n")
         cm_file.write("SET(CMAKE_CXX_COMPILER_WORKS TRUE)\n\n")

-        if rtconfig.PLATFORM in ['gcc']:
+        if rtconfig.PLATFORM in ['gcc','llvm-arm']:
            cm_file.write("SET(CMAKE_OBJCOPY \""+ OBJCOPY + "\")\n")
            cm_file.write("SET(CMAKE_SIZE \""+ SIZE + "\")\n\n")
        elif rtconfig.PLATFORM in ['armcc', 'armclang']:
@@ -137,7 +137,7 @@ def GenerateCFiles(env, project, project_name):
         LINKER_FLAGS = ''
         LINKER_LIBS = ''
-        if rtconfig.PLATFORM in ['gcc']:
+        if rtconfig.PLATFORM in ['gcc','llvm-arm']:
             LINKER_FLAGS += '-T'
         elif rtconfig.PLATFORM in ['armcc', 'armclang']:
             LINKER_FLAGS += '--scatter'
@@ -186,7 +186,7 @@ def GenerateCFiles(env, project, project_name):

         cm_file.write("ADD_DEFINITIONS(\n")
         for i in env['CPPDEFINES']:
-            cm_file.write("\t-D" + i + "\n")
+            cm_file.write("\t-D" + str(i).replace("(", "").replace(")","").replace(",", " ") + "\n")
         cm_file.write(")\n\n")

         libgroups = []
@@ -290,7 +290,7 @@ def GenerateCFiles(env, project, project_name):
             cm_file.write("\n")

         cm_file.write("# Interface library search paths\n")
-        if rtconfig.PLATFORM in ['gcc']:
+        if rtconfig.PLATFORM in ['gcc','llvm-arm']:
             for group in libgroups:
                 if not 'LIBPATH' in group.keys():
                     continue
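
Note on the CPPDEFINES hunk: SCons allows entries of env['CPPDEFINES'] to be plain
strings or ('NAME', value) tuples, and the old '"\t-D" + i' concatenation raises
'TypeError: can only concatenate str (not "tuple") to str' on the tuple form -- the
"tuple error" named in the subject. A minimal sketch of the new flattening, with
hypothetical define entries:

    # Sketch of the new write() expression (hypothetical CPPDEFINES values).
    defines = ["RT_USING_COMPONENTS_INIT", ("RT_ALIGN_SIZE", 8)]
    for i in defines:
        # old code: "\t-D" + i  -> TypeError on the tuple entry
        flat = str(i).replace("(", "").replace(")", "").replace(",", " ")
        print("-D" + flat)
    # -DRT_USING_COMPONENTS_INIT
    # -D'RT_ALIGN_SIZE'  8

As the second output line shows, a stringified tuple keeps its quotes, so the change
guarantees only that generation no longer crashes, not a NAME=VALUE rendering.
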
From c3d99902994c14cb046a3466503a805414650ac7 Mon Sep 17 00:00:00 2001
From: yueling hu <502966985@qq.com>
Date: Wed, 28 May 2025 22:06:55 +0800
Subject: [PATCH 2/2] Fix tick overflow, optimize scheduling performance, and
 fix lost-scheduling issues
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/ipc.c          | 116 +++-------------
 src/mempool.c      |   8 +-
 src/scheduler_up.c | 328 ++++++++++++++++++++++++---------------------
 src/signal.c       |   7 -
 src/thread.c       |  20 +--
 src/timer.c        |   8 +-
 6 files changed, 203 insertions(+), 284 deletions(-)

diff --git a/src/ipc.c b/src/ipc.c
index 35c0146dd33..ae807ddf228 100644
--- a/src/ipc.c
+++ b/src/ipc.c
@@ -626,9 +626,6 @@ static rt_err_t _rt_sem_take(rt_sem_t sem, rt_int32_t timeout, int suspend_flag)
         /* enable interrupt */
         rt_spin_unlock_irqrestore(&(sem->spinlock), level);

-        /* do schedule */
-        rt_schedule();
-
         if (thread->error != RT_EOK)
         {
             return thread->error > 0 ? -thread->error : thread->error;
@@ -695,16 +692,12 @@ RTM_EXPORT(rt_sem_trytake);
 rt_err_t rt_sem_release(rt_sem_t sem)
 {
     rt_base_t level;
-    rt_bool_t need_schedule;
-
     /* parameter check */
     RT_ASSERT(sem != RT_NULL);
     RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);

     RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(sem->parent.parent)));

-    need_schedule = RT_FALSE;
-
     level = rt_spin_lock_irqsave(&(sem->spinlock));

     LOG_D("thread %s releases sem:%s, which value is: %d",
@@ -716,7 +709,6 @@ rt_err_t rt_sem_release(rt_sem_t sem)
     {
         /* resume the suspended thread */
         rt_susp_list_dequeue(&(sem->parent.suspend_thread), RT_EOK);
-        need_schedule = RT_TRUE;
     }
     else
     {
@@ -733,10 +725,6 @@ rt_err_t rt_sem_release(rt_sem_t sem)

     rt_spin_unlock_irqrestore(&(sem->spinlock), level);

-    /* resume a thread, re-schedule */
-    if (need_schedule == RT_TRUE)
-        rt_schedule();
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_sem_release);
@@ -778,14 +766,12 @@ rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
         /* set new value */
         sem->value = (rt_uint16_t)value;
         rt_spin_unlock_irqrestore(&(sem->spinlock), level);
-        rt_schedule();

         return RT_EOK;
     }
     else if (cmd == RT_IPC_CMD_SET_VLIMIT)
     {
         rt_ubase_t max_value;
-        rt_bool_t need_schedule = RT_FALSE;

         max_value = (rt_uint16_t)((rt_uintptr_t)arg);
         if (max_value > RT_SEM_VALUE_MAX || max_value < 1)
@@ -800,18 +786,12 @@ rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
         {
             /* resume all waiting thread */
             rt_susp_list_resume_all(&sem->parent.suspend_thread, RT_ERROR);
-            need_schedule = RT_TRUE;
         }
     }

     /* set new value */
     sem->max_value = max_value;
     rt_spin_unlock_irqrestore(&(sem->spinlock), level);

-    if (need_schedule)
-    {
-        rt_schedule();
-    }
-
     return RT_EOK;
 }
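
Note: this patch deletes the explicit rt_schedule() calls after IPC wakeups, here and
in the mutex, event, mailbox, and message-queue hunks that follow. The assumption is
the uniprocessor lock mapping: on a UP build the object spinlock nests the scheduler
lock, so dropping the lock is what actually triggers the switch. In outline
(simplified sketch, not verbatim kernel source):

    rt_base_t level = rt_spin_lock_irqsave(&sem->spinlock);    /* nests rt_enter_critical() */
    rt_susp_list_dequeue(&sem->parent.suspend_thread, RT_EOK); /* waiter becomes READY      */
    rt_spin_unlock_irqrestore(&sem->spinlock, level);          /* rt_exit_critical() runs
                                                                  the pending rt_schedule() */
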
@@ -1445,9 +1425,6 @@ static rt_err_t _rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout, int suspend

             rt_spin_unlock(&(mutex->spinlock));

-            /* do schedule */
-            rt_schedule();
-
             rt_spin_lock(&(mutex->spinlock));

             if (mutex->owner == thread)
@@ -1590,14 +1567,10 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
 {
     rt_sched_lock_level_t slvl;
     struct rt_thread *thread;
-    rt_bool_t need_schedule;
-
     /* parameter check */
     RT_ASSERT(mutex != RT_NULL);
     RT_ASSERT(rt_object_get_type(&mutex->parent.parent) == RT_Object_Class_Mutex);

-    need_schedule = RT_FALSE;
-
     /* only thread could release mutex because we need test the ownership */
     RT_DEBUG_IN_THREAD_CONTEXT;
@@ -1631,8 +1604,7 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
         rt_list_remove(&mutex->taken_list);

         /* whether change the thread priority */
-        need_schedule = _check_and_update_prio(thread, mutex);
-
+        _check_and_update_prio(thread, mutex);
         /* wakeup suspended thread */
         if (!rt_list_isempty(&mutex->parent.suspend_thread))
         {
@@ -1683,8 +1655,6 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
             {
                 mutex->priority = 0xff;
             }
-
-            need_schedule = RT_TRUE;
         }
         else
         {
@@ -1707,10 +1677,6 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)

     rt_spin_unlock(&(mutex->spinlock));

-    /* perform a schedule */
-    if (need_schedule == RT_TRUE)
-        rt_schedule();
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_mutex_release);
@@ -1968,7 +1934,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
     rt_sched_lock_level_t slvl;
     rt_base_t level;
     rt_base_t status;
-    rt_bool_t need_schedule;
     rt_uint32_t need_clear_set = 0;

     /* parameter check */
@@ -1978,8 +1943,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
     if (set == 0)
         return -RT_ERROR;

-    need_schedule = RT_FALSE;
-
     level = rt_spin_lock_irqsave(&(event->spinlock));

     /* set event */
@@ -2039,8 +2002,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
                 rt_sched_thread_ready(thread);
                 thread->error = RT_EOK;

-                /* need do a scheduling */
-                need_schedule = RT_TRUE;
             }
         }
         if (need_clear_set)
@@ -2052,10 +2013,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
     rt_sched_unlock(slvl);
     rt_spin_unlock_irqrestore(&(event->spinlock), level);

-    /* do a schedule */
-    if (need_schedule == RT_TRUE)
-        rt_schedule();
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_event_send);
@@ -2195,9 +2152,6 @@ static rt_err_t _rt_event_recv(rt_event_t event,

         rt_spin_unlock_irqrestore(&(event->spinlock), level);

-        /* do a schedule */
-        rt_schedule();
-
         if (thread->error != RT_EOK)
         {
             /* return error */
@@ -2284,8 +2238,6 @@ rt_err_t rt_event_control(rt_event_t event, int cmd, void *arg)

         rt_spin_unlock_irqrestore(&(event->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
@@ -2567,7 +2519,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
 {
     struct rt_thread *thread;
     rt_base_t level;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     rt_err_t ret;

     /* parameter check */
@@ -2578,7 +2530,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
     RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;
     /* get current thread */
     thread = rt_thread_self();

@@ -2622,7 +2574,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("mb_send_wait: start timer of thread:%s",
                   thread->parent.name);
@@ -2635,9 +2587,6 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
         }
         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* resume from suspend state */
         if (thread->error != RT_EOK)
         {
@@ -2650,8 +2599,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -2682,8 +2630,6 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mb->spinlock), level);
@@ -2806,8 +2752,6 @@ rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value)

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mb->spinlock), level);
@@ -2846,7 +2790,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
 {
     struct rt_thread *thread;
     rt_base_t level;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     rt_err_t ret;

     /* parameter check */
@@ -2857,7 +2801,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
     RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;
     /* get current thread */
     thread = rt_thread_self();

@@ -2902,7 +2846,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("mb_recv: start timer of thread:%s",
                   thread->parent.name);
@@ -2916,9 +2860,6 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
         }
         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* resume from suspend state */
         if (thread->error != RT_EOK)
         {
@@ -2930,8 +2871,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -2960,8 +2900,6 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo

         RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mb->parent.parent)));

-        rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mb->spinlock), level);
@@ -3029,8 +2967,6 @@ rt_err_t rt_mb_control(rt_mailbox_t mb, int cmd, void *arg)

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
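
Note: each tick_delta above is replaced by a tick_stamp plus rt_tick_get_delta(). The
helper is assumed to behave like the sketch below; since rt_tick_t is unsigned, the
modular subtraction stays correct even if the tick counter wraps between the stamp
and the read, which is the overflow case this patch targets:

    /* assumed behavior of rt_tick_get_delta() (sketch) */
    static rt_tick_t tick_get_delta_sketch(rt_tick_t stamp)
    {
        /* unsigned subtraction is modulo 2^32: a rollover between
         * "stamp" and "now" still yields the elapsed tick count */
        return rt_tick_get() - stamp;
    }
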
@@ -3382,7 +3318,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
 {
     rt_base_t level;
     struct rt_mq_message *msg;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     struct rt_thread *thread;
     rt_err_t ret;

@@ -3402,7 +3338,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         return -RT_ERROR;

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;
     /* get current thread */
     thread = rt_thread_self();

@@ -3447,7 +3383,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("mq_send_wait: start timer of thread:%s",
                   thread->parent.name);
@@ -3461,9 +3397,6 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         }
         rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* resume from suspend state */
         if (thread->error != RT_EOK)
         {
@@ -3475,8 +3408,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -3556,8 +3488,6 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,

         rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mq->spinlock), level);
@@ -3714,8 +3644,6 @@ rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size)

         rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
@@ -3765,7 +3693,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
     struct rt_thread *thread;
     rt_base_t level;
     struct rt_mq_message *msg;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     rt_err_t ret;
     rt_size_t len;

@@ -3781,7 +3709,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
     RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;
     /* get current thread */
     thread = rt_thread_self();
     RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(mq->parent.parent)));
@@ -3826,7 +3754,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("set thread:%s to timer list",
                   thread->parent.name);
@@ -3840,9 +3768,6 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
         }
         rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* recv message */
         if (thread->error != RT_EOK)
         {
@@ -3855,8 +3780,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -3905,8 +3829,6 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,

         RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mq->parent.parent)));

-        rt_schedule();
-
         return len;
     }

@@ -4018,9 +3940,7 @@ rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg)
         mq->entry = 0;

         rt_spin_unlock_irqrestore(&(mq->spinlock), level);
-
-        rt_schedule();
-
+
         return RT_EOK;
     }
diff --git a/src/mempool.c b/src/mempool.c
index 28bdb6cf896..91dbbc58413 100644
--- a/src/mempool.c
+++ b/src/mempool.c
@@ -327,15 +327,12 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
         /* enable interrupt */
         rt_spin_unlock_irqrestore(&(mp->spinlock), level);

-        /* do a schedule */
-        rt_schedule();
-
         if (thread->error != RT_EOK)
             return RT_NULL;

         if (time > 0)
         {
-            time -= rt_tick_get() - before_sleep;
+            time -= rt_tick_get_delta(before_sleep);
             if (time < 0)
                 time = 0;
         }
@@ -397,9 +394,6 @@ void rt_mp_free(void *block)
         {
             rt_spin_unlock_irqrestore(&(mp->spinlock), level);

-            /* do a schedule */
-            rt_schedule();
-
             return;
         }
         rt_spin_unlock_irqrestore(&(mp->spinlock), level);

diff --git a/src/scheduler_up.c b/src/scheduler_up.c
index 95e3cc7fb64..44adbaf8d00 100644
--- a/src/scheduler_up.c
+++ b/src/scheduler_up.c
@@ -36,20 +36,21 @@
 #include <rtthread.h>
 #include <rthw.h>

-#define DBG_TAG "kernel.scheduler"
-#define DBG_LVL DBG_INFO
+#define DBG_TAG    "kernel.scheduler"
+#define DBG_LVL    DBG_INFO
 #include <rtdbg.h>

-rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
+rt_list_t   rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
 rt_uint32_t rt_thread_ready_priority_group;
+rt_base_t   rt_thread_ready_highest_priority;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
 rt_uint8_t rt_thread_ready_table[32];
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */

 extern volatile rt_atomic_t rt_interrupt_nest;
-static rt_int16_t rt_scheduler_lock_nest;
-rt_uint8_t rt_current_priority;
+static rt_int16_t  rt_scheduler_lock_nest;
+rt_uint8_t         rt_current_priority;

 #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
 static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
@@ -86,28 +87,25 @@ void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))

 /**@}*/
 #endif /* RT_USING_HOOK */

-static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
+static inline void _scheduler_update_highest_priority(void)
 {
-    struct rt_thread *highest_priority_thread;
-    rt_ubase_t highest_ready_priority;
-
 #if RT_THREAD_PRIORITY_MAX > 32
     rt_ubase_t number;
-
-    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
-    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
+    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
+    rt_thread_ready_highest_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
 #else
-    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
+    rt_thread_ready_highest_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
+}

-    /* get highest ready priority thread */
-    highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[highest_ready_priority].next);
-
-    *highest_prio = highest_ready_priority;
-    return highest_priority_thread;
+static inline struct rt_thread *_scheduler_get_priority_thread(rt_ubase_t priority)
+{
+    /* get highest ready priority thread */
+    return RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[priority].next);
 }
+
 rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
 {
     rt_base_t level;
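
Note: _scheduler_update_highest_priority() caches what the old helper recomputed on
every call. The cache is a direct function of the ready bitmaps and the __rt_ffs()
primitive:

    /* __rt_ffs() returns the 1-based index of the least-significant set
     * bit (0 for an empty word), so "__rt_ffs(group) - 1" is the
     * lowest-numbered, i.e. highest, ready priority. Worked values:
     *   __rt_ffs(0x00000001) == 1  ->  highest ready priority = 0
     *   __rt_ffs(0x00000018) == 4  ->  highest ready priority = 3
     * Above 32 priorities, the group word selects an 8-priority bank and
     * rt_thread_ready_table[bank] locates the bit inside it. */
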
@@ -150,7 +148,7 @@ void rt_system_scheduler_init(void)
     LOG_D("start scheduler: max priority 0x%02x",
           RT_THREAD_PRIORITY_MAX);

-    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
+    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset++)
     {
         rt_list_init(&rt_thread_priority_table[offset]);
     }
@@ -171,13 +169,14 @@ void rt_system_scheduler_init(void)
 void rt_system_scheduler_start(void)
 {
     struct rt_thread *to_thread;
-    rt_ubase_t highest_ready_priority;

-    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
+    _scheduler_update_highest_priority();
+    to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);

     rt_cpu_self()->current_thread = to_thread;

     rt_sched_remove_thread(to_thread);
+
     RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;

     /* switch to new thread */
@@ -187,6 +186,70 @@ void rt_system_scheduler_start(void)
     /* never come back */
 }

+/**
+ * @brief This function will insert a thread to the system ready queue. The state of
+ *        thread will be set as READY and the thread will be removed from suspend queue.
+ *
+ * @param thread is the thread to be inserted.
+ *
+ * @note  Please do not invoke this function in user application.
+ */
+static inline void _rt_sched_insert_thread(struct rt_thread *thread)
+{
+    /* READY thread, insert to ready queue */
+    RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
+    /* there is no time slices left(YIELD), inserting thread before ready list*/
+    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+    {
+        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                              &RT_THREAD_LIST_NODE(thread));
+    }
+    /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
+    else
+    {
+        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                             &RT_THREAD_LIST_NODE(thread));
+    }
+
+    LOG_D("insert thread[%.*s], the priority: %d",
+          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(rt_current_thread).current_priority);
+
+    /* set priority mask */
+#if RT_THREAD_PRIORITY_MAX > 32
+    rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
+}
+
+/**
+ * @brief This function will remove a thread from system ready queue.
+ *
+ * @param thread is the thread to be removed.
+ *
+ * @note  Please do not invoke this function in user application.
+ */
+static inline void _rt_sched_remove_thread(struct rt_thread *thread)
+{
+    LOG_D("remove thread[%.*s], the priority: %d",
+          RT_NAME_MAX, thread->parent.name,
+          RT_SCHED_PRIV(rt_current_thread).current_priority);
+
+    /* remove thread from ready list */
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
+    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
+    {
+#if RT_THREAD_PRIORITY_MAX > 32
+        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
+        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
+        {
+            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
+        }
+#else
+        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    }
+}
+
 /**
  * @addtogroup group_Thread
  * @cond
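
Note: splitting out _rt_sched_insert_thread()/_rt_sched_remove_thread() lets every
ready-queue mutation be paired with a cheap cache refresh, instead of rescanning the
bitmap inside each rt_schedule() call. The invariant the rest of the file maintains
(sketch):

    _rt_sched_insert_thread(thread);       /* or _rt_sched_remove_thread()  */
    _scheduler_update_highest_priority();  /* cache matches the bitmap again */
    /* rt_schedule() may now read rt_thread_ready_highest_priority directly */
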
@@ -201,127 +264,122 @@ void rt_system_scheduler_start(void)
 void rt_schedule(void)
 {
     rt_base_t level;
+    /* need_insert_from_thread: need to insert from_thread to ready queue */
+    int need_insert_from_thread;
+    /* using local variable to avoid unnecessary function call */
+    struct rt_thread *curr_thread;
     struct rt_thread *to_thread;
     struct rt_thread *from_thread;
-    /* using local variable to avoid unecessary function call */
-    struct rt_thread *curr_thread = rt_thread_self();
-
     /* disable interrupt */
     level = rt_hw_interrupt_disable();

     /* check the scheduler is enabled or not */
-    if (rt_scheduler_lock_nest == 0)
+    if (rt_scheduler_lock_nest == 0 && rt_thread_ready_priority_group)
     {
-        rt_ubase_t highest_ready_priority;
+        curr_thread = rt_thread_self();
+
+        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+        {
+            if (RT_SCHED_PRIV(curr_thread).current_priority < rt_thread_ready_highest_priority)
+            {
+                to_thread = curr_thread;
+            }
+            else if (RT_SCHED_PRIV(curr_thread).current_priority == rt_thread_ready_highest_priority
+                     && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
+            {
+                to_thread = curr_thread;
+            }
+            else
+            {
+                to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
+                need_insert_from_thread = 1;
+            }
+        }
+        else
+        {
+            to_thread = _scheduler_get_priority_thread(rt_thread_ready_highest_priority);
+            need_insert_from_thread = 0;
+        }

-        if (rt_thread_ready_priority_group != 0)
+        if (to_thread != curr_thread)
         {
-            /* need_insert_from_thread: need to insert from_thread to ready queue */
-            int need_insert_from_thread = 0;
+            /* if the destination thread is not the same as current thread */
+            rt_current_priority = (rt_uint8_t)rt_thread_ready_highest_priority;
+            from_thread = curr_thread;
+            rt_cpu_self()->current_thread = to_thread;

-            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
+            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

-            if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+            if (need_insert_from_thread)
             {
-                if (RT_SCHED_PRIV(curr_thread).current_priority < highest_ready_priority)
-                {
-                    to_thread = curr_thread;
-                }
-                else if (RT_SCHED_PRIV(curr_thread).current_priority == highest_ready_priority
-                         && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
-                {
-                    to_thread = curr_thread;
-                }
-                else
-                {
-                    need_insert_from_thread = 1;
-                }
+                _rt_sched_remove_thread(from_thread);
+                _rt_sched_insert_thread(from_thread);
             }

-            if (to_thread != curr_thread)
+            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
             {
-                /* if the destination thread is not the same as current thread */
-                rt_current_priority = (rt_uint8_t)highest_ready_priority;
-                from_thread = curr_thread;
-                rt_cpu_self()->current_thread = to_thread;
+                RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
+            }

-                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
+            _rt_sched_remove_thread(to_thread);
+            RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);

-                if (need_insert_from_thread)
-                {
-                    rt_sched_insert_thread(from_thread);
-                }
+            _scheduler_update_highest_priority();

-                if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-                {
-                    RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
-                }
+            /* switch to new thread */
+            LOG_D("[%d]switch to priority#%d "
+                  "thread:%.*s(sp:0x%08x), "
+                  "from thread:%.*s(sp: 0x%08x)",
+                  rt_interrupt_nest, rt_thread_ready_highest_priority,
+                  RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
+                  RT_NAME_MAX, from_thread->parent.name, from_thread->sp);

-                rt_sched_remove_thread(to_thread);
-                RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
+            RT_SCHEDULER_STACK_CHECK(to_thread);
+
+            if (rt_interrupt_nest == 0)
+            {
+                extern void rt_thread_handle_sig(rt_bool_t clean_state);

-                /* switch to new thread */
-                LOG_D("[%d]switch to priority#%d "
-                      "thread:%.*s(sp:0x%08x), "
-                      "from thread:%.*s(sp: 0x%08x)",
-                      rt_interrupt_nest, highest_ready_priority,
-                      RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
-                      RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
+                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));

-                RT_SCHEDULER_STACK_CHECK(to_thread);
+                rt_hw_context_switch((rt_uintptr_t)&from_thread->sp,
+                                     (rt_uintptr_t)&to_thread->sp);

-                if (rt_interrupt_nest == 0)
+                /* enable interrupt */
+                rt_hw_interrupt_enable(level);
+
+#ifdef RT_USING_SIGNALS
+                /* check stat of thread for signal */
+                level = rt_hw_interrupt_disable();
+                if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
                 {
                     extern void rt_thread_handle_sig(rt_bool_t clean_state);

+                    RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
-                    RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));

-                    rt_hw_context_switch((rt_uintptr_t)&from_thread->sp,
-                                         (rt_uintptr_t)&to_thread->sp);
-
-                    /* enable interrupt */
                     rt_hw_interrupt_enable(level);

-#ifdef RT_USING_SIGNALS
-                    /* check stat of thread for signal */
-                    level = rt_hw_interrupt_disable();
-                    if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
-                    {
-                        extern void rt_thread_handle_sig(rt_bool_t clean_state);
-
-                        RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
-
-                        rt_hw_interrupt_enable(level);
-
-                        /* check signal status */
-                        rt_thread_handle_sig(RT_TRUE);
-                    }
-                    else
-                    {
-                        rt_hw_interrupt_enable(level);
-                    }
-#endif /* RT_USING_SIGNALS */
-                    goto __exit;
+                    /* check signal status */
+                    rt_thread_handle_sig(RT_TRUE);
                 }
                 else
                 {
-                    LOG_D("switch in interrupt");
-
-                    rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp,
-                                                   (rt_uintptr_t)&to_thread->sp, from_thread, to_thread);
+                    rt_hw_interrupt_enable(level);
                 }
+#endif /* RT_USING_SIGNALS */
+                goto __exit;
             }
             else
             {
-                rt_sched_remove_thread(curr_thread);
-                RT_SCHED_CTX(curr_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(curr_thread).stat & ~RT_THREAD_STAT_MASK);
+                LOG_D("switch in interrupt");
+
+                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp,
+                                               (rt_uintptr_t)&to_thread->sp, from_thread, to_thread);
             }
         }
     }
-
     /* enable interrupt */
     rt_hw_interrupt_enable(level);
-
 __exit:
     return;
 }
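
Note: the scheduler-enabled check grew the "&& rt_thread_ready_priority_group" term
because rt_schedule() no longer re-derives the priority inside a separate "!= 0"
branch. Without it, the empty-bitmap case would consume a bogus cache value:

    /* with no READY thread: group == 0, __rt_ffs(0) == 0, the cached
     * priority would be -1, and rt_thread_priority_table[-1] is
     * undefined -- the added term makes that case fall through as a no-op */
    if (rt_scheduler_lock_nest == 0 && rt_thread_ready_priority_group)
    {
        /* at least one ready list is guaranteed non-empty here */
    }
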
@@ -330,9 +388,9 @@ void rt_schedule(void)
 void rt_sched_thread_startup(struct rt_thread *thread)
 {
 #if RT_THREAD_PRIORITY_MAX > 32
-    RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
+    RT_SCHED_PRIV(thread).number      = RT_SCHED_PRIV(thread).current_priority >> 3;           /* 5bit */
     RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
-    RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
+    RT_SCHED_PRIV(thread).high_mask   = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
 #else
     RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
@@ -353,12 +411,12 @@ void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_ui
     /* don't add to scheduler queue as init thread */
     RT_SCHED_PRIV(thread).number_mask = 0;
 #if RT_THREAD_PRIORITY_MAX > 32
-    RT_SCHED_PRIV(thread).number = 0;
+    RT_SCHED_PRIV(thread).number    = 0;
     RT_SCHED_PRIV(thread).high_mask = 0;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */

     /* tick init */
-    RT_SCHED_PRIV(thread).init_tick = tick;
+    RT_SCHED_PRIV(thread).init_tick      = tick;
     RT_SCHED_PRIV(thread).remaining_tick = tick;
 }

@@ -386,29 +444,9 @@ void rt_sched_insert_thread(struct rt_thread *thread)
         goto __exit;
     }

-    /* READY thread, insert to ready queue */
-    RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
-    /* there is no time slices left(YIELD), inserting thread before ready list*/
-    if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-    {
-        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                              &RT_THREAD_LIST_NODE(thread));
-    }
-    /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
-    else
-    {
-        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                             &RT_THREAD_LIST_NODE(thread));
-    }
-
-    LOG_D("insert thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(rt_current_thread).current_priority);
+    _rt_sched_insert_thread(thread);

-    /* set priority mask */
-#if RT_THREAD_PRIORITY_MAX > 32
-    rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
+    _scheduler_update_highest_priority();

 __exit:
     /* enable interrupt */
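
Note: a worked example of the number/number_mask/high_mask bookkeeping realigned
above, for RT_THREAD_PRIORITY_MAX > 32 and priority 83:

    /* number      = 83 >> 3           = 10       (5-bit bank index)
     * number_mask = 1L << 10                     (bit in the 32-bit group word)
     * high_mask   = 1L << (83 & 0x07) = 1L << 3  (bit inside the bank)
     * The group word says "bank 10 has a ready thread"; the bank byte in
     * rt_thread_ready_table[10] pins it down to priority 83 exactly. */
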
@@ -431,24 +469,9 @@ void rt_sched_remove_thread(struct rt_thread *thread)
     /* disable interrupt */
     level = rt_hw_interrupt_disable();

-    LOG_D("remove thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name,
-          RT_SCHED_PRIV(rt_current_thread).current_priority);
+    _rt_sched_remove_thread(thread);

-    /* remove thread from ready list */
-    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
-    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
-    {
-#if RT_THREAD_PRIORITY_MAX > 32
-        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
-        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
-        {
-            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-        }
-#else
-        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    }
+    _scheduler_update_highest_priority();

     /* enable interrupt */
     rt_hw_interrupt_enable(level);
@@ -468,16 +491,16 @@ void rt_exit_critical_safe(rt_base_t critical_level)
     {
         if (critical_level != rt_scheduler_lock_nest)
         {
-            int dummy = 1;
+            int dummy                = 1;
             _critical_error_occurred = 1;

-            rt_kprintf("%s: un-compatible critical level\n" \
+            rt_kprintf("%s: un-compatible critical level\n"
                        "\tCurrent %d\n\tCaller %d\n",
                        __func__, rt_scheduler_lock_nest,
                        critical_level);
             rt_backtrace();
-            while (dummy) ;
+            while (dummy);
         }
     }
     rt_hw_interrupt_enable(level);
@@ -485,14 +508,14 @@ void rt_exit_critical_safe(rt_base_t critical_level)
     rt_exit_critical();
 }

-#else /* !RT_DEBUGING_CRITICAL */
+#else  /* !RT_DEBUGING_CRITICAL */

 void rt_exit_critical_safe(rt_base_t critical_level)
 {
     rt_exit_critical();
 }

-#endif/* RT_DEBUGING_CRITICAL */
+#endif /* RT_DEBUGING_CRITICAL */
 RTM_EXPORT(rt_exit_critical_safe);

 /**
@@ -505,12 +528,11 @@ rt_base_t rt_enter_critical(void)

     /* disable interrupt */
     level = rt_hw_interrupt_disable();
-
     /*
      * the maximal number of nest is RT_UINT16_MAX, which is big
      * enough and does not check here
      */
-    rt_scheduler_lock_nest ++;
+    rt_scheduler_lock_nest++;
     critical_level = rt_scheduler_lock_nest;

     /* enable interrupt */
@@ -530,7 +552,7 @@ void rt_exit_critical(void)
     /* disable interrupt */
     level = rt_hw_interrupt_disable();

-    rt_scheduler_lock_nest --;
+    rt_scheduler_lock_nest--;
     if (rt_scheduler_lock_nest <= 0)
     {
         rt_scheduler_lock_nest = 0;

diff --git a/src/signal.c b/src/signal.c
index b33e24220e4..69c79800a9e 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -118,8 +118,6 @@ static void _signal_deliver(rt_thread_t tid)

         rt_spin_unlock_irqrestore(&_thread_signal_lock, level);

-        /* re-schedule */
-        rt_schedule();
     }
     else
     {
@@ -165,8 +163,6 @@ static void _signal_deliver(rt_thread_t tid)
             rt_spin_unlock_irqrestore(&_thread_signal_lock, level);

             LOG_D("signal stack pointer @ 0x%08x", tid->sp);
-            /* re-schedule */
-            rt_schedule();
         }
         else
         {
@@ -377,9 +373,6 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
     }
     rt_spin_unlock_irqrestore(&_thread_signal_lock, level);

-    /* do thread scheduling */
-    rt_schedule();
-
     level = rt_spin_lock_irqsave(&_thread_signal_lock);

     /* remove signal waiting flag */
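
Note: the signal.c removals rely on the same deferred reschedule as the IPC paths:
the last rt_exit_critical() is assumed to be the single point where a pending switch
happens once the nest count returns to zero. Sketch of the nesting (simplified):

    rt_base_t lvl = rt_enter_critical();  /* nest 0 -> 1, scheduler locked     */
    /* ... deliver signal, wake threads, touch run queues ... */
    rt_exit_critical_safe(lvl);           /* nest 1 -> 0, deferred switch runs */
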
diff --git a/src/thread.c b/src/thread.c
index ed9dca13b61..d1ba2a9d311 100644
--- a/src/thread.c
+++ b/src/thread.c
@@ -133,8 +133,6 @@ static void _thread_exit(void)

     rt_exit_critical_safe(critical_level);

-    /* switch to next task */
-    rt_schedule();
 }

 /**
@@ -647,9 +645,6 @@ static rt_err_t _thread_sleep(rt_tick_t tick)

     thread->error = -RT_EINTR;

-    /* notify a pending rescheduling */
-    rt_schedule();
-
     /* exit critical and do a rescheduling */
     rt_exit_critical_safe(critical_level);

@@ -692,7 +687,6 @@ RTM_EXPORT(rt_thread_delay);
 rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
 {
     struct rt_thread *thread;
-    rt_tick_t cur_tick;
     rt_base_t critical_level;

     RT_ASSERT(tick != RT_NULL);
@@ -708,13 +702,15 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     /* disable interrupt */
     critical_level = rt_enter_critical();

-    cur_tick = rt_tick_get();
-    if (cur_tick - *tick < inc_tick)
+    if (rt_tick_get_delta(*tick) < inc_tick)
     {
         rt_tick_t left_tick;
+        rt_tick_t target_tick;
+        target_tick = *tick + inc_tick;
+        left_tick   = target_tick - rt_tick_get();

-        *tick += inc_tick;
-        left_tick = *tick - cur_tick;
+        if (left_tick > target_tick)
+            left_tick = RT_TICK_MAX - left_tick + 1;

         /* suspend thread */
         rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
@@ -725,8 +721,6 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)

         rt_exit_critical_safe(critical_level);

-        rt_schedule();
-
         /* clear error number of this thread to RT_EOK */
         if (thread->error == -RT_ETIMEOUT)
         {
@@ -735,7 +729,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     }
     else
     {
-        *tick = cur_tick;
+        *tick = rt_tick_get();
         rt_exit_critical_safe(critical_level);
     }

diff --git a/src/timer.c b/src/timer.c
index ae6da29f760..2a8dd54b04b 100644
--- a/src/timer.c
+++ b/src/timer.c
@@ -494,8 +494,6 @@ static void _timer_check(rt_list_t *timer_list, struct rt_spinlock *lock)

     level = rt_spin_lock_irqsave(lock);

-    current_tick = rt_tick_get();
-
     rt_list_init(&list);

     while (!rt_list_isempty(&timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
@@ -539,8 +537,7 @@ static void _timer_check(rt_list_t *timer_list, struct rt_spinlock *lock)
             continue;
         }
         rt_list_remove(&(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
-        if ((t->parent.flag & RT_TIMER_FLAG_PERIODIC) &&
-            (t->parent.flag & RT_TIMER_FLAG_ACTIVATED))
+        if ((t->parent.flag & (RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_ACTIVATED)) == (RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_ACTIVATED))
         {
             /* start it */
             t->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
@@ -747,7 +744,6 @@ RTM_EXPORT(rt_timer_control);
  */
 void rt_timer_check(void)
 {
-
     RT_ASSERT(rt_interrupt_get_nest() > 0);

 #ifdef RT_USING_SMP
     /* Running on core 0 only */
@@ -762,7 +758,7 @@ void rt_timer_check(void)
         rt_tick_t next_timeout;

         ret = _timer_list_next_timeout(_soft_timer_list, &next_timeout);
-        if ((ret == RT_EOK) && (next_timeout <= rt_tick_get()))
+        if ((ret == RT_EOK) && ((rt_tick_get() - next_timeout) < RT_TICK_MAX / 2))
         {
             rt_sem_release(&_soft_timer_sem);
         }
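
Note: the final timer.c hunk is the heart of the tick-overflow fix. The old test
"next_timeout <= rt_tick_get()" misbehaves when either value wraps, so soft timers
could stop being released near a rollover; the half-range comparison is wrap-safe
provided no timeout is scheduled more than RT_TICK_MAX/2 ticks ahead. (The earlier
combined-flag test is the standard single-compare form: for single-bit masks,
(flag & (A | B)) == (A | B) checks both flags at once.) Sketch of the deadline test:

    /* wrap-safe "deadline reached", valid for deadlines scheduled less
     * than RT_TICK_MAX/2 ticks into the future */
    static rt_bool_t tick_reached(rt_tick_t now, rt_tick_t deadline)
    {
        return (rt_tick_t)(now - deadline) < (RT_TICK_MAX / 2);
    }
    /* e.g. now = 3 just after wrap, deadline = 0xFFFFFFFE just before it:
     * now - deadline == 5 -> due; the old "deadline <= now" said not due */
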