  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
  * 2023-03-27     rose_man     Split into scheduler_up.c and scheduler_mp.c
  * 2023-10-17     ChuShicheng  Modify the timing of clearing RT_THREAD_STAT_YIELD flag bits
+ * 2025-08-04     Pillar       Add rt_scheduler_critical_switch_flag
  */
 
 #define __RT_IPC_SOURCE__
@@ -51,6 +52,11 @@ extern volatile rt_atomic_t rt_interrupt_nest;
 static rt_int16_t rt_scheduler_lock_nest;
 rt_uint8_t rt_current_priority;
 
+static rt_int8_t rt_scheduler_critical_switch_flag;
+#define IS_CRITICAL_SWITCH_PEND()  (rt_scheduler_critical_switch_flag == 1)
+#define SET_CRITICAL_SWITCH_FLAG() (rt_scheduler_critical_switch_flag = 1)
+#define CLR_CRITICAL_SWITCH_FLAG() (rt_scheduler_critical_switch_flag = 0)
+
 #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
 static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
 static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
@@ -236,6 +242,9 @@ void rt_system_scheduler_start(void)
 
     rt_cpu_self()->current_thread = to_thread;
 
+    /* flush critical switch flag */
+    CLR_CRITICAL_SWITCH_FLAG();
+
     rt_sched_remove_thread(to_thread);
     RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
 
@@ -387,6 +396,10 @@ void rt_schedule(void)
             }
         }
     }
+    else
+    {
+        SET_CRITICAL_SWITCH_FLAG();
+    }
 
     /* enable interrupt */
     rt_hw_interrupt_enable(level);
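
The else branch above is the pending side of the mechanism: a schedule request that arrives while rt_scheduler_lock_nest is non-zero is now recorded instead of being dropped, and rt_exit_critical() acts on it later. The standalone model below only illustrates that bookkeeping; all names are invented for the sketch and none of it is kernel code.

#include <stdio.h>

/* Hypothetical model of the deferred-switch bookkeeping; the structure
 * mirrors the patch but the identifiers are illustrative. */
static int lock_nest;            /* models rt_scheduler_lock_nest            */
static int critical_switch_flag; /* models rt_scheduler_critical_switch_flag */

static void model_schedule(void)
{
    if (lock_nest == 0)
        printf("switch performed immediately\n");
    else
        critical_switch_flag = 1;      /* scheduler locked: pend the switch */
}

static void model_enter_critical(void) { lock_nest++; }

static void model_exit_critical(void)
{
    if (--lock_nest <= 0)
    {
        lock_nest = 0;
        if (critical_switch_flag)      /* a switch was pended while locked */
        {
            critical_switch_flag = 0;
            printf("deferred switch performed on unlock\n");
        }
    }
}

int main(void)
{
    model_enter_critical();
    model_schedule();        /* only sets the flag */
    model_exit_critical();   /* performs the deferred switch here */
    return 0;
}
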
@@ -604,6 +617,7 @@ void rt_exit_critical_safe(rt_base_t critical_level)
 
 /**
  * @brief Safely exit critical section (non-debug version)
+ *        If rt_schedule() was called while the scheduler was locked, the pending schedule is performed in this function.
  *
  * @param critical_level The expected critical level (unused in non-debug build)
  *
@@ -657,6 +671,7 @@ RTM_EXPORT(rt_enter_critical);
 
 /**
  * @brief Exit critical section and unlock scheduler
+ *        If rt_schedule() was called while the scheduler was locked, the pending schedule is performed in this function.
  *
  * @details This function:
  *          - Decrements the scheduler lock nesting count
@@ -685,9 +700,10 @@ void rt_exit_critical(void)
         /* enable interrupt */
         rt_hw_interrupt_enable(level);
 
-        if (rt_current_thread)
+        if (IS_CRITICAL_SWITCH_PEND())
         {
-            /* if scheduler is started, do a schedule */
+            CLR_CRITICAL_SWITCH_FLAG();
+            /* if scheduler is started and needs to be scheduled, do a schedule */
             rt_schedule();
         }
     }
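
Seen from application code, the effect of the patch is that a wake-up performed inside a critical section now preempts exactly at rt_exit_critical(). The sketch below assumes nothing beyond the public RT-Thread thread and semaphore APIs; the thread names, priorities and stack sizes are arbitrary assumptions for the example.

#include <rtthread.h>

/* Illustrative sketch, not part of the patch. */
static rt_sem_t demo_sem;

static void high_prio_entry(void *parameter)
{
    /* blocks until the low-priority thread releases the semaphore */
    rt_sem_take(demo_sem, RT_WAITING_FOREVER);
    rt_kprintf("high-priority thread resumed\n");
}

static void low_prio_entry(void *parameter)
{
    rt_enter_critical();             /* scheduler lock is now held */

    /* wakes the high-priority thread; rt_schedule() runs inside
     * rt_sem_release() but, with the lock held, it only sets
     * rt_scheduler_critical_switch_flag instead of switching */
    rt_sem_release(demo_sem);

    rt_kprintf("still running, switch is pending\n");

    rt_exit_critical();              /* pending flag is seen here and
                                      * rt_schedule() performs the switch */
}

int critical_switch_demo(void)
{
    demo_sem = rt_sem_create("demo", 0, RT_IPC_FLAG_PRIO);

    rt_thread_t hi = rt_thread_create("hi", high_prio_entry, RT_NULL, 1024, 10, 10);
    rt_thread_t lo = rt_thread_create("lo", low_prio_entry, RT_NULL, 1024, 20, 10);

    rt_thread_startup(hi);
    rt_thread_startup(lo);

    return 0;
}

The demo function can be called from main(); the expected console order is the "still running" line first, then the high-priority line as soon as the deferred switch happens inside rt_exit_critical().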