34 | 34 | .GLB __tx_thread_execute_ptr
35 | 35 | .GLB __tx_thread_current_ptr
36 | 36 | .GLB __tx_timer_time_slice
| 37 | +.IF TX_LOW_POWER==1
| 38 | + .GLB _tx_low_power_enter
| 39 | + .GLB _tx_low_power_exit
| 40 | + .GLB __tx_thread_preempt_disable
| 41 | +.ENDIF
37 | 42 | ;
38 | 43 | .SECTION P,CODE
39 | 44 |
42 | 47 | ;/* FUNCTION RELEASE */
43 | 48 | ;/* */
44 | 49 | ;/* _tx_thread_schedule RXv1/CCRX */
45 | | -;/* 6.1.10 */
| 50 | +;/* 6.1.11 */
46 | 51 | ;/* AUTHOR */
47 | 52 | ;/* */
48 | 53 | ;/* William E. Lamie, Microsoft Corporation */
82 | 87 | ;/* resulting in version 6.1.9 */
83 | 88 | ;/* 01-31-2022 William E. Lamie Modified comment(s), */
84 | 89 | ;/* resulting in version 6.1.10 */
| 90 | +;/* 04-25-2022 William E. Lamie Modified comment(s), and */
| 91 | +;/* added low power support, */
| 92 | +;/* resulting in version 6.1.11 */
85 | 93 | ;/* */
86 | 94 | ;/**************************************************************************/
87 | 95 | ;VOID _tx_thread_schedule(VOID)
88 | 96 | ;{
89 | 97 | .GLB __tx_thread_schedule
90 | 98 | __tx_thread_schedule:
91 | 99 | ;
92 | | -; /* Enable interrupts. */
93 | | -;
94 | | - SETPSW I
95 | 100 | ;
96 | 101 | ; /* Wait for a thread to execute. */
97 | 102 | ; do
98 | 103 | ; {
99 | 104 | MOV.L #__tx_thread_execute_ptr, R1 ; Address of thread to execute ptr
100 | 105 | __tx_thread_schedule_loop:
| 106 | + SETPSW I ; Enable interrupts
| 107 | + CLRPSW I ; Disable interrupts
101 | 108 | MOV.L [R1],R2 ; Pickup next thread to execute
102 | 109 | CMP #0,R2 ; Is it NULL?
103 | | - BEQ __tx_thread_schedule_loop ; Yes, idle system, keep checking
| 110 | + BNE __tx_thread_thread_ready ; Not NULL, schedule the thread
| 111 | + ; Idle system - no thread is ready
| 112 | +.IF TX_LOW_POWER==1
| 113 | + MOV.L #__tx_thread_preempt_disable, R1 ; Load preempt disable flag.
| 114 | + MOV.L [R1], R2
| 115 | + ADD #1, R2 ; Disable preemption while enter/exit
| 116 | + MOV.L R2, [R1]
| 117 | + BSR _tx_low_power_enter ; Possibly enter low power mode
| 118 | +.ENDIF
| 119 | +
| 120 | +.IF TX_ENABLE_WAIT==1
| 121 | + WAIT ; Wait for interrupt
| 122 | +.ENDIF
| 123 | +
| 124 | +.IF TX_LOW_POWER==1
| 125 | + CLRPSW I ; Disable interrupts (because WAIT enables interrupts)
| 126 | + BSR _tx_low_power_exit ; Possibly exit low power mode
| 127 | + MOV.L #__tx_thread_preempt_disable, R1 ; Load preempt disable flag.
| 128 | + MOV.L [R1], R2
| 129 | + SUB #1, R2 ; Enable preemption
| 130 | + MOV.L R2, [R1]
| 131 | + MOV.L #__tx_thread_execute_ptr, R1 ; Address of thread to execute ptr
| 132 | +.ENDIF
| 133 | +
| 134 | + BRA __tx_thread_schedule_loop ; Idle system, keep checking
| 135 | +
| 136 | +__tx_thread_thread_ready:
104 | 137 | ;
105 | 138 | ; }
106 | 139 | ; while(_tx_thread_execute_ptr == TX_NULL);
107 | 140 | ;
108 | | -; /* Yes! We have a thread to execute. Lockout interrupts and
109 | | -; transfer control to it. */
110 | | -;
111 | | - CLRPSW I ; Disable interrupts
| 141 | +; /* Yes! We have a thread to execute. Note that interrupts are locked out at this point. */
112 | 142 | ;
113 | 143 | ; /* Setup the current thread pointer. */
114 | 144 | ; _tx_thread_current_ptr = _tx_thread_execute_ptr;
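For readers following the change at the C level, below is a rough, illustrative sketch of the idle-loop logic the modified assembly implements when TX_LOW_POWER is defined: open a brief interrupt window, sample _tx_thread_execute_ptr, and if no thread is ready bump _tx_thread_preempt_disable, call the low power enter routine, optionally WAIT (when TX_ENABLE_WAIT is defined), then disable interrupts again, call the low power exit routine, and restore the preemption count before looping. The ThreadX symbols come from the diff (the assembly names _tx_low_power_enter/_tx_low_power_exit and __tx_thread_preempt_disable appear to be the underscore-prefixed C symbols, following the same pattern as __tx_thread_execute_ptr); the helper functions standing in for SETPSW I, CLRPSW I and WAIT, the C types, and the use of #ifdef in place of the assembler .IF are assumptions made purely for illustration.

/* Illustrative sketch only - not the port's implementation. */
extern void         *_tx_thread_execute_ptr;      /* next thread to run (TX_THREAD * in ThreadX)   */
extern unsigned int  _tx_thread_preempt_disable;  /* preemption disable counter (type assumed)     */
extern void          tx_low_power_enter(void);    /* referenced as _tx_low_power_enter by the port */
extern void          tx_low_power_exit(void);     /* referenced as _tx_low_power_exit by the port  */

static void enable_interrupts(void)  { /* stands in for SETPSW I                             */ }
static void disable_interrupts(void) { /* stands in for CLRPSW I                             */ }
static void wait_for_interrupt(void) { /* stands in for WAIT (re-enables interrupts, halts)  */ }

static void idle_loop_sketch(void)                /* hypothetical name, for illustration */
{
    for (;;)
    {
        /* Brief window so pending interrupts (which may make a thread ready)
           can be serviced before the execute pointer is sampled. */
        enable_interrupts();
        disable_interrupts();

        if (_tx_thread_execute_ptr != 0)
        {
            break;                                /* a thread is ready: go schedule it */
        }

#ifdef TX_LOW_POWER                               /* .IF TX_LOW_POWER==1 in the port   */
        _tx_thread_preempt_disable++;             /* hold off preemption across enter/exit */
        tx_low_power_enter();                     /* possibly enter low power mode     */
#endif

#ifdef TX_ENABLE_WAIT                             /* .IF TX_ENABLE_WAIT==1 in the port */
        wait_for_interrupt();                     /* sleep until the next interrupt    */
#endif

#ifdef TX_LOW_POWER
        disable_interrupts();                     /* WAIT re-enabled interrupts        */
        tx_low_power_exit();                      /* possibly exit low power mode      */
        _tx_thread_preempt_disable--;             /* allow preemption again            */
#endif
    }
}

As in the assembly, preemption is held off while the low power routines run, and interrupts are explicitly disabled again after the wait because, per the diff's own comment, WAIT enables interrupts.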