Commit be0ea3f

Merge pull request #336 from laijs/lkl/idle_host_task
switch to idle_host_task when idle
2 parents: 32b25c3 + a7d380d

8 files changed: +55 additions, -109 deletions
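Summary of the change, as read from the diffs below: previously, LKL serviced idle by hijacking the kernel's idle loop with a host jmp_buf (arch_cpu_idle_prepare() restarting do_idle() via longjmp) and by duplicating the idle-exit path in lkl_idle_tail_schedule(). This merge removes that machinery and instead parks a dedicated kernel thread, idle_host_task, created in syscalls_init(); arch_cpu_idle() now simply wakes it, and scheduling proceeds through the ordinary host-task switch path. The TIF_IDLE flag, the idle semaphore, and the idle jmp_buf state all go away, which also lets do_idle() become static again in kernel/sched/idle.c.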

arch/lkl/include/asm/cpu.h (0 additions, 4 deletions)

@@ -7,11 +7,7 @@ int lkl_cpu_try_run_irq(int irq);
 int lkl_cpu_init(void);
 void lkl_cpu_shutdown(void);
 void lkl_cpu_wait_shutdown(void);
-void lkl_cpu_wakeup_idle(void);
 void lkl_cpu_change_owner(lkl_thread_t owner);
 void lkl_cpu_set_irqs_pending(void);
-void lkl_idle_tail_schedule(void);
-int lkl_cpu_idle_pending(void);
-extern void do_idle(void);
 
 #endif /* _ASM_LKL_CPU_H */

arch/lkl/include/asm/sched.h (4 additions, 5 deletions)

@@ -2,18 +2,17 @@
 #define _ASM_LKL_SCHED_H
 
 #include <linux/sched.h>
+#include <uapi/asm/host_ops.h>
 
 static inline void thread_sched_jb(void)
 {
-	set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB);
-
 	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD)) {
+		set_ti_thread_flag(current_thread_info(), TIF_SCHED_JB);
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb,
 				     schedule);
-	} else {
-		lkl_ops->jmp_buf_set(&current_thread_info()->sched_jb,
-				     lkl_idle_tail_schedule);
+	} else {
+		lkl_bug("thread_sched_jb() can be used only for host task");
 	}
 }
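The jmp_buf discipline that thread_sched_jb() relies on can be hard to read from a diff: lkl_ops->jmp_buf_set(buf, fn) saves the current context and then calls fn, and a later jmp_buf_longjmp(buf, 1) resumes execution right after the save point. Below is a minimal standalone sketch of that pattern using plain setjmp/longjmp; jmp_buf_set and fake_schedule here are illustrative stand-ins, not LKL's host operations, and the real flow spans host threads rather than a single call chain.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf sched_jb;

/* Stand-in for schedule(): does some work, then resumes the saved context. */
static void fake_schedule(void)
{
	printf("in fake_schedule(), jumping back\n");
	longjmp(sched_jb, 1);	/* resume right after the setjmp below */
}

/* Model of lkl_ops->jmp_buf_set(buf, fn): save the context, then run fn. */
static void jmp_buf_set(jmp_buf *buf, void (*fn)(void))
{
	if (setjmp(*buf) == 0)
		fn();
	/* a longjmp(*buf, 1) lands here, with setjmp returning 1 */
}

int main(void)
{
	jmp_buf_set(&sched_jb, fake_schedule);
	printf("resumed after longjmp\n");
	return 0;
}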

arch/lkl/include/asm/syscalls.h (1 addition, 0 deletions)

@@ -4,6 +4,7 @@
 int syscalls_init(void);
 void syscalls_cleanup(void);
 long lkl_syscall(long no, long *params);
+void wakeup_idle_host_task(void);
 
 #define sys_mmap sys_mmap_pgoff
 #define sys_mmap2 sys_mmap_pgoff

arch/lkl/include/asm/thread_info.h (0 additions, 1 deletion)

@@ -59,7 +59,6 @@ void threads_cleanup(void);
 #define TIF_NOHZ 6
 #define TIF_SCHED_JB 7
 #define TIF_HOST_THREAD 8
-#define TIF_IDLE 9
 
 #define __HAVE_THREAD_FUNCTIONS

arch/lkl/kernel/cpu.c (5 additions, 86 deletions)

@@ -1,8 +1,5 @@
-#include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 #include <asm/host_ops.h>
 #include <asm/cpu.h>
 #include <asm/thread_info.h>
@@ -52,12 +49,6 @@ struct lkl_cpu {
 	lkl_thread_t owner;
 	/* semaphore for threads waiting the CPU */
 	struct lkl_sem *sem;
-	/* semaphore for the idle thread */
-	struct lkl_sem *idle_sem;
-	/* if the idle thread is pending */
-	bool idle_pending;
-	/* jmp_buf used for idle thread to restart */
-	struct lkl_jmp_buf idle_jb;
 	/* semaphore used for shutdown */
 	struct lkl_sem *shutdown_sem;
 } cpu;
@@ -134,7 +125,8 @@ void lkl_cpu_put(void)
 		lkl_ops->mutex_lock(cpu.lock);
 	}
 
-	if (need_resched() && cpu.count == 1) {
+	if (test_ti_thread_flag(current_thread_info(), TIF_HOST_THREAD) &&
+	    !single_task_running() && cpu.count == 1) {
 		if (in_interrupt())
 			lkl_bug("%s: in interrupt\n", __func__);
 		lkl_ops->mutex_unlock(cpu.lock);
@@ -191,8 +183,6 @@ static void lkl_cpu_cleanup(bool shutdown)
 		lkl_ops->sem_up(cpu.shutdown_sem);
 	else if (cpu.shutdown_sem)
 		lkl_ops->sem_free(cpu.shutdown_sem);
-	if (cpu.idle_sem)
-		lkl_ops->sem_free(cpu.idle_sem);
 	if (cpu.sem)
 		lkl_ops->sem_free(cpu.sem);
 	if (cpu.lock)
@@ -215,91 +205,20 @@ void arch_cpu_idle(void)
 	/* enable irqs now to allow direct irqs to run */
 	local_irq_enable();
 
-	if (need_resched())
-		return;
-
-	cpu.idle_pending = true;
-	lkl_cpu_put();
-
-	lkl_ops->sem_down(cpu.idle_sem);
-
-	cpu.idle_pending = false;
-	/* to match that of schedule_preempt_disabled() */
-	preempt_disable();
-	lkl_ops->jmp_buf_longjmp(&cpu.idle_jb, 1);
-}
-
-void arch_cpu_idle_prepare(void)
-{
-	set_ti_thread_flag(current_thread_info(), TIF_IDLE);
-	/*
-	 * We hijack the idle loop here so that we can let the idle thread
-	 * jump back to the beginning.
-	 */
-	while (1)
-		lkl_ops->jmp_buf_set(&cpu.idle_jb, do_idle);
-}
-
-void lkl_cpu_wakeup_idle(void)
-{
-	lkl_ops->sem_up(cpu.idle_sem);
+	/* switch to idle_host_task */
+	wakeup_idle_host_task();
 }
 
 int lkl_cpu_init(void)
 {
 	cpu.lock = lkl_ops->mutex_alloc(0);
 	cpu.sem = lkl_ops->sem_alloc(0);
-	cpu.idle_sem = lkl_ops->sem_alloc(0);
 	cpu.shutdown_sem = lkl_ops->sem_alloc(0);
 
-	if (!cpu.lock || !cpu.sem || !cpu.idle_sem || !cpu.shutdown_sem) {
+	if (!cpu.lock || !cpu.sem || !cpu.shutdown_sem) {
 		lkl_cpu_cleanup(false);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
-
-/*
- * Simulate the exit path of idle loop so that we can schedule when LKL is
- * in idle.
- * It's just a duplication of those in idle.c so a better way is to refactor
- * idle.c to expose such function.
- */
-void lkl_idle_tail_schedule(void)
-{
-
-	if (!cpu.idle_pending ||
-	    !test_bit(TIF_IDLE, &current_thread_info()->flags))
-		lkl_bug("%s: not in idle\n", __func__);
-
-	start_critical_timings();
-	__current_set_polling();
-
-	if (WARN_ON_ONCE(irqs_disabled()))
-		local_irq_enable();
-
-	rcu_idle_exit();
-	arch_cpu_idle_exit();
-	preempt_set_need_resched();
-	tick_nohz_idle_exit();
-	__current_clr_polling();
-
-	/*
-	 * memory barrier copied from idle.c
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * Didn't find a way to include kernel/sched/sched.h for
-	 * sched_ttwu_pending().
-	 * Anyway, it's no op when not CONFIG_SMP.
-	 */
-
-	schedule_preempt_disabled();
-}
-
-int lkl_cpu_idle_pending(void)
-{
-	return cpu.idle_pending;
-}
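Combined with the syscalls.c changes below, the new idle path is a plain wake-and-park handoff. A sketch of the call flow, with function names taken from this diff and the ordering inferred from the code:

arch_cpu_idle()                       /* kernel idle loop, LKL arch hook */
    local_irq_enable();
    wakeup_idle_host_task();          /* wake_up_process(idle_host_task) */
        /* ...the scheduler then __switch_to()s into idle_host_task... */
idle_host_task_loop()
    lkl_cpu_put();                    /* give up the CPU token */
    lkl_ops->sem_down(ti->sched_sem); /* park until scheduled again */
    schedule_tail(ti->prev_sched);    /* complete the switch; repeat */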

arch/lkl/kernel/syscalls.c (42 additions, 0 deletions)

@@ -130,6 +130,34 @@ long lkl_syscall(long no, long *params)
 	return ret;
 }
 
+static struct task_struct *idle_host_task;
+
+/* called from idle, don't failed, don't block */
+void wakeup_idle_host_task(void)
+{
+	if (!need_resched() && idle_host_task)
+		wake_up_process(idle_host_task);
+}
+
+static int idle_host_task_loop(void *unused)
+{
+	struct thread_info *ti = task_thread_info(current);
+
+	snprintf(current->comm, sizeof(current->comm), "idle_host_task");
+	set_thread_flag(TIF_HOST_THREAD);
+	idle_host_task = current;
+
+	for (;;) {
+		lkl_cpu_put();
+		lkl_ops->sem_down(ti->sched_sem);
+		if (idle_host_task == NULL) {
+			lkl_ops->thread_exit();
+			return 0;
+		}
+		schedule_tail(ti->prev_sched);
+	}
+}
+
 int syscalls_init(void)
 {
 	snprintf(current->comm, sizeof(current->comm), "host0");
@@ -142,11 +170,25 @@
 		return -1;
 	}
 
+	if (kernel_thread(idle_host_task_loop, NULL, CLONE_FLAGS) < 0) {
+		if (lkl_ops->tls_free)
+			lkl_ops->tls_free(task_key);
+		return -1;
+	}
+
 	return 0;
 }
 
 void syscalls_cleanup(void)
 {
+	if (idle_host_task) {
+		struct thread_info *ti = task_thread_info(idle_host_task);
+
+		idle_host_task = NULL;
+		lkl_ops->sem_up(ti->sched_sem);
+		lkl_ops->thread_join(ti->tid);
+	}
+
 	if (lkl_ops->tls_free)
 		lkl_ops->tls_free(task_key);
 }
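idle_host_task_loop() above is a park/wake/exit protocol: the task releases the CPU token, sleeps on its per-thread semaphore, and on wakeup either finishes the context switch via schedule_tail() or, if syscalls_cleanup() has cleared idle_host_task, exits. The standalone pthreads sketch below models just that protocol; the semaphore, flag, and function names are assumptions made for illustration, and the CPU-token and scheduler steps are reduced to prints.

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static sem_t sched_sem;			/* models ti->sched_sem */
static volatile bool task_alive = true;	/* models idle_host_task != NULL */

static void *idle_host_task_loop(void *unused)
{
	(void)unused;
	for (;;) {
		/* park: in LKL, lkl_cpu_put() + lkl_ops->sem_down() */
		sem_wait(&sched_sem);
		if (!task_alive)
			return NULL;	/* models lkl_ops->thread_exit() */
		/* woken: LKL would run schedule_tail(ti->prev_sched) here */
		printf("idle host task: scheduled in\n");
	}
}

int main(void)
{
	pthread_t tid;

	sem_init(&sched_sem, 0, 0);
	pthread_create(&tid, NULL, idle_host_task_loop, NULL);

	/* idle path: wakeup_idle_host_task() reduces to a wakeup here */
	sem_post(&sched_sem);
	sleep(1);

	/* cleanup: mirrors syscalls_cleanup() -- clear flag, wake, join */
	task_alive = false;
	sem_post(&sched_sem);
	pthread_join(tid, NULL);
	sem_destroy(&sched_sem);
	return 0;
}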

arch/lkl/kernel/threads.c (2 additions, 11 deletions)

@@ -89,31 +89,22 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct thread_info *_prev = task_thread_info(prev);
 	struct thread_info *_next = task_thread_info(next);
 	unsigned long _prev_flags = _prev->flags;
-	bool wakeup_idle = test_bit(TIF_IDLE, &_next->flags) &&
-		lkl_cpu_idle_pending();
 	struct lkl_jmp_buf _prev_jb;
 
 	_current_thread_info = task_thread_info(next);
 	_next->prev_sched = prev;
 	abs_prev = prev;
 
 	BUG_ON(!_next->tid);
+	lkl_cpu_change_owner(_next->tid);
 
 	if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
 		/* Atomic. Must be done before wakeup next */
 		clear_ti_thread_flag(_prev, TIF_SCHED_JB);
 		_prev_jb = _prev->sched_jb;
 	}
-	if (wakeup_idle)
-		schedule_tail(abs_prev);
-	lkl_cpu_change_owner(_next->tid);
-
-	/* No kernel code is allowed after wakeup next */
-	if (wakeup_idle)
-		lkl_cpu_wakeup_idle();
-	else
-		lkl_ops->sem_up(_next->sched_sem);
 
+	lkl_ops->sem_up(_next->sched_sem);
 	if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
 		lkl_ops->jmp_buf_longjmp(&_prev_jb, 1);
 	} else {
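With the idle special case removed, every switch in __switch_to() has one shape: post the next thread's semaphore, then either longjmp out (TIF_SCHED_JB) or block on your own semaphore, so exactly one host thread runs kernel code at a time. The pthreads sketch below demonstrates that one-runnable-thread handoff in isolation; it is an analogy built on the same post-then-wait idea, not LKL code.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

/* One semaphore per thread; a "switch" is sem_post(next) + sem_wait(self),
 * so exactly one thread runs at a time -- the shape of __switch_to above. */
static sem_t sched_sem[2];

static void switch_to(int self, int next)
{
	sem_post(&sched_sem[next]);	/* wake the next thread */
	sem_wait(&sched_sem[self]);	/* park until someone wakes us */
}

static void *worker(void *arg)
{
	int self = (int)(long)arg, other = 1 - self;

	sem_wait(&sched_sem[self]);	/* wait to be scheduled the first time */
	for (int i = 0; i < 3; i++) {
		printf("thread %d running\n", self);
		switch_to(self, other);
	}
	sem_post(&sched_sem[other]);	/* let the peer finish its loop too */
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	sem_init(&sched_sem[0], 0, 0);
	sem_init(&sched_sem[1], 0, 0);
	pthread_create(&t[0], NULL, worker, (void *)0L);
	pthread_create(&t[1], NULL, worker, (void *)1L);
	sem_post(&sched_sem[0]);	/* "schedule" thread 0 first */
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}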

kernel/sched/idle.c (1 addition, 2 deletions)

@@ -205,7 +205,7 @@ static void cpuidle_idle_call(void)
  *
  * Called with polling cleared.
  */
-void do_idle(void)
+static void do_idle(void)
 {
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
@@ -265,7 +265,6 @@ void do_idle(void)
 	sched_ttwu_pending();
 	schedule_preempt_disabled();
 }
-EXPORT_SYMBOL(do_idle);
 
 bool cpu_in_idle(unsigned long pc)
 {
