Run idle threads with preempt disabled.
Also corrects a bug in arm26's cpu_idle() (makes it actually call schedule()).
How did it ever work before?
Might fix the CPU hotplugging hang which Nigel Cunningham noted.
We think the bug hits if the idle thread is preempted after checking
need_resched() but before going to sleep, and the CPU is then offlined.
After stop_machine_run() completes, the CPU eventually returns from
preemption into the idle thread and goes to sleep: it keeps executing
the old idle loop and never gets a chance to call play_dead().
Disabling preemption in the idle threads, except around the explicit
call to schedule(), fixes this bug and makes the idle loops more robust
in general.
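For reference, after this patch every architecture's idle loop converges
on the same shape. A minimal sketch (arch_idle() here is only a stand-in
for the per-arch sleep primitive such as default_idle(); it is not a real
kernel symbol):

	void cpu_idle(void)
	{
		/*
		 * Entered with preemption already disabled, so we cannot
		 * be preempted between the need_resched() check and the
		 * sleep instruction.
		 */
		while (1) {
			while (!need_resched())
				arch_idle();
			/*
			 * Drop the preempt count without rescheduling;
			 * the explicit schedule() below does the context
			 * switch, and preemption is disabled again before
			 * re-entering the inner loop.
			 */
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}
	}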
From: alexs <ashepard@u.washington.edu>
PPC build fix
From: Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
MIPS build fix
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
if (!idle)
idle = default_idle;
- preempt_disable();
leds_event(led_idle_start);
while (!need_resched())
idle();
leds_event(led_idle_end);
- preempt_enable();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = smp_processor_id();
printk("CPU%u: Booted secondary processor\n", cpu);
local_flush_tlb_all();
cpu_init();
+ preempt_disable();
/*
* Give the platform a chance to do its own initialisation.
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
- preempt_disable();
while (1) {
- while (!need_resched()) {
- local_irq_disable();
- if (!need_resched() && !hlt_counter)
- local_irq_enable();
- }
+ while (!need_resched())
+ cpu_relax();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
}
- schedule();
}
static char reboot_mode = 'h';
REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
unmask_irq(IPI_INTR_VECT);
unmask_irq(TIMER_INTR_VECT);
+ preempt_disable();
local_irq_enable();
cpu_set(cpu, cpu_online_map);
idle = default_idle;
idle();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
*/
void cpu_idle(void)
{
+ int cpu = smp_processor_id();
+
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
- irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+ irq_stat[cpu].idle_timestamp = jiffies;
if (!frv_dma_inprogress && idle)
idle();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
void default_idle(void)
{
- while(1) {
- if (!need_resched()) {
- local_irq_enable();
- __asm__("sleep");
- local_irq_disable();
- }
- schedule();
- }
+ local_irq_disable();
+ if (!need_resched()) {
+ local_irq_enable();
+ /* XXX: race here! What if need_resched() gets set now? */
+ __asm__("sleep");
+ } else
+ local_irq_enable();
}
#else
void default_idle(void)
{
- while(1) {
- if (need_resched())
- schedule();
- }
+ cpu_relax();
}
#endif
void (*idle)(void) = default_idle;
*/
void cpu_idle(void)
{
- idle();
+ while (1) {
+ while (!need_resched())
+ idle();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
}
void machine_restart(char * __unused)
*/
void cpu_idle(void)
{
- int cpu = raw_smp_processor_id();
+ int cpu = smp_processor_id();
/* endless idle loop with no priority at all */
while (1) {
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
* things done here to the most necessary things.
*/
cpu_init();
+ preempt_disable();
smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
rep_nop();
#ifdef CONFIG_SMP
normal_xtp();
#endif
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
check_pgt_cache();
if (cpu_is_offline(smp_processor_id()))
play_dead();
Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
efi_map_pal_code();
cpu_init();
+ preempt_disable();
smp_callin();
cpu_idle();
idle();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
int __init start_secondary(void *unused)
{
cpu_init();
+ preempt_disable();
smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
cpu_relax();
while (1) {
while (!need_resched())
idle();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
while (!need_resched())
if (cpu_wait)
(*cpu_wait)();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
*/
asmlinkage void start_secondary(void)
{
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
cpu_probe();
cpu_report();
*/
calibrate_delay();
+ preempt_disable();
+ cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
prom_smp_finish();
while (1) {
while (!need_resched())
barrier();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
check_pgt_cache();
}
}
#endif
smp_cpu_init(slave_id);
+ preempt_disable();
#if 0 /* NOT WORKING YET - see entry.S */
istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
if (hvlpevent_is_pending())
process_iSeries_events();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
}
ppc64_runlatch_on();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
lpaca->lppaca.idle = 0;
ppc64_runlatch_on();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
cpu_die();
lpaca->lppaca.idle = 0;
ppc64_runlatch_on();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
cpu_die();
}
#endif
}
- if (need_resched())
- schedule();
- if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
- cpu_die();
}
/*
*/
void cpu_idle(void)
{
- for (;;)
+ int cpu = smp_processor_id();
+
+ for (;;) {
if (ppc_md.idle != NULL)
ppc_md.idle();
else
default_idle();
+ if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+ cpu_die();
+ if (need_resched()) {
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+
+ }
}
#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
cpu = smp_processor_id();
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
+ preempt_disable();
cpu_callin_map[cpu] = 1;
printk("CPU %d done callin...\n", cpu);
}
ppc64_runlatch_on();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
cpu_die();
}
if (need_resched()) {
ppc64_runlatch_on();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
if (cpu_is_offline(smp_processor_id()) &&
local_irq_disable();
if (need_resched()) {
local_irq_enable();
- schedule();
return;
}
void cpu_idle(void)
{
- for (;;)
- default_idle();
+ for (;;) {
+ while (!need_resched())
+ default_idle();
+
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
}
void show_regs(struct pt_regs *regs)
{
/* Setup the cpu */
cpu_init();
+ preempt_disable();
/* init per CPU timer */
init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
cpu_sleep();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
int start_secondary(void *unused)
{
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = smp_processor_id();
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
__smp_slave_init(cpu);
+ preempt_disable();
per_cpu_trap_init();
atomic_inc(&cpus_booted);
}
local_irq_enable();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
(*pm_idle)();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
check_pgt_cache();
}
}
/* endless idle loop with no priority at all */
while(1) {
if(need_resched()) {
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
check_pgt_cache();
}
barrier(); /* or else gcc optimizes... */
while (!need_resched())
barrier();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
check_pgt_cache();
}
}
if (need_resched()) {
unidle_me();
clear_thread_flag(TIF_POLLING_NRFLAG);
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
set_thread_flag(TIF_POLLING_NRFLAG);
check_pgt_cache();
}
rmb();
cpu_set(cpuid, cpu_online_map);
+
+ /* idle thread is expected to have preempt disabled */
+ preempt_disable();
}
void cpu_panic(void)
/* The idle loop. */
void default_idle (void)
{
- while (1) {
- while (! need_resched ())
- asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
- schedule ();
- }
+ while (! need_resched ())
+ asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
}
void (*idle)(void) = default_idle;
void cpu_idle (void)
{
/* endless idle loop with no priority at all */
- (*idle) ();
+ while (1) {
+ while (!need_resched())
+ (*idle) ();
+
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
}
/*
idle();
}
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
* things done here to the most necessary things.
*/
cpu_init();
+ preempt_disable();
smp_callin();
/* otherwise gcc will move up the smp_processor_id before the cpu_init */
while (1) {
while (!need_resched())
platform_idle();
- preempt_enable();
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
}
}
kernel_thread(init, NULL, CLONE_FS | CLONE_SIGHAND);
numa_default_policy();
unlock_kernel();
- preempt_enable_no_resched();
/*
* The boot idle thread must execute schedule()
* at least once to get things moving:
*/
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
+ /* Call into cpu_idle with preempt disabled */
cpu_idle();
}