| author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2025-11-27 15:47:23 +0100 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2025-12-10 15:49:11 +0900 |
| commit | c94291914b200e10c72cef23c8e4c67eb4fdbcd9 | |
| tree | c5b084ea2821f2be4607cab4e5b83d4bd182dd6d | |
| parent | cb015814f8b6eebcbb8e46e111d108892c5e6821 | |
cpu: Make atomic hotplug callbacks run with interrupts disabled on UP
On SMP systems the CPU hotplug callbacks in the "starting" range are
invoked while the CPU is brought up and interrupts are still
disabled. Callbacks which are added later are invoked via the
hotplug-thread on the target CPU and interrupts are explicitly disabled.
In the UP case callbacks which are added later are invoked directly without
the thread indirection. This is in principle okay since there is just one
CPU, but those callbacks are invoked with interrupts enabled. That's
incorrect as those callbacks assume interrupt-disabled context.
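For context, a callback in the atomic "starting" range is typically registered as sketched below. This is an illustration, not part of the patch: the state entry CPUHP_AP_FOO_STARTING, the function names and the lockdep assertion are placeholders. The assertion marks the context such callbacks assume, which the UP fast path did not provide before this change.

```c
#include <linux/cpuhotplug.h>
#include <linux/irqflags.h>
#include <linux/init.h>

/* Hypothetical callback for a fixed entry in the STARTING range. */
static int foo_starting_cpu(unsigned int cpu)
{
	/* Context the callback relies on; the broken UP path violated it. */
	lockdep_assert_irqs_disabled();
	/* Program per-CPU state of the (hypothetical) foo device here. */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * CPUHP_AP_FOO_STARTING is a placeholder: atomic states are fixed
	 * enum entries in <linux/cpuhotplug.h>, not dynamically allocated.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "foo:starting",
				 foo_starting_cpu, NULL);
}
```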
Disable interrupts before invoking the callbacks on UP if the state is
atomic and interrupts are expected to be disabled. The "save" part is
required because this is also invoked early in the boot process while
interrupts are disabled and must not be enabled prematurely.
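For illustration only, the irqsave guard used in the patch below behaves roughly like this open-coded sequence; the helper name is invented for the sketch. local_irq_save() records whether interrupts were already disabled, e.g. during early boot, and local_irq_restore() puts that state back instead of unconditionally re-enabling.

```c
#include <linux/irqflags.h>
#include <linux/bug.h>
#include <linux/types.h>

/*
 * Sketch only: cpuhp_invoke_callback_irqoff() is a hypothetical helper
 * which would live next to cpuhp_invoke_callback() in kernel/cpu.c.
 */
static int cpuhp_invoke_callback_irqoff(unsigned int cpu, enum cpuhp_state state,
					bool bringup, struct hlist_node *node)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);		/* remembers if IRQs were already off */
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
	WARN_ON_ONCE(ret);		/* atomic STARTING/DYING must not fail */
	local_irq_restore(flags);	/* restores the prior state, never force-enables */
	return ret;
}
```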
Fixes: 06ddd17521bf1 ("sched/smp: Always define is_percpu_thread() and scheduler_ipi()")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://patch.msgid.link/20251127144723.ev9DuXXR@linutronix.de
| -rw-r--r-- | kernel/cpu.c | 25 |
1 files changed, 16 insertions, 9 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b674fdf96208be..8df2d773fe3b52 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -249,6 +249,14 @@ err:
 	return ret;
 }
 
+/*
+ * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
 #ifdef CONFIG_SMP
 static bool cpuhp_is_ap_state(enum cpuhp_state state)
 {
@@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 	complete(done);
 }
 
-/*
- * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
- */
-static bool cpuhp_is_atomic_state(enum cpuhp_state state)
-{
-	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
-}
-
 /* Synchronization state management */
 enum cpuhp_sync_state {
 	SYNC_STATE_DEAD,
@@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
 	else
 		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #else
-	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+	if (cpuhp_is_atomic_state(state)) {
+		guard(irqsave)();
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+		/* STARTING/DYING must not fail! */
+		WARN_ON_ONCE(ret);
+	} else {
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+	}
 #endif
 	BUG_ON(ret && !bringup);
 	return ret;
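Usage note, not part of the change: guard(irqsave)() comes from the kernel's scope-based cleanup helpers in <linux/cleanup.h>, which build on the compiler's cleanup attribute so the saved interrupt state is restored automatically when the scope ends. A minimal, self-contained userspace sketch of that pattern follows; every name in it is invented for the illustration.

```c
#include <stdio.h>

/* Stand-in for local_irq_save() in this sketch. */
static void fake_irq_save(unsigned long *flags)
{
	*flags = 1;
	puts("irqs off");
}

/* The cleanup attribute runs this when the guard variable leaves scope,
 * playing the role of local_irq_restore(). */
static void fake_irq_restore(unsigned long *flags)
{
	(void)*flags;
	puts("irqs restored to previous state");
}

#define IRQ_GUARD(name) \
	unsigned long name __attribute__((cleanup(fake_irq_restore))); \
	fake_irq_save(&name)

int main(void)
{
	{
		IRQ_GUARD(flags);
		puts("callback runs with irqs off");
	}	/* fake_irq_restore() runs here, like leaving the guard scope */
	puts("back in the previous interrupt state");
	return 0;
}
```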
