diff options
author | Avi Kivity <avi@qumranet.com> | 2007-05-24 05:23:10 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2007-07-16 05:05:49 -0400 |
commit | db912f963909b3cbc3a059b7528f6a1a1eb6ffae (patch) | |
tree | 85d112b2879782fda2b2ca77413ac73361d0d3b0 /kernel/cpu.c | |
parent | e495606dd09d79f9fa496334ac3958f6ff179d82 (diff) |
HOTPLUG: Add CPU_DYING notifier
KVM wants a notification when a CPU is about to die, so that it can disable
hardware virtualization extensions — and it needs that notification at a time
when user processes can no longer be scheduled on the CPU, so that no process
tries to use the virtualization extensions after they have been disabled.
This adds a CPU_DYING notification. The notification is called in atomic
context on the doomed CPU.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r-- | kernel/cpu.c | 16 |
1 files changed, 14 insertions, 2 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c index 208cf3497c10..181ae7086029 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -103,11 +103,19 @@ static inline void check_for_tasks(int cpu) | |||
103 | write_unlock_irq(&tasklist_lock); | 103 | write_unlock_irq(&tasklist_lock); |
104 | } | 104 | } |
105 | 105 | ||
106 | struct take_cpu_down_param { | ||
107 | unsigned long mod; | ||
108 | void *hcpu; | ||
109 | }; | ||
110 | |||
106 | /* Take this CPU down. */ | 111 | /* Take this CPU down. */ |
107 | static int take_cpu_down(void *unused) | 112 | static int take_cpu_down(void *_param) |
108 | { | 113 | { |
114 | struct take_cpu_down_param *param = _param; | ||
109 | int err; | 115 | int err; |
110 | 116 | ||
117 | raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, | ||
118 | param->hcpu); | ||
111 | /* Ensure this CPU doesn't handle any more interrupts. */ | 119 | /* Ensure this CPU doesn't handle any more interrupts. */ |
112 | err = __cpu_disable(); | 120 | err = __cpu_disable(); |
113 | if (err < 0) | 121 | if (err < 0) |
@@ -127,6 +135,10 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) | |||
127 | cpumask_t old_allowed, tmp; | 135 | cpumask_t old_allowed, tmp; |
128 | void *hcpu = (void *)(long)cpu; | 136 | void *hcpu = (void *)(long)cpu; |
129 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; | 137 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; |
138 | struct take_cpu_down_param tcd_param = { | ||
139 | .mod = mod, | ||
140 | .hcpu = hcpu, | ||
141 | }; | ||
130 | 142 | ||
131 | if (num_online_cpus() == 1) | 143 | if (num_online_cpus() == 1) |
132 | return -EBUSY; | 144 | return -EBUSY; |
@@ -153,7 +165,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) | |||
153 | set_cpus_allowed(current, tmp); | 165 | set_cpus_allowed(current, tmp); |
154 | 166 | ||
155 | mutex_lock(&cpu_bitmask_lock); | 167 | mutex_lock(&cpu_bitmask_lock); |
156 | p = __stop_machine_run(take_cpu_down, NULL, cpu); | 168 | p = __stop_machine_run(take_cpu_down, &tcd_param, cpu); |
157 | mutex_unlock(&cpu_bitmask_lock); | 169 | mutex_unlock(&cpu_bitmask_lock); |
158 | 170 | ||
159 | if (IS_ERR(p) || cpu_online(cpu)) { | 171 | if (IS_ERR(p) || cpu_online(cpu)) { |