Diffstat (limited to 'kernel/stop_machine.c')
 -rw-r--r--  kernel/stop_machine.c | 286
 1 file changed, 130 insertions(+), 156 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecbd..af3c7cea258b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,4 +1,4 @@
-/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
+/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
  * GPL v2 and any later version.
  */
 #include <linux/cpu.h>
@@ -13,203 +13,177 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>

-/* Since we effect priority and affinity (both of which are visible
- * to, and settable by outside processes) we do indirection via a
- * kthread. */
-
-/* Thread to stop each CPU in user context. */
+/* This controls the threads on each CPU. */
 enum stopmachine_state {
-        STOPMACHINE_WAIT,
+        /* Dummy starting state for thread. */
+        STOPMACHINE_NONE,
+        /* Awaiting everyone to be scheduled. */
         STOPMACHINE_PREPARE,
+        /* Disable interrupts. */
         STOPMACHINE_DISABLE_IRQ,
+        /* Run the function */
+        STOPMACHINE_RUN,
+        /* Exit */
         STOPMACHINE_EXIT,
 };
+static enum stopmachine_state state;

-static enum stopmachine_state stopmachine_state;
-static unsigned int stopmachine_num_threads;
-static atomic_t stopmachine_thread_ack;
+struct stop_machine_data {
+        int (*fn)(void *);
+        void *data;
+        int fnret;
+};

-static int stopmachine(void *cpu)
-{
-        int irqs_disabled = 0;
-        int prepared = 0;
-
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
-
-        /* Ack: we are alive */
-        smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-        atomic_inc(&stopmachine_thread_ack);
-
-        /* Simple state machine */
-        while (stopmachine_state != STOPMACHINE_EXIT) {
-                if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
-                    && !irqs_disabled) {
-                        local_irq_disable();
-                        hard_irq_disable();
-                        irqs_disabled = 1;
-                        /* Ack: irqs disabled. */
-                        smp_mb(); /* Must read state first. */
-                        atomic_inc(&stopmachine_thread_ack);
-                } else if (stopmachine_state == STOPMACHINE_PREPARE
-                           && !prepared) {
-                        /* Everyone is in place, hold CPU. */
-                        preempt_disable();
-                        prepared = 1;
-                        smp_mb(); /* Must read state first. */
-                        atomic_inc(&stopmachine_thread_ack);
-                }
-                /* Yield in first stage: migration threads need to
-                 * help our sisters onto their CPUs. */
-                if (!prepared && !irqs_disabled)
-                        yield();
-                cpu_relax();
-        }
-
-        /* Ack: we are exiting. */
-        smp_mb(); /* Must read state first. */
-        atomic_inc(&stopmachine_thread_ack);
-
-        if (irqs_disabled)
-                local_irq_enable();
-        if (prepared)
-                preempt_enable();
+/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+static unsigned int num_threads;
+static atomic_t thread_ack;
+static struct completion finished;
+static DEFINE_MUTEX(lock);

-        return 0;
-}
+/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */

-/* Change the thread state */
-static void stopmachine_set_state(enum stopmachine_state state)
+static void set_state(enum stopmachine_state newstate)
 {
-        atomic_set(&stopmachine_thread_ack, 0);
+        /* Reset ack counter. */
+        atomic_set(&thread_ack, num_threads);
         smp_wmb();
-        stopmachine_state = state;
-        while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
-                cpu_relax();
+        state = newstate;
 }

-static int stop_machine(void)
+/* Last one to ack a state moves to the next state. */
+static void ack_state(void)
 {
-        int i, ret = 0;
-
-        atomic_set(&stopmachine_thread_ack, 0);
-        stopmachine_num_threads = 0;
-        stopmachine_state = STOPMACHINE_WAIT;
-
-        for_each_online_cpu(i) {
-                if (i == raw_smp_processor_id())
-                        continue;
-                ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
-                if (ret < 0)
-                        break;
-                stopmachine_num_threads++;
-        }
-
-        /* Wait for them all to come to life. */
-        while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
-                yield();
-                cpu_relax();
+        if (atomic_dec_and_test(&thread_ack)) {
+                /* If we're the last one to ack the EXIT, we're finished. */
+                if (state == STOPMACHINE_EXIT)
+                        complete(&finished);
+                else
+                        set_state(state + 1);
         }
+}

-        /* If some failed, kill them all. */
-        if (ret < 0) {
-                stopmachine_set_state(STOPMACHINE_EXIT);
-                return ret;
-        }
+/* This is the actual thread which stops the CPU.  It exits by itself rather
+ * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
+static int stop_cpu(struct stop_machine_data *smdata)
+{
+        enum stopmachine_state curstate = STOPMACHINE_NONE;

-        /* Now they are all started, make them hold the CPUs, ready. */
-        preempt_disable();
-        stopmachine_set_state(STOPMACHINE_PREPARE);
+        /* Simple state machine */
+        do {
+                /* Chill out and ensure we re-read stopmachine_state. */
+                cpu_relax();
+                if (state != curstate) {
+                        curstate = state;
+                        switch (curstate) {
+                        case STOPMACHINE_DISABLE_IRQ:
+                                local_irq_disable();
+                                hard_irq_disable();
+                                break;
+                        case STOPMACHINE_RUN:
+                                /* |= allows error detection if functions on
+                                 * multiple CPUs. */
+                                smdata->fnret |= smdata->fn(smdata->data);
+                                break;
+                        default:
+                                break;
+                        }
+                        ack_state();
+                }
+        } while (curstate != STOPMACHINE_EXIT);

-        /* Make them disable irqs. */
-        local_irq_disable();
-        hard_irq_disable();
-        stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
+        local_irq_enable();
+        do_exit(0);
+}

+/* Callback for CPUs which aren't supposed to do anything. */
+static int chill(void *unused)
+{
         return 0;
 }

-static void restart_machine(void)
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-        stopmachine_set_state(STOPMACHINE_EXIT);
-        local_irq_enable();
-        preempt_enable_no_resched();
-}
+        int i, err;
+        struct stop_machine_data active, idle;
+        struct task_struct **threads;
+
+        active.fn = fn;
+        active.data = data;
+        active.fnret = 0;
+        idle.fn = chill;
+        idle.data = NULL;
+
+        /* This could be too big for stack on large machines. */
+        threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
+        if (!threads)
+                return -ENOMEM;
+
+        /* Set up initial state. */
+        mutex_lock(&lock);
+        init_completion(&finished);
+        num_threads = num_online_cpus();
+        set_state(STOPMACHINE_PREPARE);

-struct stop_machine_data {
-        int (*fn)(void *);
-        void *data;
-        struct completion done;
-};
+        for_each_online_cpu(i) {
+                struct stop_machine_data *smdata = &idle;
+                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

-static int do_stop(void *_smdata)
-{
-        struct stop_machine_data *smdata = _smdata;
-        int ret;
+                if (!cpus) {
+                        if (i == first_cpu(cpu_online_map))
+                                smdata = &active;
+                } else {
+                        if (cpu_isset(i, *cpus))
+                                smdata = &active;
+                }

-        ret = stop_machine();
-        if (ret == 0) {
-                ret = smdata->fn(smdata->data);
-                restart_machine();
-        }
+                threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
+                                            i);
+                if (IS_ERR(threads[i])) {
+                        err = PTR_ERR(threads[i]);
+                        threads[i] = NULL;
+                        goto kill_threads;
+                }

-        /* We're done: you can kthread_stop us now */
-        complete(&smdata->done);
+                /* Place it onto correct cpu. */
+                kthread_bind(threads[i], i);

-        /* Wait for kthread_stop */
-        set_current_state(TASK_INTERRUPTIBLE);
-        while (!kthread_should_stop()) {
-                schedule();
-                set_current_state(TASK_INTERRUPTIBLE);
+                /* Make it highest prio. */
+                if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
+                        BUG();
         }
-        __set_current_state(TASK_RUNNING);
-        return ret;
-}

-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-                                       unsigned int cpu)
-{
-        static DEFINE_MUTEX(stopmachine_mutex);
-        struct stop_machine_data smdata;
-        struct task_struct *p;
+        /* We've created all the threads.  Wake them all: hold this CPU so one
+         * doesn't hit this CPU until we're ready. */
+        get_cpu();
+        for_each_online_cpu(i)
+                wake_up_process(threads[i]);

-        smdata.fn = fn;
-        smdata.data = data;
-        init_completion(&smdata.done);
+        /* This will release the thread on our CPU. */
+        put_cpu();
+        wait_for_completion(&finished);
+        mutex_unlock(&lock);

-        mutex_lock(&stopmachine_mutex);
+        kfree(threads);

-        /* If they don't care which CPU fn runs on, bind to any online one. */
-        if (cpu == NR_CPUS)
-                cpu = raw_smp_processor_id();
+        return active.fnret;

-        p = kthread_create(do_stop, &smdata, "kstopmachine");
-        if (!IS_ERR(p)) {
-                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+kill_threads:
+        for_each_online_cpu(i)
+                if (threads[i])
+                        kthread_stop(threads[i]);
+        mutex_unlock(&lock);

-                /* One high-prio thread per cpu. We'll do this one. */
-                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-                kthread_bind(p, cpu);
-                wake_up_process(p);
-                wait_for_completion(&smdata.done);
-        }
-        mutex_unlock(&stopmachine_mutex);
-        return p;
+        kfree(threads);
+        return err;
 }

-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-        struct task_struct *p;
         int ret;

         /* No CPUs can come up or down during this. */
         get_online_cpus();
-        p = __stop_machine_run(fn, data, cpu);
-        if (!IS_ERR(p))
-                ret = kthread_stop(p);
-        else
-                ret = PTR_ERR(p);
+        ret = __stop_machine(fn, data, cpus);
         put_online_cpus();

         return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
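
The heart of the new implementation is the lock-step handshake between set_state() and ack_state(): set_state() arms thread_ack with num_threads and then publishes the next state, each stop_cpu() thread acknowledges each state exactly once, and the last thread to acknowledge advances the machine (or fires `finished` on EXIT). The state therefore cannot advance until every CPU has passed through the current stage. The stand-alone sketch below reproduces that protocol with plain C11 threads and atomics; the names mirror the patch, but kthreads, IRQ disabling, and the completion are deliberately omitted, and sequentially consistent atomic stores stand in for the kernel's explicit smp_wmb()/cpu_relax() pairing. It is an illustration of the handshake only, not kernel code.

/* Build with: cc -std=c11 -pthread lockstep.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

enum { NONE, PREPARE, RUN, DONE };      /* mirrors stopmachine_state */

static atomic_int state = NONE;
static atomic_int thread_ack;

/* Arm the ack counter, then publish the next state (cf. set_state()). */
static void set_state(int newstate)
{
        atomic_store(&thread_ack, NTHREADS);
        atomic_store(&state, newstate);
}

/* Last thread to ack the current state advances it (cf. ack_state()). */
static void ack_state(void)
{
        if (atomic_fetch_sub(&thread_ack, 1) == 1 && atomic_load(&state) != DONE)
                set_state(atomic_load(&state) + 1);
}

/* Per-thread loop: ack each observed state exactly once, like stop_cpu(). */
static void *stop_cpu(void *arg)
{
        long id = (long)arg;
        int curstate = NONE;

        do {
                int s = atomic_load(&state);
                if (s != curstate) {
                        curstate = s;
                        if (curstate == RUN)
                                printf("thread %ld: run\n", id);
                        ack_state();
                }
        } while (curstate != DONE);
        return NULL;
}

int main(void)
{
        pthread_t tid[NTHREADS];
        long i;

        for (i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, stop_cpu, (void *)i);
        set_state(PREPARE);     /* walks PREPARE -> RUN -> DONE in lock-step */
        for (i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        return 0;
}

The last-acker-advances design is what lets the rewrite drop the old per-stage wait loop in stopmachine_set_state(): the initiating caller no longer spins after every transition, it simply blocks once on the `finished` completion, which the final EXIT acknowledgement signals.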
