Diffstat (limited to 'kernel/stop_machine.c')
 -rw-r--r--   kernel/stop_machine.c   120
 1 file changed, 47 insertions(+), 73 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index af3c7cea258..9bc4c00872c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -37,9 +37,13 @@ struct stop_machine_data {
 /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
 static unsigned int num_threads;
 static atomic_t thread_ack;
-static struct completion finished;
 static DEFINE_MUTEX(lock);
 
+static struct workqueue_struct *stop_machine_wq;
+static struct stop_machine_data active, idle;
+static const cpumask_t *active_cpus;
+static void *stop_machine_work;
+
 static void set_state(enum stopmachine_state newstate)
 {
 	/* Reset ack counter. */
@@ -51,21 +55,26 @@ static void set_state(enum stopmachine_state newstate)
 /* Last one to ack a state moves to the next state. */
 static void ack_state(void)
 {
-	if (atomic_dec_and_test(&thread_ack)) {
-		/* If we're the last one to ack the EXIT, we're finished. */
-		if (state == STOPMACHINE_EXIT)
-			complete(&finished);
-		else
-			set_state(state + 1);
-	}
+	if (atomic_dec_and_test(&thread_ack))
+		set_state(state + 1);
 }
 
-/* This is the actual thread which stops the CPU. It exits by itself rather
- * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
-static int stop_cpu(struct stop_machine_data *smdata)
+/* This is the actual function which stops the CPU. It runs
+ * in the context of a dedicated stopmachine workqueue. */
+static void stop_cpu(struct work_struct *unused)
 {
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
-
+	struct stop_machine_data *smdata = &idle;
+	int cpu = smp_processor_id();
+	int err;
+
+	if (!active_cpus) {
+		if (cpu == first_cpu(cpu_online_map))
+			smdata = &active;
+	} else {
+		if (cpu_isset(cpu, *active_cpus))
+			smdata = &active;
+	}
 	/* Simple state machine */
 	do {
 		/* Chill out and ensure we re-read stopmachine_state. */
@@ -78,9 +87,11 @@ static int stop_cpu(struct stop_machine_data *smdata)
 			hard_irq_disable();
 			break;
 		case STOPMACHINE_RUN:
-			/* |= allows error detection if functions on
-			 * multiple CPUs. */
-			smdata->fnret |= smdata->fn(smdata->data);
+			/* On multiple CPUs only a single error code
+			 * is needed to tell that something failed. */
+			err = smdata->fn(smdata->data);
+			if (err)
+				smdata->fnret = err;
 			break;
 		default:
 			break;
@@ -90,7 +101,6 @@ static int stop_cpu(struct stop_machine_data *smdata)
 	} while (curstate != STOPMACHINE_EXIT);
 
 	local_irq_enable();
-	do_exit(0);
 }
 
 /* Callback for CPUs which aren't supposed to do anything. */
@@ -101,78 +111,34 @@ static int chill(void *unused)
 
 int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-	int i, err;
-	struct stop_machine_data active, idle;
-	struct task_struct **threads;
+	struct work_struct *sm_work;
+	int i;
 
+	/* Set up initial state. */
+	mutex_lock(&lock);
+	num_threads = num_online_cpus();
+	active_cpus = cpus;
 	active.fn = fn;
 	active.data = data;
 	active.fnret = 0;
 	idle.fn = chill;
 	idle.data = NULL;
 
-	/* This could be too big for stack on large machines. */
-	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
-	if (!threads)
-		return -ENOMEM;
-
-	/* Set up initial state. */
-	mutex_lock(&lock);
-	init_completion(&finished);
-	num_threads = num_online_cpus();
 	set_state(STOPMACHINE_PREPARE);
 
-	for_each_online_cpu(i) {
-		struct stop_machine_data *smdata = &idle;
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-
-		if (!cpus) {
-			if (i == first_cpu(cpu_online_map))
-				smdata = &active;
-		} else {
-			if (cpu_isset(i, *cpus))
-				smdata = &active;
-		}
-
-		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
-					    i);
-		if (IS_ERR(threads[i])) {
-			err = PTR_ERR(threads[i]);
-			threads[i] = NULL;
-			goto kill_threads;
-		}
-
-		/* Place it onto correct cpu. */
-		kthread_bind(threads[i], i);
-
-		/* Make it highest prio. */
-		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
-			BUG();
-	}
-
-	/* We've created all the threads.  Wake them all: hold this CPU so one
+	/* Schedule the stop_cpu work on all cpus: hold this CPU so one
 	 * doesn't hit this CPU until we're ready. */
 	get_cpu();
-	for_each_online_cpu(i)
-		wake_up_process(threads[i]);
-
+	for_each_online_cpu(i) {
+		sm_work = percpu_ptr(stop_machine_work, i);
+		INIT_WORK(sm_work, stop_cpu);
+		queue_work_on(i, stop_machine_wq, sm_work);
+	}
 	/* This will release the thread on our CPU. */
 	put_cpu();
-	wait_for_completion(&finished);
+	flush_workqueue(stop_machine_wq);
 	mutex_unlock(&lock);
-
-	kfree(threads);
-
 	return active.fnret;
-
-kill_threads:
-	for_each_online_cpu(i)
-		if (threads[i])
-			kthread_stop(threads[i]);
-	mutex_unlock(&lock);
-
-	kfree(threads);
-	return err;
 }
 
 int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
@@ -187,3 +153,11 @@ int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
+
+static int __init stop_machine_init(void)
+{
+	stop_machine_wq = create_rt_workqueue("kstop");
+	stop_machine_work = alloc_percpu(struct work_struct);
+	return 0;
+}
+core_initcall(stop_machine_init);
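
For context on how the reworked path gets exercised: __stop_machine() now queues a stop_cpu() work item on every online CPU through the RT "kstop" workqueue and waits with flush_workqueue(), instead of creating, binding, and waking one SCHED_FIFO kernel thread per CPU. Below is a minimal caller sketch; the callback, flag, and module names are illustrative and not part of this patch, only the stop_machine() signature is taken from the diff above.

#include <linux/module.h>
#include <linux/stop_machine.h>

static unsigned long patched;	/* hypothetical state to flip atomically */

/* Hypothetical callback: with a NULL cpumask it runs on the first online
 * CPU while all other CPUs spin in stop_cpu() with hard interrupts
 * disabled, so no CPU can observe the store below half-done. */
static int apply_update(void *data)
{
	unsigned long *flag = data;

	*flag = 1;
	return 0;	/* a nonzero value would come back as stop_machine()'s result */
}

static int __init example_init(void)
{
	/* With this patch the per-CPU callbacks run as work items on the
	 * "kstop" workqueue created in stop_machine_init(). */
	return stop_machine(apply_update, &patched, NULL);
}
module_init(example_init);
MODULE_LICENSE("GPL");

Because __stop_machine() serializes callers under the same mutex and flushes the whole workqueue, the call returns only after every CPU has passed through STOPMACHINE_EXIT and re-enabled interrupts.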
