diff options
| author | Frederic Weisbecker <fweisbec@gmail.com> | 2014-02-24 10:39:58 -0500 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2014-02-24 17:46:58 -0500 |
| commit | 8b28499a71d3431c9128abc743e2d2bfbdae3ed4 (patch) | |
| tree | 3abe5d789fef0e0862d9568494268dc279c81bb1 /kernel | |
| parent | 08eed44c7249d381a099bc55577e55c6bb533160 (diff) | |
smp: Consolidate the various smp_call_function_single() declensions
__smp_call_function_single() and smp_call_function_single() share some
code that can be factorized: execute inline when the target is local,
check if the target is online, lock the csd, call generic_exec_single().
Let's move the common parts to generic_exec_single().
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jens Axboe <axboe@fb.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/smp.c | 80 |
1 file changed, 39 insertions, 41 deletions
diff --git a/kernel/smp.c b/kernel/smp.c index 5ff14e3739ca..64bb0d48e96f 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd) | |||
| 117 | csd->flags &= ~CSD_FLAG_LOCK; | 117 | csd->flags &= ~CSD_FLAG_LOCK; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | ||
| 121 | |||
| 120 | /* | 122 | /* |
| 121 | * Insert a previously allocated call_single_data element | 123 | * Insert a previously allocated call_single_data element |
| 122 | * for execution on the given CPU. data must already have | 124 | * for execution on the given CPU. data must already have |
| 123 | * ->func, ->info, and ->flags set. | 125 | * ->func, ->info, and ->flags set. |
| 124 | */ | 126 | */ |
| 125 | static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | 127 | static int generic_exec_single(int cpu, struct call_single_data *csd, |
| 128 | smp_call_func_t func, void *info, int wait) | ||
| 126 | { | 129 | { |
| 130 | struct call_single_data csd_stack = { .flags = 0 }; | ||
| 131 | unsigned long flags; | ||
| 132 | |||
| 133 | |||
| 134 | if (cpu == smp_processor_id()) { | ||
| 135 | local_irq_save(flags); | ||
| 136 | func(info); | ||
| 137 | local_irq_restore(flags); | ||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) | ||
| 143 | return -ENXIO; | ||
| 144 | |||
| 145 | |||
| 146 | if (!csd) { | ||
| 147 | csd = &csd_stack; | ||
| 148 | if (!wait) | ||
| 149 | csd = &__get_cpu_var(csd_data); | ||
| 150 | } | ||
| 151 | |||
| 152 | csd_lock(csd); | ||
| 153 | |||
| 154 | csd->func = func; | ||
| 155 | csd->info = info; | ||
| 156 | |||
| 127 | if (wait) | 157 | if (wait) |
| 128 | csd->flags |= CSD_FLAG_WAIT; | 158 | csd->flags |= CSD_FLAG_WAIT; |
| 129 | 159 | ||
| @@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | |||
| 143 | 173 | ||
| 144 | if (wait) | 174 | if (wait) |
| 145 | csd_lock_wait(csd); | 175 | csd_lock_wait(csd); |
| 176 | |||
| 177 | return 0; | ||
| 146 | } | 178 | } |
| 147 | 179 | ||
| 148 | /* | 180 | /* |
| @@ -168,8 +200,6 @@ void generic_smp_call_function_single_interrupt(void) | |||
| 168 | } | 200 | } |
| 169 | } | 201 | } |
| 170 | 202 | ||
| 171 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | ||
| 172 | |||
| 173 | /* | 203 | /* |
| 174 | * smp_call_function_single - Run a function on a specific CPU | 204 | * smp_call_function_single - Run a function on a specific CPU |
| 175 | * @func: The function to run. This must be fast and non-blocking. | 205 | * @func: The function to run. This must be fast and non-blocking. |
| @@ -181,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |||
| 181 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | 211 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, |
| 182 | int wait) | 212 | int wait) |
| 183 | { | 213 | { |
| 184 | struct call_single_data d = { | ||
| 185 | .flags = 0, | ||
| 186 | }; | ||
| 187 | unsigned long flags; | ||
| 188 | int this_cpu; | 214 | int this_cpu; |
| 189 | int err = 0; | 215 | int err; |
| 190 | 216 | ||
| 191 | /* | 217 | /* |
| 192 | * prevent preemption and reschedule on another processor, | 218 | * prevent preemption and reschedule on another processor, |
| @@ -203,26 +229,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | |||
| 203 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | 229 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
| 204 | && !oops_in_progress); | 230 | && !oops_in_progress); |
| 205 | 231 | ||
| 206 | if (cpu == this_cpu) { | 232 | err = generic_exec_single(cpu, NULL, func, info, wait); |
| 207 | local_irq_save(flags); | ||
| 208 | func(info); | ||
| 209 | local_irq_restore(flags); | ||
| 210 | } else { | ||
| 211 | if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | ||
| 212 | struct call_single_data *csd = &d; | ||
| 213 | |||
| 214 | if (!wait) | ||
| 215 | csd = &__get_cpu_var(csd_data); | ||
| 216 | |||
| 217 | csd_lock(csd); | ||
| 218 | |||
| 219 | csd->func = func; | ||
| 220 | csd->info = info; | ||
| 221 | generic_exec_single(cpu, csd, wait); | ||
| 222 | } else { | ||
| 223 | err = -ENXIO; /* CPU not online */ | ||
| 224 | } | ||
| 225 | } | ||
| 226 | 233 | ||
| 227 | put_cpu(); | 234 | put_cpu(); |
| 228 | 235 | ||
| @@ -285,9 +292,8 @@ EXPORT_SYMBOL_GPL(smp_call_function_any); | |||
| 285 | */ | 292 | */ |
| 286 | int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait) | 293 | int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait) |
| 287 | { | 294 | { |
| 288 | unsigned int this_cpu; | ||
| 289 | unsigned long flags; | ||
| 290 | int err = 0; | 295 | int err = 0; |
| 296 | int this_cpu; | ||
| 291 | 297 | ||
| 292 | this_cpu = get_cpu(); | 298 | this_cpu = get_cpu(); |
| 293 | /* | 299 | /* |
| @@ -296,20 +302,12 @@ int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait) | |||
| 296 | * send smp call function interrupt to this cpu and as such deadlocks | 302 | * send smp call function interrupt to this cpu and as such deadlocks |
| 297 | * can't happen. | 303 | * can't happen. |
| 298 | */ | 304 | */ |
| 299 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | 305 | WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled() |
| 300 | && !oops_in_progress); | 306 | && !oops_in_progress); |
| 301 | 307 | ||
| 302 | if (cpu == this_cpu) { | 308 | err = generic_exec_single(cpu, csd, csd->func, csd->info, wait); |
| 303 | local_irq_save(flags); | ||
| 304 | csd->func(csd->info); | ||
| 305 | local_irq_restore(flags); | ||
| 306 | } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | ||
| 307 | csd_lock(csd); | ||
| 308 | generic_exec_single(cpu, csd, wait); | ||
| 309 | } else { | ||
| 310 | err = -ENXIO; /* CPU not online */ | ||
| 311 | } | ||
| 312 | put_cpu(); | 309 | put_cpu(); |
| 310 | |||
| 313 | return err; | 311 | return err; |
| 314 | } | 312 | } |
| 315 | EXPORT_SYMBOL_GPL(__smp_call_function_single); | 313 | EXPORT_SYMBOL_GPL(__smp_call_function_single); |
