author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-15 12:19:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-15 12:19:38 -0400
commit     227423904c709a8e60245c97081bbeb4fb500655
tree       97db1b8df1e4518334aea2fdf60363e0a691eb1e /kernel
parent     1aaf2e59135fd67321f47c11c64a54aac27014e9
parent     fa526d0d641b5365676a1fb821ce359e217c9b85
Merge branch 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, pat: Fix cacheflush address in change_page_attr_set_clr()
mm: remove !NUMA condition from PAGEFLAGS_EXTENDED condition set
x86: Fix earlyprintk=dbgp for machines without NX
x86, pat: Sanity check remap_pfn_range for RAM region
x86, pat: Lookup the protection from memtype list on vm_insert_pfn()
x86, pat: Add lookup_memtype to get the current memtype of a paddr
x86, pat: Use page flags to track memtypes of RAM pages
x86, pat: Generalize the use of page flag PG_uncached
x86, pat: Add rbtree to do quick lookup in memtype tracking
x86, pat: Add PAT reserve free to io_mapping* APIs
x86, pat: New i/f for driver to request memtype for IO regions
x86, pat: ioremap to follow same PAT restrictions as other PAT users
x86, pat: Keep identity maps consistent with mmaps even when pat_disabled
x86, mtrr: make mtrr_aps_delayed_init static bool
x86, pat/mtrr: Rendezvous all the cpus for MTRR/PAT init
generic-ipi: Allow cpus not yet online to call smp_call_function with irqs disabled
x86: Fix an incorrect argument of reserve_bootmem()
x86: Fix system crash when loading with "reservetop" parameter
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cpu.c   14
-rw-r--r--   kernel/smp.c   40
2 files changed, 48 insertions(+), 6 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 67a60076dd7..6ba0f1ecb21 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -414,6 +414,14 @@ int disable_nonboot_cpus(void)
 	return error;
 }
 
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
 void __ref enable_nonboot_cpus(void)
 {
 	int cpu, error;
@@ -425,6 +433,9 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
+
+	arch_enable_nonboot_cpus_begin();
+
 	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
@@ -433,6 +444,9 @@ void __ref enable_nonboot_cpus(void)
 		}
 		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
 	}
+
+	arch_enable_nonboot_cpus_end();
+
 	cpumask_clear(frozen_cpus);
 out:
 	cpu_maps_update_done();
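The kernel/cpu.c half of this merge only adds two empty __weak hooks bracketing the resume-time CPU bring-up loop; an architecture that links in strong definitions of the same symbols gets called before and after the loop (x86 uses this for the MTRR/PAT rendezvous named in the shortlog). Below is a minimal userspace model of the weak-symbol override mechanism, illustrative only and not kernel code:

/* weak_hooks.c -- illustrative sketch; build with: gcc weak_hooks.c -o demo */
#include <stdio.h>

/* Weak defaults, mirroring the __weak stubs added to kernel/cpu.c
 * (in the kernel the default bodies are empty). */
void __attribute__((weak)) arch_enable_nonboot_cpus_begin(void)
{
	puts("generic begin hook (weak default)");
}

void __attribute__((weak)) arch_enable_nonboot_cpus_end(void)
{
	puts("generic end hook (weak default)");
}

/* Stand-in for enable_nonboot_cpus(): bracket bring-up with the hooks. */
static void enable_nonboot_cpus_model(void)
{
	arch_enable_nonboot_cpus_begin();
	puts("... bringing secondary CPUs back up ...");
	arch_enable_nonboot_cpus_end();
}

int main(void)
{
	/*
	 * Linking a second object file that defines these two symbols
	 * non-weak (as arch code does in the kernel) silently replaces
	 * the bodies above; with no override, the defaults run.
	 */
	enable_nonboot_cpus_model();
	return 0;
}

Since the weak defaults are empty in the kernel, architectures that don't care pay nothing; per the shortlog, x86 plugs in here to defer per-CPU MTRR/PAT programming until all CPUs are back up.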
diff --git a/kernel/smp.c b/kernel/smp.c
index 94188b8ecc3..8e218500ab1 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -177,6 +177,11 @@ void generic_smp_call_function_interrupt(void)
 	int cpu = get_cpu();
 
 	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(cpu));
+
+	/*
 	 * Ensure entry is visible on call_function_queue after we have
 	 * entered the IPI. See comment in smp_call_function_many.
 	 * If we don't have this, then we may miss an entry on the list
@@ -230,6 +235,11 @@ void generic_smp_call_function_single_interrupt(void)
 	unsigned int data_flags;
 	LIST_HEAD(list);
 
+	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
 	spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
 	spin_unlock(&q->lock);
@@ -285,8 +295,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	 */
 	this_cpu = get_cpu();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
@@ -329,8 +345,14 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 {
 	csd_lock(data);
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+		     && !oops_in_progress);
 
 	generic_exec_single(cpu, data, wait);
 }
@@ -365,8 +387,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	unsigned long flags;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
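The kernel/smp.c changes implement the generic-ipi commit from the shortlog: a CPU not yet set in cpu_online_mask cannot be picked as a call-function IPI target, so it may safely wait with interrupts disabled, while the interrupt handlers gain a WARN_ON_ONCE to catch stray IPIs arriving on not-yet-online CPUs. A hypothetical caller showing the case the relaxed check now permits (names and flow are illustrative, not taken from the merged patches):

/* Sketch of a secondary-CPU bring-up path; kernel-style, not standalone. */
static void sync_arch_state(void *unused)
{
	/* Runs on every online CPU via the call-function IPI. */
}

static void example_start_secondary(void)	/* hypothetical */
{
	/* Here irqs are still disabled and this CPU is not yet online. */

	/*
	 * Safe despite irqs_disabled(): no online CPU can pick us from
	 * cpu_online_mask as an IPI target, so nothing can be waiting on
	 * our (unreachable) handler while we wait on theirs. The relaxed
	 * WARN_ON_ONCE checks above stay silent because
	 * cpu_online(smp_processor_id()) is still false.
	 */
	smp_call_function(sync_arch_state, NULL, 1);

	set_cpu_online(smp_processor_id(), true);	/* now an IPI target */
	local_irq_enable();
}

This is exactly the shape the x86 MTRR/PAT rendezvous needed: the boot path of a coming-up CPU synchronizes with the already-online CPUs before marking itself online and enabling interrupts.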
