 arch/x86/kernel/microcode_intel_early.c |  5 +++--
 arch/x86/kernel/process.c               |  5 ++---
 arch/x86/mm/init.c                      | 19 ++++++++++++++++++-
 3 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac9..2e9e12871c2b 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
 	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
 	 * hotplug.
 	 */
-	cpu_hotplug_driver_lock();
+	mutex_lock(&x86_cpu_microcode_mutex);
 
 	mc_saved_count_init = mc_saved_data.mc_saved_count;
 	mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
 	}
 
 out:
-	cpu_hotplug_driver_unlock();
+	mutex_unlock(&x86_cpu_microcode_mutex);
 
 	return ret;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 607af0d4d5ef..4e7a37ff03ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
 	if (cpuidle_idle_call())
 		x86_idle();
+	else
+		local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-	if (need_resched())
-		return;
-
 	if (!amd_e400_c1e_detected) {
 		u32 lo, hi;
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fdc5dca14fb3..eaac1743def7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
 					   unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
 	max_pfn_mapped = 0; /* will get exact value next */
 	min_pfn_mapped = real_end >> PAGE_SHIFT;
 	last_start = start = real_end;
+
+	/*
+	 * We start from the top (end of memory) and go to the bottom.
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
 	while (last_start > ISA_END_ADDRESS) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);