author    David S. Miller <davem@davemloft.net>  2013-05-24 19:48:28 -0400
committer David S. Miller <davem@davemloft.net>  2013-05-24 19:48:28 -0400
commit    e6ff4c75f9095f61b3a66c2a78e47b62864022dd (patch)
tree      425ea9463cbec0b1975b8a33d9a56817143055d0 /arch/x86
parent    ee9c799c231324de681eb21e06d8bf4842768b75 (diff)
parent    0e255f1c0c9add2f0c920240ac4cadc28ae274c3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Merge net into net-next because some upcoming net-next changes
build on top of bug fixes that went into net.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                         |  1
-rw-r--r--  arch/x86/kernel/head64.c                 |  2
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c  |  5
-rw-r--r--  arch/x86/kernel/process.c                |  5
-rw-r--r--  arch/x86/mm/init.c                       | 19
-rw-r--r--  arch/x86/pci/mrst.c                      | 10
6 files changed, 28 insertions(+), 14 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a154a91c7e7..685692c94f05 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,7 +108,6 @@ config X86
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL if X86_64
 	select KTIME_SCALAR if X86_32
-	select ALWAYS_USE_PERSISTENT_CLOCK
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index dab95a85f7f8..55b67614ed94 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -34,7 +34,7 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac9..2e9e12871c2b 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
 	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
 	 * hotplug.
 	 */
-	cpu_hotplug_driver_lock();
+	mutex_lock(&x86_cpu_microcode_mutex);
 
 	mc_saved_count_init = mc_saved_data.mc_saved_count;
 	mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
 	}
 
 out:
-	cpu_hotplug_driver_unlock();
+	mutex_unlock(&x86_cpu_microcode_mutex);
 
 	return ret;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 607af0d4d5ef..4e7a37ff03ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
 	if (cpuidle_idle_call())
 		x86_idle();
+	else
+		local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-	if (need_resched())
-		return;
-
 	if (!amd_e400_c1e_detected) {
 		u32 lo, hi;
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fdc5dca14fb3..eaac1743def7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
 	unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
 	max_pfn_mapped = 0; /* will get exact value next */
 	min_pfn_mapped = real_end >> PAGE_SHIFT;
 	last_start = start = real_end;
+
+	/*
+	 * We start from the top (end of memory) and go to the bottom.
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
 	while (last_start > ISA_END_ADDRESS) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 0e0fabf17342..6eb18c42a28a 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -141,11 +141,6 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
-	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
-				|| devfn == PCI_DEVFN(0, 0)
-				|| devfn == PCI_DEVFN(3, 0)))
-		return 1;
-
 	/* This is a workaround for A0 LNC bug where PCI status register does
 	 * not have new CAP bit set. can not be written by SW either.
 	 *
@@ -155,7 +150,10 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 	 */
 	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
 		return 0;
-
+	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+				|| devfn == PCI_DEVFN(0, 0)
+				|| devfn == PCI_DEVFN(3, 0)))
+		return 1;
 	return 0; /* langwell on others */
 }
 