Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig.cpu                  |  1
-rw-r--r--  arch/x86/include/asm/vmi.h            |  8
-rw-r--r--  arch/x86/kernel/amd_iommu.c           | 18
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c      |  7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c   |  3
-rw-r--r--  arch/x86/kernel/microcode_core.c      | 19
-rw-r--r--  arch/x86/kernel/microcode_intel.c     |  6
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c  |  3
-rw-r--r--  arch/x86/kernel/pci-gart_64.c         |  4
-rw-r--r--  arch/x86/kernel/setup.c               | 12
-rw-r--r--  arch/x86/kernel/smpboot.c             |  2
-rw-r--r--  arch/x86/kernel/vmi_32.c              | 16

12 files changed, 63 insertions(+), 36 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b815664fe370..8e99073b9e0f 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -520,6 +520,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	help
 	  This adds a ptrace interface to the hardware's branch trace store.
 
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
index b7c0dea119fe..61e08c0a2907 100644
--- a/arch/x86/include/asm/vmi.h
+++ b/arch/x86/include/asm/vmi.h
@@ -223,9 +223,15 @@ struct pci_header {
 } __attribute__((packed));
 
 /* Function prototypes for bootstrapping */
+#ifdef CONFIG_VMI
 extern void vmi_init(void);
+extern void vmi_activate(void);
 extern void vmi_bringup(void);
-extern void vmi_apply_boot_page_allocations(void);
+#else
+static inline void vmi_init(void) {}
+static inline void vmi_activate(void) {}
+static inline void vmi_bringup(void) {}
+#endif
 
 /* State needed to start an application processor in an SMP system. */
 struct vmi_ap_state {
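
Note (annotation, not part of the patch): supplying empty static inline stubs for the !CONFIG_VMI case is the usual kernel idiom for letting call sites drop their own #ifdef guards, since the compiler discards the empty calls entirely; it is what permits the #ifdef removals in setup.c and smpboot.c further down. A minimal self-contained sketch of the idiom, where CONFIG_FOO and foo_init() are invented for illustration:

	#include <stdio.h>

	#ifdef CONFIG_FOO
	extern void foo_init(void);            /* real implementation elsewhere */
	#else
	static inline void foo_init(void) { }  /* stub: compiles to nothing */
	#endif

	int main(void)
	{
		foo_init();	/* the call site needs no #ifdef of its own */
		printf("foo_init() called unconditionally\n");
		return 0;
	}
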
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 5662e226b0c9..0a60d60ed036 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -235,8 +235,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
-		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+	if (unlikely(i == EXIT_LOOP_COUNT))
+		panic("AMD IOMMU: Completion wait loop failed\n");
+
 out:
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -344,7 +345,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -600,7 +601,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -910,7 +911,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
@@ -922,8 +923,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -983,7 +984,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
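
Note (annotation, not part of the patch): two of the hunks above fix classic copy-and-paste slips. In iommu_map(), PAGE_ALIGN(bus_addr) was being assigned to phys_addr, and in dma_ops_free_pagetable() the inner page-table walk incremented the outer counter i instead of j, so the condition j < 512 could never become false. A standalone sketch of that loop bug (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int visits = 0;

		for (int i = 0; i < 3; ++i)
			/* the broken form read "for (j = 0; j < 3; ++i)":
			 * j never advances, the condition never fails, and
			 * i runs far past its bound; the fix increments j */
			for (int j = 0; j < 3; ++j)
				++visits;

		printf("visited %d cells (expect 9)\n", visits);
		return 0;
	}
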
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 30ae2701b3df..c6cc22815d35 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -427,6 +427,10 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 
 	return cmd_buf;
@@ -1074,7 +1078,8 @@ int __init amd_iommu_init(void)
 		goto free;
 
 	/* IOMMU rlookup table - find the IOMMU for a specific device */
-	amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
+	amd_iommu_rlookup_table = (void *)__get_free_pages(
+			GFP_KERNEL | __GFP_ZERO,
 			get_order(rlookup_table_size));
 	if (amd_iommu_rlookup_table == NULL)
 		goto free;
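
Note (annotation, not part of the patch): the __GFP_ZERO addition makes the page allocator return pre-zeroed memory, so a freshly allocated rlookup table cannot carry stale bytes that might later be read as valid entries. The closest userspace analogue, purely for illustration, is preferring calloc() over malloc() for lookup tables:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* calloc() zeroes the block, as GFP_KERNEL | __GFP_ZERO does */
		unsigned char *table = calloc(4096, 1);

		if (!table)
			return 1;
		printf("table[0] = %u (guaranteed zero)\n", table[0]);
		free(table);
		return 0;
	}
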
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 4b031a4ac856..1c838032fd37 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -510,12 +510,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	static cpumask_t mce_cpus = CPU_MASK_NONE;
-
 	mce_cpu_quirks(c);
 
 	if (mce_dont_init ||
-	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
 	    !mce_available(c))
 		return;
 
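
Note (annotation, not part of the patch): the deleted cpu_test_and_set() implemented a run-once-per-CPU guard; an atomic test-and-set returns the bit's previous value, so every caller after the first bailed out. Removing it lets mcheck_init() run again on the same CPU, presumably for re-initialization paths such as resume (my reading; the diff itself states no rationale). A single-threaded sketch of the guard pattern being removed, with invented names:

	#include <stdbool.h>
	#include <stdio.h>

	static bool initialized[4];		/* stand-in for the cpumask */

	static bool test_and_set(int cpu)	/* returns the previous value */
	{
		bool old = initialized[cpu];
		initialized[cpu] = true;
		return old;
	}

	int main(void)
	{
		printf("first call saw bit = %d\n", test_and_set(0));	/* 0 */
		printf("second call saw bit = %d\n", test_and_set(0));	/* 1 */
		return 0;
	}
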
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 82fb2809ce32..c4b5b24e0217 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -272,13 +272,18 @@ static struct attribute_group mc_attr_group = {
 	.name = "microcode",
 };
 
-static void microcode_fini_cpu(int cpu)
+static void __microcode_fini_cpu(int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-	mutex_lock(&microcode_mutex);
 	microcode_ops->microcode_fini_cpu(cpu);
 	uci->valid = 0;
+}
+
+static void microcode_fini_cpu(int cpu)
+{
+	mutex_lock(&microcode_mutex);
+	__microcode_fini_cpu(cpu);
 	mutex_unlock(&microcode_mutex);
 }
 
@@ -306,12 +311,16 @@ static int microcode_resume_cpu(int cpu)
 	 * to this cpu (a bit of paranoia):
 	 */
 	if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
-		microcode_fini_cpu(cpu);
+		__microcode_fini_cpu(cpu);
+		printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n",
+		       cpu);
 		return -1;
 	}
 
-	if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) {
-		microcode_fini_cpu(cpu);
+	if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) {
+		__microcode_fini_cpu(cpu);
+		printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n",
+		       cpu);
 		/* Should we look for a new ucode here? */
 		return 1;
 	}
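
Note (annotation, not part of the patch): the hunks above apply the standard double-underscore split: __microcode_fini_cpu() expects microcode_mutex to be held by its caller, while microcode_fini_cpu() takes the lock itself. microcode_resume_cpu() already runs under that mutex, so calling the locking variant from there would self-deadlock. A minimal sketch of the shape, using a pthread mutex in place of microcode_mutex and invented names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int valid = 1;

	static void __fini(void)	/* caller must hold "lock" */
	{
		valid = 0;
	}

	static void fini(void)		/* takes the lock itself */
	{
		pthread_mutex_lock(&lock);
		__fini();
		pthread_mutex_unlock(&lock);
	}

	static void resume(void)
	{
		pthread_mutex_lock(&lock);
		/* already under the lock: calling fini() here would
		 * self-deadlock, so the unlocked variant is used */
		__fini();
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		fini();
		resume();
		printf("valid = %d\n", valid);
		return 0;
	}
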
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 622dc4a21784..a8e62792d171 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -155,6 +155,7 @@ static DEFINE_SPINLOCK(microcode_update_lock);
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
+	unsigned long flags;
 	unsigned int val[2];
 
 	memset(csig, 0, sizeof(*csig));
@@ -174,11 +175,16 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 		csig->pf = 1 << ((val[1] >> 18) & 7);
 	}
 
+	/* serialize access to the physical write to MSR 0x79 */
+	spin_lock_irqsave(&microcode_update_lock, flags);
+
 	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
 	/* see notes above for revision 1.07. Apparent chip bug */
 	sync_core();
 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+	spin_unlock_irqrestore(&microcode_update_lock, flags);
+
 	pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
 		 csig->sig, csig->pf, csig->rev);
 
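
Note (annotation, not part of the patch): spin_lock_irqsave() takes the lock and disables local interrupts, saving the previous interrupt state in flags so spin_unlock_irqrestore() can put it back; wrapping the wrmsr/sync_core/rdmsr sequence this way keeps a concurrent microcode update from interleaving between the MSR write and the revision readback. The sketch below only mimics the shape of that pairing with trace output; the stub primitives are invented here, and the real kernel macros take flags by name rather than by pointer:

	#include <stdio.h>

	typedef int spinlock_t;

	static void spin_lock_irqsave(spinlock_t *l, unsigned long *flags)
	{
		(void)l;
		*flags = 1;	/* pretend IRQs were on: record, then disable */
		printf("irqs off, lock taken\n");
	}

	static void spin_unlock_irqrestore(spinlock_t *l, unsigned long flags)
	{
		(void)l;
		printf("lock dropped, irq state %lu restored\n", flags);
	}

	int main(void)
	{
		spinlock_t update_lock = 0;
		unsigned long flags;

		spin_lock_irqsave(&update_lock, &flags);
		/* critical section: the MSR write and readback live here */
		spin_unlock_irqrestore(&update_lock, flags);
		return 0;
	}
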
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@
 
 #include <asm/paravirt.h>
 
-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ba7ad83e20a8..a35eaa379ff6 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -745,10 +745,8 @@ void __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-		printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
+	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
 		return;
-	}
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a3834f123206..5e028e1926eb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
+	/* VMI may relocate the fixmap; do this before touching ioremap area */
+	vmi_init();
+
 	early_cpu_init();
 	early_ioremap_init();
 
@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
 	check_efer();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-	/*
-	 * Must be before kernel pagetables are setup
-	 * or fixmap area is touched.
-	 */
-	vmi_init();
-#endif
+	/* Must be before kernel pagetables are setup */
+	vmi_activate();
 
 	/* after early param, so could get panic from serial */
 	reserve_early_setup_data();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7b1093397319..f71f96fc9e62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
-#ifdef CONFIG_VMI
 	vmi_bringup();
-#endif
 	cpu_init();
 	preempt_disable();
 	smp_callin();
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 8b6c393ab9fd..22fd6577156a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
 
 void __init vmi_init(void)
 {
-	unsigned long flags;
-
 	if (!vmi_rom)
 		probe_vmi_rom();
 	else
@@ -973,13 +971,21 @@ void __init vmi_init(void)
 
 	reserve_top_address(-vmi_rom->virtual_top);
 
-	local_irq_save(flags);
-	activate_vmi();
-
 #ifdef CONFIG_X86_IO_APIC
 	/* This is virtual hardware; timer routing is wired correctly */
 	no_timer_check = 1;
 #endif
+}
+
+void vmi_activate(void)
+{
+	unsigned long flags;
+
+	if (!vmi_rom)
+		return;
+
+	local_irq_save(flags);
+	activate_vmi();
 	local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
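
Note (annotation, not part of the patch): taken together, the setup.c and vmi_32.c hunks split VMI bring-up into two phases. vmi_init() now only probes for the ROM and reserves the top of the virtual address space, early enough that the fixmap and ioremap area have not been touched, while the new vmi_activate() performs the interrupt-sensitive activation later, just before kernel pagetables are set up, and simply returns when no ROM was found. A standalone sketch of that two-phase shape, with invented names:

	#include <stdbool.h>
	#include <stdio.h>

	static bool rom_found;

	static void detect(void)	/* early phase: detection only */
	{
		rom_found = false;	/* pretend the probe found nothing */
		printf("probe done, rom_found = %d\n", rom_found);
	}

	static void activate(void)	/* later phase: act on detection */
	{
		if (!rom_found)		/* mirrors "if (!vmi_rom) return;" */
			return;
		printf("patching paravirt ops\n");
	}

	int main(void)
	{
		detect();	/* before any fixmap/ioremap users */
		activate();	/* before kernel pagetable setup */
		return 0;
	}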