author    Ingo Molnar <mingo@elte.hu>  2008-12-12 05:59:39 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-12-12 05:59:39 -0500
commit    fd10902797fc9d6abaf55d9c2e3c6698c90b10c7 (patch)
tree      0bc15b8b967e6f0973387e7c81f5b0dc9ce44e75 /arch/x86
parent    5b3eec0c80038c8739ccd465b897a35c0dff1cc4 (diff)
parent    8b1fae4e4200388b64dd88065639413cb3f1051c (diff)
Merge commit 'v2.6.28-rc8' into x86/irq
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/tty.c                        |  2
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h     | 24
-rw-r--r--  arch/x86/include/asm/dma-mapping.h         |  6
-rw-r--r--  arch/x86/include/asm/ds.h                  |  6
-rw-r--r--  arch/x86/include/asm/pci_64.h              | 14
-rw-r--r--  arch/x86/include/asm/ptrace.h              |  2
-rw-r--r--  arch/x86/include/asm/topology.h            |  2
-rw-r--r--  arch/x86/kernel/Makefile                   |  2
-rw-r--r--  arch/x86/kernel/amd_iommu.c                | 49
-rw-r--r--  arch/x86/kernel/apic.c                     |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 18
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h  | 17
-rw-r--r--  arch/x86/kernel/ds.c                       | 88
-rw-r--r--  arch/x86/kernel/i387.c                     |  2
-rw-r--r--  arch/x86/kernel/io_apic.c                  | 48
-rw-r--r--  arch/x86/kernel/kvmclock.c                 |  2
-rw-r--r--  arch/x86/kernel/mpparse.c                  |  3
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c       |  3
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c           |  2
-rw-r--r--  arch/x86/kernel/pci-gart_64.c              |  2
-rw-r--r--  arch/x86/kernel/xsave.c                    |  2
-rw-r--r--  arch/x86/kvm/mmu.c                         |  2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h                 |  1
-rw-r--r--  arch/x86/kvm/vmx.c                         |  4
-rw-r--r--  arch/x86/oprofile/nmi_int.c                |  5
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c          |  6
-rw-r--r--  arch/x86/pci/fixup.c                       | 25
-rw-r--r--  arch/x86/xen/mmu.c                         | 21
-rw-r--r--  arch/x86/xen/smp.c                         |  2
-rw-r--r--  arch/x86/xen/xen-ops.h                     |  2
30 files changed, 199 insertions(+), 165 deletions(-)
diff --git a/arch/x86/boot/tty.c b/arch/x86/boot/tty.c
index 0be77b39328a..7e8e8b25f5f6 100644
--- a/arch/x86/boot/tty.c
+++ b/arch/x86/boot/tty.c
@@ -74,7 +74,7 @@ static int kbd_pending(void)
 {
	u8 pending;
	asm volatile("int $0x16; setnz %0"
-		     : "=rm" (pending)
+		     : "=qm" (pending)
		     : "a" (0x0100));
	return pending;
 }
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 1a30c0440c6b..ac302a2fa339 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -251,13 +251,6 @@ struct amd_iommu {
	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;
 
-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
	/* physical address of MMIO space */
	u64 mmio_phys;
	/* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
	/* capabilities of that IOMMU read from ACPI */
	u32 cap;
 
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
	/* pci domain of this IOMMU */
	u16 pci_seg;
 
@@ -284,19 +284,19 @@ struct amd_iommu {
	/* size of command buffer */
	u32 cmd_buf_size;
 
-	/* event buffer virtual address */
-	u8 *evt_buf;
	/* size of event buffer */
	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
	/* MSI number for event interrupt */
	u16 evt_msi_num;
 
-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;
 
+	/* if one, we need to send a completion wait command */
+	int need_sync;
+
	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;
 };
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 7f225a4b2a26..097794ff6b79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
index 72c5a190bf48..a95008457ea4 100644
--- a/arch/x86/include/asm/ds.h
+++ b/arch/x86/include/asm/ds.h
@@ -23,12 +23,13 @@
 #ifndef _ASM_X86_DS_H
 #define _ASM_X86_DS_H
 
-#ifdef CONFIG_X86_DS
 
 #include <linux/types.h>
 #include <linux/init.h>
 
 
+#ifdef CONFIG_X86_DS
+
 struct task_struct;
 
 /*
@@ -232,7 +233,8 @@ extern void ds_free(struct ds_context *context);
 
 #else /* CONFIG_X86_DS */
 
-#define ds_init_intel(config) do {} while (0)
+struct cpuinfo_x86;
+static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
 
 #endif /* CONFIG_X86_DS */
 #endif /* _ASM_X86_DS_H */
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index 5b28995d664e..d02d936840a3 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -34,8 +34,6 @@ extern void pci_iommu_alloc(void);
  */
 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
 
-#if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
-
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
	dma_addr_t ADDR_NAME;
 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
@@ -49,18 +47,6 @@ extern void pci_iommu_alloc(void);
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
	(((PTR)->LEN_NAME) = (VAL))
 
-#else
-/* No IOMMU */
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index d1531c8480b7..eefb0594b058 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -271,8 +271,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
 extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);
 
-#define __ARCH_WANT_COMPAT_SYS_PTRACE
-
 #endif /* __KERNEL__ */
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4850e4b02b61..ff386ff50ed7 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -239,7 +239,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
+#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
 #define smt_capable() (smp_num_siblings > 1)
 #endif
 
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 943fe6026c64..3de1f2350457 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
 obj-y += process.o
 obj-y += i387.o xsave.o
 obj-y += ptrace.o
-obj-y += ds.o
+obj-$(CONFIG_X86_DS) += ds.o
 obj-$(CONFIG_X86_32) += tls.o
 obj-$(CONFIG_IA32_EMULATION) += tls.o
 obj-y += step.o
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..a7b6dec6fc3f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
	spin_unlock_irqrestore(&iommu->lock, flags);
 
	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
	ret = __iommu_queue_command(iommu, &cmd);
 
	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
	return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
	return ret;
 }
 
@@ -343,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
	u64 __pte, *pte, *page;
 
	bus_addr = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -599,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
			continue;
 
		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
		print_devid(_bdf, 1);
	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
	return 1;
 }
 
@@ -908,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
	if (address >= dom->aperture_size)
		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
	pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);
@@ -920,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
	dma_addr_t i, start;
	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
		return;
 
	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1034,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
	if (addr == bad_dma_address)
		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1062,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1128,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
			goto unmap;
	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1173,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		s->dma_address = s->dma_length = 0;
	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1224,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
		goto out;
	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1255,7 @@ static void free_coherent(struct device *dev, size_t size,
 
	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
	spin_unlock_irqrestore(&domain->lock, flags);
 
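Note on the amd_iommu.c hunks above: they centralize the need_sync bookkeeping. iommu_queue_command() now sets the flag under iommu->lock right where a command is queued, and iommu_completion_wait() re-checks and clears it under the same lock, so callers can invoke the wait unconditionally instead of testing the flag themselves. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlock (all names here are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdio.h>

struct fake_iommu {
	pthread_mutex_t lock;
	int need_sync;			/* set when a command was queued */
};

static int queue_command(struct fake_iommu *iommu, int cmd)
{
	int ret = 0;			/* pretend the queue accepted cmd */

	(void)cmd;
	pthread_mutex_lock(&iommu->lock);
	/* ... enqueue the command here ... */
	if (!ret)
		iommu->need_sync = 1;	/* flag set under the same lock */
	pthread_mutex_unlock(&iommu->lock);
	return ret;
}

static void completion_wait(struct fake_iommu *iommu)
{
	pthread_mutex_lock(&iommu->lock);
	if (!iommu->need_sync)
		goto out;		/* nothing queued since last wait */
	iommu->need_sync = 0;
	/* ... issue the wait command and poll for completion ... */
	puts("completion wait issued");
out:
	pthread_mutex_unlock(&iommu->lock);
}

int main(void)
{
	struct fake_iommu iommu = { PTHREAD_MUTEX_INITIALIZER, 0 };

	queue_command(&iommu, 42);
	completion_wait(&iommu);	/* issues the wait */
	completion_wait(&iommu);	/* no-op: flag already clear */
	return 0;
}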
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 04a7f960bbc0..16f94879b525 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1315,7 +1315,7 @@ void enable_x2apic(void)
	}
 }
 
-void enable_IR_x2apic(void)
+void __init enable_IR_x2apic(void)
 {
 #ifdef CONFIG_INTR_REMAP
	int ret;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index d3dcd58b87cd..7f05f44b97e9 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -115,9 +115,20 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
	u32 i = 0;
 
	if (cpu_family == CPU_HW_PSTATE) {
-		rdmsr(MSR_PSTATE_STATUS, lo, hi);
-		i = lo & HW_PSTATE_MASK;
-		data->currpstate = i;
+		if (data->currpstate == HW_PSTATE_INVALID) {
+			/* read (initial) hw pstate if not yet set */
+			rdmsr(MSR_PSTATE_STATUS, lo, hi);
+			i = lo & HW_PSTATE_MASK;
+
+			/*
+			 * a workaround for family 11h erratum 311 might cause
+			 * an "out-of-range Pstate if the core is in Pstate-0
+			 */
+			if (i >= data->numps)
+				data->currpstate = HW_PSTATE_0;
+			else
+				data->currpstate = i;
+		}
		return 0;
	}
	do {
@@ -1121,6 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
	}
 
	data->cpu = pol->cpu;
+	data->currpstate = HW_PSTATE_INVALID;
 
	if (powernow_k8_cpu_init_acpi(data)) {
		/*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index ab48cfed4d96..65cfb5d7f77f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,6 +5,19 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
+
+enum pstate {
+	HW_PSTATE_INVALID = 0xff,
+	HW_PSTATE_0 = 0,
+	HW_PSTATE_1 = 1,
+	HW_PSTATE_2 = 2,
+	HW_PSTATE_3 = 3,
+	HW_PSTATE_4 = 4,
+	HW_PSTATE_5 = 5,
+	HW_PSTATE_6 = 6,
+	HW_PSTATE_7 = 7,
+};
+
 struct powernow_k8_data {
	unsigned int cpu;
 
@@ -23,7 +36,9 @@ struct powernow_k8_data {
	u32 exttype; /* extended interface = 1 */
 
	/* keep track of the current fid / vid or pstate */
-	u32 currvid, currfid, currpstate;
+	u32 currvid;
+	u32 currfid;
+	enum pstate currpstate;
 
	/* the powernow_table includes all frequency and vid/fid pairings:
	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index d1a121443bde..a2d1176c38ee 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -21,8 +21,6 @@
  */
 
 
-#ifdef CONFIG_X86_DS
-
 #include <asm/ds.h>
 
 #include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
	struct ds_context *context;
+	unsigned long irq;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
	context = (task ? task->thread.ds_ctx : this_system_context);
	if (context)
		context->count++;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
	return context;
 }
@@ -226,55 +225,46 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &this_system_context);
	struct ds_context *context = *p_context;
+	unsigned long irq;
 
	if (!context) {
-		spin_unlock(&ds_lock);
-
		context = kzalloc(sizeof(*context), GFP_KERNEL);
-
-		if (!context) {
-			spin_lock(&ds_lock);
+		if (!context)
			return NULL;
-		}
 
		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
		if (!context->ds) {
			kfree(context);
-			spin_lock(&ds_lock);
			return NULL;
		}
 
-		spin_lock(&ds_lock);
-		/*
-		 * Check for race - another CPU could have allocated
-		 * it meanwhile:
-		 */
+		spin_lock_irqsave(&ds_lock, irq);
+
		if (*p_context) {
			kfree(context->ds);
			kfree(context);
-			return *p_context;
-		}
 
-		*p_context = context;
+			context = *p_context;
+		} else {
+			*p_context = context;
 
			context->this = p_context;
			context->task = task;
 
			if (task)
				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
			if (!task || (task == current))
-			wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-
-		get_tracer(task);
+				wrmsrl(MSR_IA32_DS_AREA,
+				       (unsigned long)context->ds);
+		}
+		spin_unlock_irqrestore(&ds_lock, irq);
	}
 
	context->count++;
@@ -288,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+	unsigned long irq;
+
	if (!context)
		return;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
	if (--context->count)
		goto out;
@@ -313,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
	kfree(context->ds);
	kfree(context);
  out:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 }
 
 
@@ -384,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
	struct ds_context *context;
	unsigned long buffer, adj;
	const unsigned long alignment = (1 << 3);
+	unsigned long irq;
	int error = 0;
 
	if (!ds_cfg.sizeof_ds)
@@ -398,26 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
		return -EOPNOTSUPP;
 
 
-	spin_lock(&ds_lock);
-
-	error = -ENOMEM;
	context = ds_alloc_context(task);
	if (!context)
-		goto out_unlock;
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ds_lock, irq);
 
	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
 
+	get_tracer(task);
+
	error = -EALREADY;
	if (context->owner[qual] == current)
-		goto out_unlock;
+		goto out_put_tracer;
	error = -EPERM;
	if (context->owner[qual] != NULL)
-		goto out_unlock;
+		goto out_put_tracer;
	context->owner[qual] = current;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 
	error = -ENOMEM;
@@ -465,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
	context->owner[qual] = NULL;
	ds_put_context(context);
+	put_tracer(task);
+	return error;
+
+ out_put_tracer:
+	spin_unlock_irqrestore(&ds_lock, irq);
+	ds_put_context(context);
+	put_tracer(task);
	return error;
 
  out_unlock:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(context);
	return error;
 }
@@ -818,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
	.sizeof_ds = sizeof(long) * 12,
	.sizeof_field = sizeof(long),
	.sizeof_rec[ds_bts] = sizeof(long) * 3,
+#ifdef __i386__
	.sizeof_rec[ds_pebs] = sizeof(long) * 10
+#else
+	.sizeof_rec[ds_pebs] = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
	.sizeof_ds = 8 * 12,
	.sizeof_field = 8,
	.sizeof_rec[ds_bts] = 8 * 3,
+#ifdef __i386__
	.sizeof_rec[ds_pebs] = 8 * 10
+#else
+	.sizeof_rec[ds_pebs] = 8 * 18
+#endif
 };
 
 static inline void
@@ -878,4 +887,3 @@ void ds_free(struct ds_context *context)
	while (leftovers--)
		ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */
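Note on the ds.c hunks above: ds_alloc_context() is restructured around a common idiom. The GFP_KERNEL allocations, which may sleep, are done with no lock held; only then is ds_lock taken (now with interrupts disabled), and the race is resolved there: if another CPU installed a context in the meantime, the fresh allocation is discarded and the winner's context is used. A userspace sketch of that allocate-then-lock idiom, assuming a pthread mutex in place of spin_lock_irqsave (all names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct ctx { int count; };

static struct ctx *shared_ctx;
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static struct ctx *get_or_alloc_ctx(void)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));	/* allocate unlocked */

	if (!ctx)
		return NULL;

	pthread_mutex_lock(&ctx_lock);
	if (shared_ctx) {
		free(ctx);		/* lost the race: use the winner's */
		ctx = shared_ctx;
	} else {
		shared_ctx = ctx;	/* won the race: publish ours */
	}
	ctx->count++;			/* reference taken under the lock */
	pthread_mutex_unlock(&ctx_lock);

	return ctx;
}

int main(void)
{
	struct ctx *c = get_or_alloc_ctx();
	return c ? 0 : 1;
}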
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f20608d4ca8..b0f61f0dcd0a 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
	stts();
 }
 
-void __init init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
 {
	if (!HAVE_HWFP) {
		xstate_size = sizeof(struct i387_soft_struct);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index c9513e1ff28d..9043251210fb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3608,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 int __init probe_nr_irqs(void)
 {
-	int idx;
-	int nr = 0;
-#ifndef CONFIG_XEN
-	int nr_min = 32;
-#else
-	int nr_min = NR_IRQS;
-#endif
-
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	/* double it for hotplug and msi and nmi */
-	nr <<= 1;
-
-	/* something wrong ? */
-	if (nr < nr_min)
-		nr = nr_min;
-	if (WARN_ON(nr > NR_IRQS))
-		nr = NR_IRQS;
-
-	return nr;
+	return NR_IRQS;
 }
 
 /* --------------------------------------------------------------------------
@@ -3775,7 +3755,9 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 void __init setup_ioapic_dest(void)
 {
	int pin, ioapic, irq, irq_entry;
+	struct irq_desc *desc;
	struct irq_cfg *cfg;
+	cpumask_t mask;
 
	if (skip_ioapic_setup == 1)
		return;
@@ -3792,16 +3774,30 @@ void __init setup_ioapic_dest(void)
		 * cpu is online.
		 */
		cfg = irq_cfg(irq);
-		if (!cfg->vector)
+		if (!cfg->vector) {
			setup_IO_APIC_irq(ioapic, pin, irq,
					  irq_trigger(irq_entry),
					  irq_polarity(irq_entry));
+			continue;
+
+		}
+
+		/*
+		 * Honour affinities which have been set in early boot
+		 */
+		desc = irq_to_desc(irq);
+		if (desc->status &
+		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
+			mask = desc->affinity;
+		else
+			mask = TARGET_CPUS;
+
 #ifdef CONFIG_INTR_REMAP
-		else if (intr_remapping_enabled)
-			set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
-#endif
+		if (intr_remapping_enabled)
+			set_ir_ioapic_affinity_irq(irq, mask);
		else
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
+#endif
+			set_ioapic_affinity_irq(irq, mask);
	}
 
	}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1c9cc431ea4f..e169ae9b6a62 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void __devinit kvm_setup_secondary_clock(void)
+static void __cpuinit kvm_setup_secondary_clock(void)
 {
	/*
	 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
		       "configuration information\n");
 
+	if (!mpf)
+		return;
+
	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
	       mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@
 
 #include <asm/paravirt.h>
 
-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
	__raw_spin_lock(lock);
 }
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e1e731d78f38..d28bbdc35e4e 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
			++p;
			if (*p == '\0')
				break;
-			bridge = simple_strtol(p, &endp, 0);
+			bridge = simple_strtoul(p, &endp, 0);
			if (p == endp)
				break;
 
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..ba7ad83e20a8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
 
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index b13acb75e822..15c3e6999182 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
 /*
  * Enable and initialize the xsave feature.
  */
-void __init xsave_cntxt_init(void)
+void __ref xsave_cntxt_init(void)
 {
	unsigned int eax, ebx, ecx, edx;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1983d9477cd..410ddbc1aa2e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
	}
 
	rmap_write_protect(vcpu->kvm, sp->gfn);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}
 
	kvm_mmu_flush_tlb(vcpu);
-	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return 0;
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 613ec9aa674a..84eee43bbe74 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
					  &curr_pte, sizeof(curr_pte));
		if (r || curr_pte != gw->ptes[level - 2]) {
+			kvm_mmu_put_page(shadow_page, sptep);
			kvm_release_pfn_clean(sw->pfn);
			sw->sptep = NULL;
			return 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d06b4dc0e2ea..a4018b01e1f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
	if (cpu_has_virtual_nmis()) {
		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-			if (vmx_nmi_enabled(vcpu)) {
+			if (vcpu->arch.interrupt.pending) {
+				enable_nmi_window(vcpu);
+			} else if (vmx_nmi_enabled(vcpu)) {
				vcpu->arch.nmi_pending = false;
				vcpu->arch.nmi_injected = true;
			} else {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 022cd41ea9b4..202864ad49a7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
+	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
+	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
-	case 10 ... 13:
-		*cpu_type = "i386/p6";
-		break;
	case 14:
		*cpu_type = "i386/core";
		break;
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 3f1b81a83e2e..e9f80c744cf3 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -69,7 +69,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
	int i;
 
	if (!reset_value) {
-		reset_value = kmalloc(sizeof(unsigned) * num_counters,
+		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
@@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
	unsigned int low, high;
	int i;
 
+	if (!reset_value)
+		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			CTRL_READ(low, high, msrs, i);
@@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
	unsigned int low, high;
	int i;
 
+	if (!reset_value)
+		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 3c27a809393b..2051dc96b8e9 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -496,21 +496,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
			  pci_siemens_interrupt_controller);
 
 /*
- * Regular PCI devices have 256 bytes, but AMD Family 10h Opteron ext config
- * have 4096 bytes. Even if the device is capable, that doesn't mean we can
- * access it. Maybe we don't have a way to generate extended config space
- * accesses. So check it
+ * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
+ * 4096 bytes configuration space for each function of their processor
+ * configuration space.
  */
-static void fam10h_pci_cfg_space_size(struct pci_dev *dev)
+static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
 {
	dev->cfg_size = pci_cfg_space_size_ext(dev);
 }
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
 
 /*
  * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 688936044dc9..636ef4caa52d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -661,12 +661,11 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
  * For 64-bit, we must skip the Xen hole in the middle of the address
  * space, just after the big x86-64 virtual hole.
  */
-static int xen_pgd_walk(struct mm_struct *mm,
-			int (*func)(struct mm_struct *mm, struct page *,
-				    enum pt_level),
-			unsigned long limit)
+static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+			  int (*func)(struct mm_struct *mm, struct page *,
+				      enum pt_level),
+			  unsigned long limit)
 {
-	pgd_t *pgd = mm->pgd;
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
@@ -753,6 +752,14 @@ out:
	return flush;
 }
 
+static int xen_pgd_walk(struct mm_struct *mm,
+			int (*func)(struct mm_struct *mm, struct page *,
+				    enum pt_level),
+			unsigned long limit)
+{
+	return __xen_pgd_walk(mm, mm->pgd, func, limit);
+}
+
 /* If we're using split pte locks, then take the page's lock and
    return a pointer to it. Otherwise return NULL. */
 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
@@ -854,7 +861,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 
	xen_mc_batch();
 
-	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
+	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);
 
@@ -998,7 +1005,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
			   PT_PMD);
 #endif
 
-	xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT);
+	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
 
	xen_mc_issue(0);
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d77da613b1d2..acd9b6705e02 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -362,7 +362,7 @@ static void xen_cpu_die(unsigned int cpu)
	alternatives_smp_switch(0);
 }
 
-static void xen_play_dead(void)
+static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
 {
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d7422dc2a55c..9e1afae8461f 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -49,7 +49,7 @@ bool xen_vcpu_stolen(int vcpu);
 
 void xen_mark_init_mm_pinned(void);
 
-void __init xen_setup_vcpu_info_placement(void);
+void xen_setup_vcpu_info_placement(void);
 
 #ifdef CONFIG_SMP
 void xen_smp_init(void);