| field | value | when |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2008-12-25 07:51:46 -0500 |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-12-25 07:51:46 -0500 |
| commit | 0b271ef4521756010675b1611bef20fd3096790d (patch) | |
| tree | 2c9d22a2c74122a9904e533df27f41d63ffef394 /arch/x86 | |
| parent | b19b3c74c7bbec45a848631b8f970ac110665a01 (diff) | |
| parent | 4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff) | |
Merge commit 'v2.6.28' into core/core
Diffstat (limited to 'arch/x86')
39 files changed, 253 insertions, 194 deletions
```diff
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b815664fe370..8e99073b9e0f 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -520,6 +520,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	help
 	  This adds a ptrace interface to the hardware's branch trace store.
 
```
```diff
diff --git a/arch/x86/boot/tty.c b/arch/x86/boot/tty.c
index 0be77b39328a..7e8e8b25f5f6 100644
--- a/arch/x86/boot/tty.c
+++ b/arch/x86/boot/tty.c
@@ -74,7 +74,7 @@ static int kbd_pending(void)
 {
 	u8 pending;
 	asm volatile("int $0x16; setnz %0"
-		     : "=rm" (pending)
+		     : "=qm" (pending)
		     : "a" (0x0100));
	return pending;
 }
```
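A note on the constraint change above: `setnz` can only write a byte register, and on 32-bit x86 only %eax–%edx have byte-addressable subregisters, so the "q" constraint class is needed; plain "r" would also offer %esi/%edi to the compiler, which cannot encode a byte destination. A self-contained restatement of the fixed idiom (the function name is illustrative, real-mode boot context assumed):

```c
#include <linux/types.h>

/*
 * Illustrative only: "=qm" means a byte register (a/b/c/d on i386) or a
 * memory operand, both of which setnz accepts; "=rm" would also permit
 * %esi/%edi, which have no byte subregister.
 */
static u8 bios_key_pending(void)
{
	u8 pending;

	asm volatile("int $0x16; setnz %0"
		     : "=qm" (pending)
		     : "a" (0x0100));	/* AH=0x01: keystroke status check */
	return pending;
}
```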
```diff
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 1a30c0440c6b..ac302a2fa339 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -251,13 +251,6 @@ struct amd_iommu {
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
 
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
 	/* pci domain of this IOMMU */
 	u16 pci_seg;
 
@@ -284,19 +284,19 @@ struct amd_iommu {
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
-	/* event buffer virtual address */
-	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;
 
-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;
 
+	/* if one, we need to send a completion wait command */
+	int need_sync;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
```
```diff
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 7f225a4b2a26..097794ff6b79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
```
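The rewrite above unifies the error check: 32-bit builds previously reported success unconditionally, while now both configurations compare against `bad_dma_address`, and only 64-bit additionally consults the per-device `mapping_error` hook. A hypothetical caller, to show why the 32-bit change is visible to drivers:

```c
#include <linux/dma-mapping.h>

/* sketch of a driver-side check; dev, buf and len are assumed to exist */
static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *out)
{
	*out = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *out))	/* can now fire on 32-bit too */
		return -ENOMEM;
	return 0;
}
```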
```diff
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
index 72c5a190bf48..a95008457ea4 100644
--- a/arch/x86/include/asm/ds.h
+++ b/arch/x86/include/asm/ds.h
@@ -23,12 +23,13 @@
 #ifndef _ASM_X86_DS_H
 #define _ASM_X86_DS_H
 
-#ifdef CONFIG_X86_DS
 
 #include <linux/types.h>
 #include <linux/init.h>
 
 
+#ifdef CONFIG_X86_DS
+
 struct task_struct;
 
 /*
@@ -232,7 +233,8 @@ extern void ds_free(struct ds_context *context);
 
 #else /* CONFIG_X86_DS */
 
-#define ds_init_intel(config) do {} while (0)
+struct cpuinfo_x86;
+static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
 
 #endif /* CONFIG_X86_DS */
 #endif /* _ASM_X86_DS_H */
```
```diff
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index 5b28995d664e..d02d936840a3 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -34,8 +34,6 @@ extern void pci_iommu_alloc(void);
  */
 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
 
-#if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
-
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)	\
@@ -49,18 +47,6 @@ extern void pci_iommu_alloc(void);
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-#else
-/* No IOMMU */
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)	(0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME)	(0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_PCI_64_H */
```
```diff
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index d1531c8480b7..eefb0594b058 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -271,8 +271,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
 extern int do_set_thread_area(struct task_struct *p, int idx,
 			      struct user_desc __user *info, int can_allocate);
 
-#define __ARCH_WANT_COMPAT_SYS_PTRACE
-
 #endif /* __KERNEL__ */
 
 #endif /* !__ASSEMBLY__ */
```
```diff
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4850e4b02b61..ff386ff50ed7 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -239,7 +239,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()	(boot_cpu_data.x86_max_cores > 1)
+#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
 #define smt_capable()	(smp_num_siblings > 1)
 #endif
 
```
```diff
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
index b7c0dea119fe..61e08c0a2907 100644
--- a/arch/x86/include/asm/vmi.h
+++ b/arch/x86/include/asm/vmi.h
@@ -223,9 +223,15 @@ struct pci_header {
 } __attribute__((packed));
 
 /* Function prototypes for bootstrapping */
+#ifdef CONFIG_VMI
 extern void vmi_init(void);
+extern void vmi_activate(void);
 extern void vmi_bringup(void);
-extern void vmi_apply_boot_page_allocations(void);
+#else
+static inline void vmi_init(void) {}
+static inline void vmi_activate(void) {}
+static inline void vmi_bringup(void) {}
+#endif
 
 /* State needed to start an application processor in an SMP system. */
 struct vmi_ap_state {
```
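Providing empty inline stubs in the !CONFIG_VMI case is what lets the later hunks in setup.c and smpboot.c drop their `#ifdef CONFIG_VMI` blocks: callers invoke vmi_init()/vmi_activate()/vmi_bringup() unconditionally and the compiler discards the no-op calls. The same pattern in miniature (illustrative names, not the actual vmi.h):

```c
/* header fragment: stub out a feature so call sites need no #ifdefs */
#ifdef CONFIG_FEATURE
extern void feature_init(void);
#else
static inline void feature_init(void) {}	/* compiles away to nothing */
#endif
```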
```diff
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..b62a7667828e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
 obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
-obj-y				+= ds.o
+obj-$(CONFIG_X86_DS)		+= ds.o
 obj-$(CONFIG_X86_32)		+= tls.o
 obj-$(CONFIG_IA32_EMULATION)	+= tls.o
 obj-y				+= step.o
```
```diff
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..0a60d60ed036 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -230,8 +235,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
-		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+	if (unlikely(i == EXIT_LOOP_COUNT))
+		panic("AMD IOMMU: Completion wait loop failed\n");
+
 out:
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -254,8 +260,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +285,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -343,7 +345,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -599,7 +601,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -762,8 +764,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -858,6 +858,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }
 
@@ -908,7 +911,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
@@ -920,8 +923,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +984,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1035,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1063,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1129,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1174,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1225,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1256,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
```
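Taken together, the need_sync hunks above centralize the flag handling: it is set under iommu->lock whenever a command is queued successfully (instead of by every caller afterwards), and tested-and-cleared under the same lock before a completion wait is issued, which is what lets the mapping paths call iommu_completion_wait() unconditionally. A reduced sketch of the resulting protocol, with illustrative types rather than the driver's:

```c
#include <linux/spinlock.h>

struct cmd_queue {
	spinlock_t lock;
	int need_sync;		/* set when a command has been queued */
};

static void queue_cmd(struct cmd_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* ... write the command into the ring buffer ... */
	q->need_sync = 1;	/* set under the lock that orders queueing */
	spin_unlock_irqrestore(&q->lock, flags);
}

static void sync_if_needed(struct cmd_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (!q->need_sync)
		goto out;	/* nothing queued since the last sync */
	q->need_sync = 0;
	/* ... queue a COMPLETION_WAIT command and poll for it ... */
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
```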
```diff
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 30ae2701b3df..c6cc22815d35 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -427,6 +427,10 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 
 	return cmd_buf;
@@ -1074,7 +1078,8 @@ int __init amd_iommu_init(void)
 		goto free;
 
 	/* IOMMU rlookup table - find the IOMMU for a specific device */
-	amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
+	amd_iommu_rlookup_table = (void *)__get_free_pages(
+			GFP_KERNEL | __GFP_ZERO,
 			get_order(rlookup_table_size));
 	if (amd_iommu_rlookup_table == NULL)
 		goto free;
```
```diff
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 04a7f960bbc0..16f94879b525 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1315,7 +1315,7 @@ void enable_x2apic(void)
 	}
 }
 
-void enable_IR_x2apic(void)
+void __init enable_IR_x2apic(void)
 {
 #ifdef CONFIG_INTR_REMAP
 	int ret;
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index d3dcd58b87cd..7f05f44b97e9 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -115,9 +115,20 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 i = 0;
 
 	if (cpu_family == CPU_HW_PSTATE) {
-		rdmsr(MSR_PSTATE_STATUS, lo, hi);
-		i = lo & HW_PSTATE_MASK;
-		data->currpstate = i;
+		if (data->currpstate == HW_PSTATE_INVALID) {
+			/* read (initial) hw pstate if not yet set */
+			rdmsr(MSR_PSTATE_STATUS, lo, hi);
+			i = lo & HW_PSTATE_MASK;
+
+			/*
+			 * a workaround for family 11h erratum 311 might cause
+			 * an "out-of-range Pstate if the core is in Pstate-0
+			 */
+			if (i >= data->numps)
+				data->currpstate = HW_PSTATE_0;
+			else
+				data->currpstate = i;
+		}
 		return 0;
 	}
 	do {
@@ -1121,6 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	}
 
 	data->cpu = pol->cpu;
+	data->currpstate = HW_PSTATE_INVALID;
 
 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
```
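The HW_PSTATE_INVALID sentinel introduced here (defined in the header diff below) turns the MSR read into a one-time lazy initialization, and the comparison against numps keeps the family 11h erratum 311 workaround from leaking an out-of-range P-state into the driver's state. A reduced sketch of the pattern, with hypothetical names:

```c
#include <linux/types.h>

enum pstate_example { PST_INVALID = 0xff, PST_0 = 0 };

struct pstate_cache {
	enum pstate_example curr;	/* starts as PST_INVALID */
	u32 numps;			/* number of valid P-states */
};

static void cache_initial_pstate(struct pstate_cache *c, u32 hw_value)
{
	if (c->curr != PST_INVALID)
		return;		/* already initialized: trust the cache */

	/* erratum workaround: clamp an out-of-range reading to P-state 0 */
	c->curr = (hw_value >= c->numps) ? PST_0
					 : (enum pstate_example)hw_value;
}
```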
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index ab48cfed4d96..65cfb5d7f77f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,6 +5,19 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
+
+enum pstate {
+	HW_PSTATE_INVALID = 0xff,
+	HW_PSTATE_0 = 0,
+	HW_PSTATE_1 = 1,
+	HW_PSTATE_2 = 2,
+	HW_PSTATE_3 = 3,
+	HW_PSTATE_4 = 4,
+	HW_PSTATE_5 = 5,
+	HW_PSTATE_6 = 6,
+	HW_PSTATE_7 = 7,
+};
+
 struct powernow_k8_data {
 	unsigned int cpu;
 
@@ -23,7 +36,9 @@ struct powernow_k8_data {
 	u32 exttype; /* extended interface = 1 */
 
 	/* keep track of the current fid / vid or pstate */
-	u32 currvid, currfid, currpstate;
+	u32 currvid;
+	u32 currfid;
+	enum pstate currpstate;
 
 	/* the powernow_table includes all frequency and vid/fid pairings:
 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
```
```diff
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 4b031a4ac856..1c838032fd37 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -510,12 +510,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
 */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	static cpumask_t mce_cpus = CPU_MASK_NONE;
-
 	mce_cpu_quirks(c);
 
 	if (mce_dont_init ||
-	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
 	    !mce_available(c))
 		return;
 
```
```diff
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index d1a121443bde..a2d1176c38ee 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -21,8 +21,6 @@
  */
 
 
-#ifdef CONFIG_X86_DS
-
 #include <asm/ds.h>
 
 #include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
 	struct ds_context *context;
+	unsigned long irq;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
 	context = (task ? task->thread.ds_ctx : this_system_context);
 	if (context)
 		context->count++;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 	return context;
 }
@@ -226,55 +225,46 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
 	struct ds_context **p_context =
 		(task ? &task->thread.ds_ctx : &this_system_context);
 	struct ds_context *context = *p_context;
+	unsigned long irq;
 
 	if (!context) {
-		spin_unlock(&ds_lock);
-
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
-
-		if (!context) {
-			spin_lock(&ds_lock);
+		if (!context)
 			return NULL;
-		}
 
 		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 		if (!context->ds) {
 			kfree(context);
-			spin_lock(&ds_lock);
 			return NULL;
 		}
 
-		spin_lock(&ds_lock);
-		/*
-		 * Check for race - another CPU could have allocated
-		 * it meanwhile:
-		 */
+		spin_lock_irqsave(&ds_lock, irq);
+
 		if (*p_context) {
 			kfree(context->ds);
 			kfree(context);
-			return *p_context;
-		}
 
-		*p_context = context;
+			context = *p_context;
+		} else {
+			*p_context = context;
 
-		context->this = p_context;
-		context->task = task;
+			context->this = p_context;
+			context->task = task;
 
-		if (task)
-			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+			if (task)
+				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
-		if (!task || (task == current))
-			wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-
-		get_tracer(task);
+			if (!task || (task == current))
+				wrmsrl(MSR_IA32_DS_AREA,
+				       (unsigned long)context->ds);
+		}
+		spin_unlock_irqrestore(&ds_lock, irq);
 	}
 
 	context->count++;
@@ -288,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 
 static inline void ds_put_context(struct ds_context *context)
 {
+	unsigned long irq;
+
 	if (!context)
 		return;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
 	if (--context->count)
 		goto out;
@@ -313,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
 	kfree(context->ds);
 	kfree(context);
 out:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 }
 
 
@@ -384,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 	struct ds_context *context;
 	unsigned long buffer, adj;
 	const unsigned long alignment = (1 << 3);
+	unsigned long irq;
 	int error = 0;
 
 	if (!ds_cfg.sizeof_ds)
@@ -398,26 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 		return -EOPNOTSUPP;
 
 
-	spin_lock(&ds_lock);
-
-	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
-		goto out_unlock;
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ds_lock, irq);
 
 	error = -EPERM;
 	if (!check_tracer(task))
 		goto out_unlock;
 
+	get_tracer(task);
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
-		goto out_unlock;
+		goto out_put_tracer;
 	error = -EPERM;
 	if (context->owner[qual] != NULL)
-		goto out_unlock;
+		goto out_put_tracer;
 	context->owner[qual] = current;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 
 	error = -ENOMEM;
@@ -465,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 out_release:
 	context->owner[qual] = NULL;
 	ds_put_context(context);
+	put_tracer(task);
+	return error;
+
+ out_put_tracer:
+	spin_unlock_irqrestore(&ds_lock, irq);
+	ds_put_context(context);
+	put_tracer(task);
 	return error;
 
 out_unlock:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 	ds_put_context(context);
 	return error;
 }
@@ -818,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
 	.sizeof_ds    = sizeof(long) * 12,
 	.sizeof_field = sizeof(long),
 	.sizeof_rec[ds_bts]   = sizeof(long) * 3,
+#ifdef __i386__
 	.sizeof_rec[ds_pebs]  = sizeof(long) * 10
+#else
+	.sizeof_rec[ds_pebs]  = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
 	.sizeof_ds    = 8 * 12,
 	.sizeof_field = 8,
 	.sizeof_rec[ds_bts]   = 8 * 3,
+#ifdef __i386__
 	.sizeof_rec[ds_pebs]  = 8 * 10
+#else
+	.sizeof_rec[ds_pebs]  = 8 * 18
+#endif
 };
 
 static inline void
@@ -878,4 +887,3 @@ void ds_free(struct ds_context *context)
 	while (leftovers--)
 		ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */
```
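Two patterns recur in the ds.c diff: every ds_lock acquisition becomes irqsave/irqrestore, and ds_alloc_context() now allocates with GFP_KERNEL before taking the lock, resolving the lost-allocation race under the lock instead of dropping and retaking it around kzalloc(). A minimal sketch of the second pattern, with illustrative names:

```c
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj { int refcount; };

static DEFINE_SPINLOCK(obj_lock);
static struct obj *global_obj;

static struct obj *obj_get_or_alloc(void)
{
	struct obj *new, *cur;
	unsigned long flags;

	new = kzalloc(sizeof(*new), GFP_KERNEL);  /* may sleep: no lock held */
	if (!new)
		return NULL;

	spin_lock_irqsave(&obj_lock, flags);
	if (global_obj) {
		/* lost the race: discard ours, use the winner's object */
		kfree(new);
		cur = global_obj;
	} else {
		global_obj = new;
		cur = new;
	}
	cur->refcount++;
	spin_unlock_irqrestore(&obj_lock, flags);
	return cur;
}
```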
```diff
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f20608d4ca8..b0f61f0dcd0a 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
 	stts();
 }
 
-void __init init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
 {
 	if (!HAVE_HWFP) {
 		xstate_size = sizeof(struct i387_soft_struct);
```
```diff
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index c9513e1ff28d..9043251210fb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3608,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 int __init probe_nr_irqs(void)
 {
-	int idx;
-	int nr = 0;
-#ifndef CONFIG_XEN
-	int nr_min = 32;
-#else
-	int nr_min = NR_IRQS;
-#endif
-
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	/* double it for hotplug and msi and nmi */
-	nr <<= 1;
-
-	/* something wrong ? */
-	if (nr < nr_min)
-		nr = nr_min;
-	if (WARN_ON(nr > NR_IRQS))
-		nr = NR_IRQS;
-
-	return nr;
+	return NR_IRQS;
 }
 
 /* --------------------------------------------------------------------------
@@ -3775,7 +3755,9 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 void __init setup_ioapic_dest(void)
 {
 	int pin, ioapic, irq, irq_entry;
+	struct irq_desc *desc;
 	struct irq_cfg *cfg;
+	cpumask_t mask;
 
 	if (skip_ioapic_setup == 1)
 		return;
@@ -3792,16 +3774,30 @@ void __init setup_ioapic_dest(void)
 		 * cpu is online.
 		 */
 		cfg = irq_cfg(irq);
-		if (!cfg->vector)
+		if (!cfg->vector) {
 			setup_IO_APIC_irq(ioapic, pin, irq,
 					  irq_trigger(irq_entry),
 					  irq_polarity(irq_entry));
+			continue;
+
+		}
+
+		/*
+		 * Honour affinities which have been set in early boot
+		 */
+		desc = irq_to_desc(irq);
+		if (desc->status &
+		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
+			mask = desc->affinity;
+		else
+			mask = TARGET_CPUS;
+
 #ifdef CONFIG_INTR_REMAP
-		else if (intr_remapping_enabled)
-			set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
-#endif
+		if (intr_remapping_enabled)
+			set_ir_ioapic_affinity_irq(irq, mask);
 		else
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
+#endif
+			set_ioapic_affinity_irq(irq, mask);
 	}
 
 }
```
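The setup_ioapic_dest() hunk makes late IO-APIC setup respect affinity masks configured earlier in boot instead of resetting every already-vectored IRQ to the default destination set. The decision it adds reduces to roughly this (a sketch assuming the irq_desc definitions of this kernel era):

```c
/* sketch: pick the destination mask for an already-set-up IRQ */
static cpumask_t choose_dest(struct irq_desc *desc)
{
	/* keep a mask that was pinned or explicitly set during early boot */
	if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
		return desc->affinity;
	return TARGET_CPUS;	/* otherwise fall back to the default set */
}
```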
```diff
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1c9cc431ea4f..e169ae9b6a62 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void __devinit kvm_setup_secondary_clock(void)
+static void __cpuinit kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,
```
```diff
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 82fb2809ce32..c4b5b24e0217 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -272,13 +272,18 @@ static struct attribute_group mc_attr_group = {
 	.name = "microcode",
 };
 
-static void microcode_fini_cpu(int cpu)
+static void __microcode_fini_cpu(int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-	mutex_lock(&microcode_mutex);
 	microcode_ops->microcode_fini_cpu(cpu);
 	uci->valid = 0;
+}
+
+static void microcode_fini_cpu(int cpu)
+{
+	mutex_lock(&microcode_mutex);
+	__microcode_fini_cpu(cpu);
 	mutex_unlock(&microcode_mutex);
 }
 
@@ -306,12 +311,16 @@ static int microcode_resume_cpu(int cpu)
 	 * to this cpu (a bit of paranoia):
 	 */
 	if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
-		microcode_fini_cpu(cpu);
+		__microcode_fini_cpu(cpu);
+		printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n",
+				cpu);
 		return -1;
 	}
 
-	if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) {
-		microcode_fini_cpu(cpu);
+	if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) {
+		__microcode_fini_cpu(cpu);
+		printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n",
+		       cpu);
 		/* Should we look for a new ucode here? */
 		return 1;
 	}
```
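The __microcode_fini_cpu()/microcode_fini_cpu() split above follows the usual kernel convention for functions needed both with and without a lock held: the double-underscore variant assumes microcode_mutex is already held (as it is inside microcode_resume_cpu()), while the plain variant is the self-locking wrapper. In miniature, with illustrative names:

```c
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

/* caller must already hold example_mutex */
static void __teardown(int cpu)
{
	/* ... state invalidation that must happen under the lock ... */
}

/* self-locking wrapper for callers that do not hold the mutex */
static void teardown(int cpu)
{
	mutex_lock(&example_mutex);
	__teardown(cpu);
	mutex_unlock(&example_mutex);
}
```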
```diff
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 622dc4a21784..a8e62792d171 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -155,6 +155,7 @@ static DEFINE_SPINLOCK(microcode_update_lock);
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
+	unsigned long flags;
 	unsigned int val[2];
 
 	memset(csig, 0, sizeof(*csig));
@@ -174,11 +175,16 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 		csig->pf = 1 << ((val[1] >> 18) & 7);
 	}
 
+	/* serialize access to the physical write to MSR 0x79 */
+	spin_lock_irqsave(&microcode_update_lock, flags);
+
 	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
 	/* see notes above for revision 1.07. Apparent chip bug */
 	sync_core();
 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+	spin_unlock_irqrestore(&microcode_update_lock, flags);
+
 	pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
 			csig->sig, csig->pf, csig->rev);
 
```
```diff
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
 		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
 		       "configuration information\n");
 
+	if (!mpf)
+		return;
+
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 	       mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
```
```diff
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@
 
 #include <asm/paravirt.h>
 
-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
```
```diff
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e1e731d78f38..d28bbdc35e4e 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
 				++p;
 			if (*p == '\0')
 				break;
-			bridge = simple_strtol(p, &endp, 0);
+			bridge = simple_strtoul(p, &endp, 0);
 			if (p == endp)
 				break;
 
```
```diff
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..a35eaa379ff6 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
@@ -743,10 +745,8 @@ void __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-		printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
+	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
 		return;
-	}
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
```
```diff
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d5674f7b6cc..bdec76e55594 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
+	/* VMI may relocate the fixmap; do this before touching ioremap area */
+	vmi_init();
+
 	early_cpu_init();
 	early_ioremap_init();
 
@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
 	check_efer();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-	/*
-	 * Must be before kernel pagetables are setup
-	 * or fixmap area is touched.
-	 */
-	vmi_init();
-#endif
+	/* Must be before kernel pagetables are setup */
+	vmi_activate();
 
 	/* after early param, so could get panic from serial */
 	reserve_early_setup_data();
```
```diff
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7b1093397319..f71f96fc9e62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
-#ifdef CONFIG_VMI
 	vmi_bringup();
-#endif
 	cpu_init();
 	preempt_disable();
 	smp_callin();
```
```diff
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 8b6c393ab9fd..22fd6577156a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
 
 void __init vmi_init(void)
 {
-	unsigned long flags;
-
 	if (!vmi_rom)
 		probe_vmi_rom();
 	else
@@ -973,13 +971,21 @@ void __init vmi_init(void)
 
 	reserve_top_address(-vmi_rom->virtual_top);
 
-	local_irq_save(flags);
-	activate_vmi();
-
 #ifdef CONFIG_X86_IO_APIC
 	/* This is virtual hardware; timer routing is wired correctly */
 	no_timer_check = 1;
 #endif
+}
+
+void vmi_activate(void)
+{
+	unsigned long flags;
+
+	if (!vmi_rom)
+		return;
+
+	local_irq_save(flags);
+	activate_vmi();
 	local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
```
```diff
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index b13acb75e822..15c3e6999182 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
 /*
  * Enable and initialize the xsave feature.
  */
-void __init xsave_cntxt_init(void)
+void __ref xsave_cntxt_init(void)
 {
 	unsigned int eax, ebx, ecx, edx;
 
```
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1983d9477cd..410ddbc1aa2e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	}
 
 	rmap_write_protect(vcpu->kvm, sp->gfn);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		return 1;
 	}
 
 	kvm_mmu_flush_tlb(vcpu);
-	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	return 0;
 }
 
```
```diff
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 613ec9aa674a..84eee43bbe74 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 	r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
 				  &curr_pte, sizeof(curr_pte));
 	if (r || curr_pte != gw->ptes[level - 2]) {
+		kvm_mmu_put_page(shadow_page, sptep);
 		kvm_release_pfn_clean(sw->pfn);
 		sw->sptep = NULL;
 		return 1;
```
```diff
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d06b4dc0e2ea..a4018b01e1f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
 	if (cpu_has_virtual_nmis()) {
 		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-			if (vmx_nmi_enabled(vcpu)) {
+			if (vcpu->arch.interrupt.pending) {
+				enable_nmi_window(vcpu);
+			} else if (vmx_nmi_enabled(vcpu)) {
 				vcpu->arch.nmi_pending = false;
 				vcpu->arch.nmi_injected = true;
 			} else {
```
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 022cd41ea9b4..202864ad49a7 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
| @@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type) | |||
| 401 | *cpu_type = "i386/pii"; | 401 | *cpu_type = "i386/pii"; |
| 402 | break; | 402 | break; |
| 403 | case 6 ... 8: | 403 | case 6 ... 8: |
| 404 | case 10 ... 11: | ||
| 404 | *cpu_type = "i386/piii"; | 405 | *cpu_type = "i386/piii"; |
| 405 | break; | 406 | break; |
| 406 | case 9: | 407 | case 9: |
| 408 | case 13: | ||
| 407 | *cpu_type = "i386/p6_mobile"; | 409 | *cpu_type = "i386/p6_mobile"; |
| 408 | break; | 410 | break; |
| 409 | case 10 ... 13: | ||
| 410 | *cpu_type = "i386/p6"; | ||
| 411 | break; | ||
| 412 | case 14: | 411 | case 14: |
| 413 | *cpu_type = "i386/core"; | 412 | *cpu_type = "i386/core"; |
| 414 | break; | 413 | break; |
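The ppro_init() hunk regroups the P6 model table: models 10-11 move from the generic "i386/p6" bucket to "i386/piii", model 13 joins model 9 under "i386/p6_mobile", and the leftover "i386/p6" case for models 10-13 disappears. A sketch of the resulting switch (the switch expression is not visible in the hunk; cpu_model below is an assumption, and the surrounding cases are elided):

        switch (cpu_model) {
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        /* remaining cases unchanged */
        }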
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 3f1b81a83e2e..e9f80c744cf3 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
| @@ -69,7 +69,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
| 69 | int i; | 69 | int i; |
| 70 | 70 | ||
| 71 | if (!reset_value) { | 71 | if (!reset_value) { |
| 72 | reset_value = kmalloc(sizeof(unsigned) * num_counters, | 72 | reset_value = kmalloc(sizeof(reset_value[0]) * num_counters, |
| 73 | GFP_ATOMIC); | 73 | GFP_ATOMIC); |
| 74 | if (!reset_value) | 74 | if (!reset_value) |
| 75 | return; | 75 | return; |
| @@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
| 156 | unsigned int low, high; | 156 | unsigned int low, high; |
| 157 | int i; | 157 | int i; |
| 158 | 158 | ||
| 159 | if (!reset_value) | ||
| 160 | return; | ||
| 159 | for (i = 0; i < num_counters; ++i) { | 161 | for (i = 0; i < num_counters; ++i) { |
| 160 | if (reset_value[i]) { | 162 | if (reset_value[i]) { |
| 161 | CTRL_READ(low, high, msrs, i); | 163 | CTRL_READ(low, high, msrs, i); |
| @@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
| 171 | unsigned int low, high; | 173 | unsigned int low, high; |
| 172 | int i; | 174 | int i; |
| 173 | 175 | ||
| 176 | if (!reset_value) | ||
| 177 | return; | ||
| 174 | for (i = 0; i < num_counters; ++i) { | 178 | for (i = 0; i < num_counters; ++i) { |
| 175 | if (!reset_value[i]) | 179 | if (!reset_value[i]) |
| 176 | continue; | 180 | continue; |
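The op_model_ppro.c hunks harden the driver against a failed counter-table allocation: reset_value is kmalloc'ed on first use with GFP_ATOMIC, which can return NULL, so ppro_start() and ppro_stop() now bail out early instead of indexing a NULL pointer; the allocation size also becomes sizeof(reset_value[0]) so it tracks the array's element type automatically. A condensed sketch of both changes, loop bodies elided:

        /* in ppro_setup_ctrs(): size follows the element type */
        reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
                              GFP_ATOMIC);
        if (!reset_value)
                return;

        /* in ppro_start() and ppro_stop(): guard before touching counters */
        if (!reset_value)
                return;
        for (i = 0; i < num_counters; ++i) {
                /* per-counter MSR work as before */
        }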
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 3c27a809393b..2051dc96b8e9 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
| @@ -496,21 +496,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, | |||
| 496 | pci_siemens_interrupt_controller); | 496 | pci_siemens_interrupt_controller); |
| 497 | 497 | ||
| 498 | /* | 498 | /* |
| 499 | * Regular PCI devices have 256 bytes, but AMD Family 10h Opteron ext config | 499 | * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have |
| 500 | * have 4096 bytes. Even if the device is capable, that doesn't mean we can | 500 | * 4096 bytes configuration space for each function of their processor |
| 501 | * access it. Maybe we don't have a way to generate extended config space | 501 | * configuration space. |
| 502 | * accesses. So check it | ||
| 503 | */ | 502 | */ |
| 504 | static void fam10h_pci_cfg_space_size(struct pci_dev *dev) | 503 | static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev) |
| 505 | { | 504 | { |
| 506 | dev->cfg_size = pci_cfg_space_size_ext(dev); | 505 | dev->cfg_size = pci_cfg_space_size_ext(dev); |
| 507 | } | 506 | } |
| 508 | 507 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size); | |
| 509 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size); | 508 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size); |
| 510 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size); | 509 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size); |
| 511 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size); | 510 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size); |
| 512 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size); | 511 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size); |
| 513 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size); | 512 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size); |
| 513 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size); | ||
| 514 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size); | ||
| 515 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size); | ||
| 516 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size); | ||
| 514 | 517 | ||
| 515 | /* | 518 | /* |
| 516 | * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from | 519 | * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from |
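The fixup.c hunk generalizes the Family 10h quirk: the helper is renamed to amd_cpu_pci_cfg_space_size, and the fixup table grows from the Family 10h processor-function device IDs (0x1200-0x1204) to also cover the Family 11h ones (0x1300-0x1304). Each entry applies the same header-stage quirk, which probes whether the 4096-byte extended configuration space is reachable and sets cfg_size accordingly:

        static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
        {
                /* probe for extended (4 KiB) config space access */
                dev->cfg_size = pci_cfg_space_size_ext(dev);
        }
        DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300,
                                 amd_cpu_pci_cfg_space_size);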
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 688936044dc9..636ef4caa52d 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -661,12 +661,11 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val) | |||
| 661 | * For 64-bit, we must skip the Xen hole in the middle of the address | 661 | * For 64-bit, we must skip the Xen hole in the middle of the address |
| 662 | * space, just after the big x86-64 virtual hole. | 662 | * space, just after the big x86-64 virtual hole. |
| 663 | */ | 663 | */ |
| 664 | static int xen_pgd_walk(struct mm_struct *mm, | 664 | static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, |
| 665 | int (*func)(struct mm_struct *mm, struct page *, | 665 | int (*func)(struct mm_struct *mm, struct page *, |
| 666 | enum pt_level), | 666 | enum pt_level), |
| 667 | unsigned long limit) | 667 | unsigned long limit) |
| 668 | { | 668 | { |
| 669 | pgd_t *pgd = mm->pgd; | ||
| 670 | int flush = 0; | 669 | int flush = 0; |
| 671 | unsigned hole_low, hole_high; | 670 | unsigned hole_low, hole_high; |
| 672 | unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; | 671 | unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; |
| @@ -753,6 +752,14 @@ out: | |||
| 753 | return flush; | 752 | return flush; |
| 754 | } | 753 | } |
| 755 | 754 | ||
| 755 | static int xen_pgd_walk(struct mm_struct *mm, | ||
| 756 | int (*func)(struct mm_struct *mm, struct page *, | ||
| 757 | enum pt_level), | ||
| 758 | unsigned long limit) | ||
| 759 | { | ||
| 760 | return __xen_pgd_walk(mm, mm->pgd, func, limit); | ||
| 761 | } | ||
| 762 | |||
| 756 | /* If we're using split pte locks, then take the page's lock and | 763 | /* If we're using split pte locks, then take the page's lock and |
| 757 | return a pointer to it. Otherwise return NULL. */ | 764 | return a pointer to it. Otherwise return NULL. */ |
| 758 | static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) | 765 | static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) |
| @@ -854,7 +861,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) | |||
| 854 | 861 | ||
| 855 | xen_mc_batch(); | 862 | xen_mc_batch(); |
| 856 | 863 | ||
| 857 | if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { | 864 | if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { |
| 858 | /* re-enable interrupts for flushing */ | 865 | /* re-enable interrupts for flushing */ |
| 859 | xen_mc_issue(0); | 866 | xen_mc_issue(0); |
| 860 | 867 | ||
| @@ -998,7 +1005,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) | |||
| 998 | PT_PMD); | 1005 | PT_PMD); |
| 999 | #endif | 1006 | #endif |
| 1000 | 1007 | ||
| 1001 | xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT); | 1008 | __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); |
| 1002 | 1009 | ||
| 1003 | xen_mc_issue(0); | 1010 | xen_mc_issue(0); |
| 1004 | } | 1011 | } |
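The xen/mmu.c change threads an explicit pgd through the page-table walker: __xen_pgd_walk() now takes the pgd as a parameter, and __xen_pgd_pin()/__xen_pgd_unpin() pass the pgd they were actually handed, which need not be mm->pgd. The old entry point survives as a thin wrapper that preserves the mm->pgd behaviour, shown here un-diffed for readability:

        static int xen_pgd_walk(struct mm_struct *mm,
                                int (*func)(struct mm_struct *mm, struct page *,
                                            enum pt_level),
                                unsigned long limit)
        {
                return __xen_pgd_walk(mm, mm->pgd, func, limit);
        }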
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d77da613b1d2..acd9b6705e02 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
| @@ -362,7 +362,7 @@ static void xen_cpu_die(unsigned int cpu) | |||
| 362 | alternatives_smp_switch(0); | 362 | alternatives_smp_switch(0); |
| 363 | } | 363 | } |
| 364 | 364 | ||
| 365 | static void xen_play_dead(void) | 365 | static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */ |
| 366 | { | 366 | { |
| 367 | play_dead_common(); | 367 | play_dead_common(); |
| 368 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 368 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
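The xen/smp.c hunk moves xen_play_dead() into the CPU-hotplug init section: with CONFIG_HOTPLUG_CPU disabled, __cpuinit text is discarded after boot, which is safe here since, as the added comment notes, the function is only reached from the hotplug path. The function un-diffed:

        static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
        {
                play_dead_common();
                HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        }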
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d7422dc2a55c..9e1afae8461f 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
| @@ -49,7 +49,7 @@ bool xen_vcpu_stolen(int vcpu); | |||
| 49 | 49 | ||
| 50 | void xen_mark_init_mm_pinned(void); | 50 | void xen_mark_init_mm_pinned(void); |
| 51 | 51 | ||
| 52 | void __init xen_setup_vcpu_info_placement(void); | 52 | void xen_setup_vcpu_info_placement(void); |
| 53 | 53 | ||
| 54 | #ifdef CONFIG_SMP | 54 | #ifdef CONFIG_SMP |
| 55 | void xen_smp_init(void); | 55 | void xen_smp_init(void); |
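Finally, the xen-ops.h hunk drops __init from the xen_setup_vcpu_info_placement() prototype. Annotating a prototype documents that the function lives in .init.text; removing it here presumably reflects that the definition is no longer boot-time-only, i.e. the function can be reached after init memory is released (the caller motivating this is not shown in this diff). The declaration as it now stands:

        void xen_setup_vcpu_info_placement(void);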
