Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h  |  24
-rw-r--r--  arch/x86/include/asm/dma-mapping.h       |   6
-rw-r--r--  arch/x86/include/asm/topology.h          |   2
-rw-r--r--  arch/x86/include/asm/vmi.h               |   8
-rw-r--r--  arch/x86/kernel/amd_iommu.c              |  54
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c         |   7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c      |   3
-rw-r--r--  arch/x86/kernel/microcode_core.c         |  19
-rw-r--r--  arch/x86/kernel/microcode_intel.c        |   6
-rw-r--r--  arch/x86/kernel/mpparse.c                |   3
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c     |   3
-rw-r--r--  arch/x86/kernel/pci-gart_64.c            |   6
-rw-r--r--  arch/x86/kernel/setup.c                  |  12
-rw-r--r--  arch/x86/kernel/smpboot.c                |   2
-rw-r--r--  arch/x86/kernel/vmi_32.c                 |  16
-rw-r--r--  arch/x86/oprofile/nmi_int.c              |   5
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c        |   4
17 files changed, 104 insertions(+), 76 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 1a30c0440c6b..ac302a2fa339 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -251,13 +251,6 @@ struct amd_iommu {
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
 
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
 	/* pci domain of this IOMMU */
 	u16 pci_seg;
 
@@ -284,19 +284,19 @@ struct amd_iommu {
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
-	/* event buffer virtual address */
-	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;
 
-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;
 
+	/* if one, we need to send a completion wait command */
+	int need_sync;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 7f225a4b2a26..097794ff6b79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
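
For reference, the function as it reads with this hunk applied (reconstructed from the diff, no new code): 32-bit builds no longer report success unconditionally; they now fall through to the bad_dma_address comparison like everyone else.

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_64
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
#endif
	/* reached by X86_32 always, and by X86_64 when ops has no hook */
	return (dma_addr == bad_dma_address);
}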
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4850e4b02b61..ff386ff50ed7 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -239,7 +239,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()	(boot_cpu_data.x86_max_cores > 1)
+#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
 #define smt_capable()	(smp_num_siblings > 1)
 #endif
 
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
index b7c0dea119fe..61e08c0a2907 100644
--- a/arch/x86/include/asm/vmi.h
+++ b/arch/x86/include/asm/vmi.h
@@ -223,9 +223,15 @@ struct pci_header {
 } __attribute__((packed));
 
 /* Function prototypes for bootstrapping */
+#ifdef CONFIG_VMI
 extern void vmi_init(void);
+extern void vmi_activate(void);
 extern void vmi_bringup(void);
-extern void vmi_apply_boot_page_allocations(void);
+#else
+static inline void vmi_init(void) {}
+static inline void vmi_activate(void) {}
+static inline void vmi_bringup(void) {}
+#endif
 
 /* State needed to start an application processor in an SMP system. */
 struct vmi_ap_state {
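
Providing empty inline stubs for the !CONFIG_VMI case is what lets the call sites drop their #ifdef guards; the smpboot.c and setup.c hunks below rely on this. A minimal illustration of the pattern (illustrative sketch, not kernel code):

#ifdef CONFIG_VMI
extern void vmi_init(void);
#else
static inline void vmi_init(void) {}	/* compiles away to nothing */
#endif

void boot(void)
{
	vmi_init();	/* valid in both configurations; no #ifdef at the call site */
}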
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..0a60d60ed036 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -230,8 +235,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
-		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+	if (unlikely(i == EXIT_LOOP_COUNT))
+		panic("AMD IOMMU: Completion wait loop failed\n");
+
 out:
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -254,8 +260,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +285,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -343,7 +345,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -599,7 +601,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -762,8 +764,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -858,6 +858,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }
 
@@ -908,7 +911,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
@@ -920,8 +923,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +984,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1035,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1063,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1129,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1174,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1225,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1256,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
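
Taken together, the need_sync hunks change the synchronization protocol: the flag used to be set by each caller after queueing, outside iommu->lock, and callers only issued a completion wait when they happened to see it set. Now iommu_queue_command() sets the flag under the lock whenever a command was actually queued, and iommu_completion_wait() tests and clears it under the same lock, so callers can invoke it unconditionally without racing. Reconstructed from the hunks above (declarations sketched in, elided parts marked):

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = 1;	/* a command is now in flight */
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_completion_wait(struct amd_iommu *iommu)
{
	/* ... build the COMPLETION_WAIT command ... */

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)		/* nothing queued since the last wait */
		goto out;

	iommu->need_sync = 0;

	ret = __iommu_queue_command(iommu, &cmd);
	/* ... poll for completion; panic after EXIT_LOOP_COUNT tries ... */
out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}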
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 30ae2701b3df..c6cc22815d35 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -427,6 +427,10 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 
 	return cmd_buf;
@@ -1074,7 +1078,8 @@ int __init amd_iommu_init(void)
 		goto free;
 
 	/* IOMMU rlookup table - find the IOMMU for a specific device */
-	amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
+	amd_iommu_rlookup_table = (void *)__get_free_pages(
+			GFP_KERNEL | __GFP_ZERO,
 			get_order(rlookup_table_size));
 	if (amd_iommu_rlookup_table == NULL)
 		goto free;
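
The __GFP_ZERO addition matters because, per the comment in the hunk, the rlookup table is consulted per device to find the owning IOMMU: starting from zeroed pages, a slot that no IOMMU ever claims reads back as NULL rather than as whatever the freed pages last held. A sketch of the lookup this protects (the helper name and devid indexing are assumptions based on that comment, not code from this patch):

static struct amd_iommu *rlookup(u16 devid)
{
	/* never-initialized slots are NULL thanks to __GFP_ZERO */
	return amd_iommu_rlookup_table[devid];
}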
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 4b031a4ac856..1c838032fd37 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -510,12 +510,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	static cpumask_t mce_cpus = CPU_MASK_NONE;
-
 	mce_cpu_quirks(c);
 
 	if (mce_dont_init ||
-	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
 	    !mce_available(c))
 		return;
 
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 82fb2809ce32..c4b5b24e0217 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -272,13 +272,18 @@ static struct attribute_group mc_attr_group = {
 	.name = "microcode",
 };
 
-static void microcode_fini_cpu(int cpu)
+static void __microcode_fini_cpu(int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-	mutex_lock(&microcode_mutex);
 	microcode_ops->microcode_fini_cpu(cpu);
 	uci->valid = 0;
+}
+
+static void microcode_fini_cpu(int cpu)
+{
+	mutex_lock(&microcode_mutex);
+	__microcode_fini_cpu(cpu);
 	mutex_unlock(&microcode_mutex);
 }
 
@@ -306,12 +311,16 @@ static int microcode_resume_cpu(int cpu)
 	 * to this cpu (a bit of paranoia):
 	 */
 	if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
-		microcode_fini_cpu(cpu);
+		__microcode_fini_cpu(cpu);
+		printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n",
+			cpu);
 		return -1;
 	}
 
-	if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) {
-		microcode_fini_cpu(cpu);
+	if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) {
+		__microcode_fini_cpu(cpu);
+		printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n",
+			cpu);
 		/* Should we look for a new ucode here? */
 		return 1;
 	}
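
Two changes happen here. First, the standard unlocked/locked split: __microcode_fini_cpu() assumes microcode_mutex is already held, while microcode_fini_cpu() remains the self-locking wrapper. microcode_resume_cpu() switches to the bare variant, which only makes sense if its caller already holds the mutex (an inference from the hunk, not stated in the diff). Second, the whole-struct memcmp() gives way to comparing only the sig and pf fields, so a differing rev field or struct padding can no longer cause a spurious mismatch on resume. The split as it reads after the patch:

static void __microcode_fini_cpu(int cpu)	/* caller holds microcode_mutex */
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	microcode_ops->microcode_fini_cpu(cpu);
	uci->valid = 0;
}

static void microcode_fini_cpu(int cpu)		/* self-locking wrapper */
{
	mutex_lock(&microcode_mutex);
	__microcode_fini_cpu(cpu);
	mutex_unlock(&microcode_mutex);
}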
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 622dc4a21784..a8e62792d171 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -155,6 +155,7 @@ static DEFINE_SPINLOCK(microcode_update_lock);
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
+	unsigned long flags;
 	unsigned int val[2];
 
 	memset(csig, 0, sizeof(*csig));
@@ -174,11 +175,16 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 		csig->pf = 1 << ((val[1] >> 18) & 7);
 	}
 
+	/* serialize access to the physical write to MSR 0x79 */
+	spin_lock_irqsave(&microcode_update_lock, flags);
+
 	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
 	/* see notes above for revision 1.07. Apparent chip bug */
 	sync_core();
 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+	spin_unlock_irqrestore(&microcode_update_lock, flags);
+
 	pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
 		csig->sig, csig->pf, csig->rev);
 
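
The wrmsr/sync_core/rdmsr sequence is a multi-step read of the current microcode revision; wrapping it in microcode_update_lock keeps it from interleaving with a concurrent microcode update that takes the same lock (the DEFINE_SPINLOCK visible in the hunk context suggests the apply path already uses it, though that path is outside this diff). The critical section after the patch, in isolation with commentary:

	unsigned long flags;

	/* serialize access to the physical write to MSR 0x79 */
	spin_lock_irqsave(&microcode_update_lock, flags);
	wrmsr(MSR_IA32_UCODE_REV, 0, 0);	/* reset the reported revision */
	sync_core();				/* rev 1.07 notes: apparent chip bug */
	rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);	/* read back via MSR 0x8B */
	spin_unlock_irqrestore(&microcode_update_lock, flags);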
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
 		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
 		       "configuration information\n");
 
+	if (!mpf)
+		return;
+
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 	       mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@
 
 #include <asm/paravirt.h>
 
-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..a35eaa379ff6 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
@@ -743,10 +745,8 @@ void __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-		printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
+	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
 		return;
-	}
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d5674f7b6cc..bdec76e55594 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
+	/* VMI may relocate the fixmap; do this before touching ioremap area */
+	vmi_init();
+
 	early_cpu_init();
 	early_ioremap_init();
 
@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
 	check_efer();
 #endif
 
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-	/*
-	 * Must be before kernel pagetables are setup
-	 * or fixmap area is touched.
-	 */
-	vmi_init();
-#endif
+	/* Must be before kernel pagetables are setup */
+	vmi_activate();
 
 	/* after early param, so could get panic from serial */
 	reserve_early_setup_data();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7b1093397319..f71f96fc9e62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
-#ifdef CONFIG_VMI
 	vmi_bringup();
-#endif
 	cpu_init();
 	preempt_disable();
 	smp_callin();
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 8b6c393ab9fd..22fd6577156a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
 
 void __init vmi_init(void)
 {
-	unsigned long flags;
-
 	if (!vmi_rom)
 		probe_vmi_rom();
 	else
@@ -973,13 +971,21 @@ void __init vmi_init(void)
 
 	reserve_top_address(-vmi_rom->virtual_top);
 
-	local_irq_save(flags);
-	activate_vmi();
-
 #ifdef CONFIG_X86_IO_APIC
 	/* This is virtual hardware; timer routing is wired correctly */
 	no_timer_check = 1;
 #endif
+}
+
+void vmi_activate(void)
+{
+	unsigned long flags;
+
+	if (!vmi_rom)
+		return;
+
+	local_irq_save(flags);
+	activate_vmi();
 	local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 022cd41ea9b4..202864ad49a7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
 		*cpu_type = "i386/pii";
 		break;
 	case 6 ... 8:
+	case 10 ... 11:
 		*cpu_type = "i386/piii";
 		break;
 	case 9:
+	case 13:
 		*cpu_type = "i386/p6_mobile";
 		break;
-	case 10 ... 13:
-		*cpu_type = "i386/p6";
-		break;
 	case 14:
 		*cpu_type = "i386/core";
 		break;
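
P6 models 10-13 were previously lumped together as a generic "i386/p6"; after this hunk, models 10-11 report as PIII-class and model 13 (commonly Pentium M) joins model 9 as p6_mobile, while model 12 no longer matches any case and falls through to whatever ppro_init() does for unrecognized models. The switch as it reads afterwards (reconstructed from the hunk):

	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;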
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 716d26f0e5d4..e9f80c744cf3 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
@@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;