author    Linus Torvalds <torvalds@linux-foundation.org>  2008-12-05 00:40:08 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-12-05 00:40:08 -0500
commit    e948990f958c6c41036a99c8a6581e35e7e23d80 (patch)
tree      34c5c1f7a8f58c398aa79e3b89c2ae3a6cedd4c6 /arch
parent    2b218aea3674d7f8ac853f872b4042d9aa2e1130 (diff)
parent    9adc13867ec5fe0cd35434f92954d90e42381f0b (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: fix early panic with boot option "nosmp"
  x86/oprofile: fix Intel cpu family 6 detection
  oprofile: fix CPU unplug panic in ppro_stop()
  AMD IOMMU: fix possible race while accessing iommu->need_sync
  AMD IOMMU: set device table entry for aliased devices
  AMD IOMMU: struct amd_iommu remove padding on 64 bit
  x86: fix broken flushing in GART nofullflush path
  x86: fix dma_mapping_error for 32bit x86
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 24
-rw-r--r--  arch/x86/include/asm/dma-mapping.h     |  6
-rw-r--r--  arch/x86/kernel/amd_iommu.c            | 36
-rw-r--r--  arch/x86/kernel/mpparse.c              |  3
-rw-r--r--  arch/x86/kernel/pci-gart_64.c          |  2
-rw-r--r--  arch/x86/oprofile/nmi_int.c            |  5
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c      |  4
7 files changed, 41 insertions(+), 39 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 1a30c0440c6b..ac302a2fa339 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -251,13 +251,6 @@ struct amd_iommu {
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
 
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
 	/* pci domain of this IOMMU */
 	u16 pci_seg;
 
@@ -284,19 +284,19 @@ struct amd_iommu {
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
-	/* event buffer virtual address */
-	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;
 
-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;
 
+	/* if one, we need to send a completion wait command */
+	int need_sync;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
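Note on the reorder above: it changes no behavior, it only removes alignment padding on 64-bit by moving the u16 members next to other small fields instead of leaving them between pointer-aligned ones. A standalone sketch of the effect, using a hypothetical struct rather than the kernel's:

#include <stdio.h>
#include <stdint.h>

/* Mixed ordering: on LP64 the pointer needs 8-byte alignment, so the
 * u16 in front of it drags in 6 bytes of padding, and the trailing
 * u16 adds 6 more bytes of tail padding. */
struct padded {
	uint16_t small;
	uint64_t *ptr;
	uint16_t small2;
};

/* Grouping the small members lets them share one aligned slot. */
struct regrouped {
	uint64_t *ptr;
	uint16_t small;
	uint16_t small2;
};

int main(void)
{
	printf("padded:    %zu bytes\n", sizeof(struct padded));    /* typically 24 */
	printf("regrouped: %zu bytes\n", sizeof(struct regrouped)); /* typically 16 */
	return 0;
}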
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 7f225a4b2a26..097794ff6b79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
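The point of the dma_mapping_error change: the old CONFIG_X86_32 branch returned 0 unconditionally, so 32-bit callers never saw a failed mapping; now both word sizes fall through to the bad_dma_address compare. A minimal userspace model of the unified logic (types and names simplified for illustration, not the kernel API):

#include <stdio.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;
static const dma_addr_t bad_dma_address = 0;	/* shared failure sentinel */

/* Per-implementation ops table; the mapping_error hook is optional. */
struct dma_mapping_ops {
	int (*mapping_error)(dma_addr_t addr);
};

/* Unified check, mirroring the fixed dma_mapping_error(): prefer the
 * implementation hook, otherwise fall back to the sentinel compare. */
static int mapping_error(const struct dma_mapping_ops *ops, dma_addr_t addr)
{
	if (ops && ops->mapping_error)
		return ops->mapping_error(addr);
	return addr == bad_dma_address;
}

int main(void)
{
	printf("%d\n", mapping_error(NULL, bad_dma_address)); /* 1: mapping failed */
	printf("%d\n", mapping_error(NULL, 0x1000));          /* 0: mapping ok */
	return 0;
}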
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..5662e226b0c9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }
 
@@ -1031,8 +1033,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1061,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1127,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1172,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1223,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1254,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
 		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
 		       "configuration information\n");
 
+	if (!mpf)
+		return;
+
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 	       mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..ba7ad83e20a8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
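Why this fixes the nofullflush path: the GART allocator hands out entries from next_bit forward and flushes the IOTLB only on wraparound, so an entry freed at or beyond the cursor could be reallocated before any flush, leaving a stale translation live. Advancing next_bit past such a freed range forces a wrap, and therefore a flush, before reuse. A toy model of that invariant (hypothetical, not the kernel allocator):

#include <stdio.h>

/* Toy GART: scan from next_bit forward, flush only on wrap. */
#define ENTRIES 8
static int bitmap[ENTRIES];
static int next_bit;

static void flush_gart(void) { printf("  [iotlb flush]\n"); }

static int alloc_entry(void)
{
	for (int pass = 0; pass < 2; pass++) {
		for (int i = next_bit; i < ENTRIES; i++) {
			if (!bitmap[i]) {
				bitmap[i] = 1;
				next_bit = i + 1;
				return i;
			}
		}
		next_bit = 0;	/* wrapped: flush before reusing anything */
		flush_gart();
	}
	return -1;
}

static void free_entry(int i)
{
	bitmap[i] = 0;
	if (i >= next_bit)	/* the fixed line: keep the cursor ahead */
		next_bit = i + 1;
}

int main(void)
{
	int a = alloc_entry();	/* a = 0, cursor now 1 */
	next_bit = 0;		/* say a wrap + flush just reset the cursor */
	free_entry(a);		/* 0 >= 0: cursor jumps past the stale entry */
	printf("next alloc: %d\n", alloc_entry()); /* 1, not the stale 0 */
	return 0;
}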
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 022cd41ea9b4..202864ad49a7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
 		*cpu_type = "i386/pii";
 		break;
 	case 6 ... 8:
+	case 10 ... 11:
 		*cpu_type = "i386/piii";
 		break;
 	case 9:
+	case 13:
 		*cpu_type = "i386/p6_mobile";
 		break;
-	case 10 ... 13:
-		*cpu_type = "i386/p6";
-		break;
 	case 14:
 		*cpu_type = "i386/core";
 		break;
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 716d26f0e5d4..e9f80c744cf3 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
@@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
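The panic these guards fix: on CPU unplug, ppro_stop() (and ppro_start()) could run after the per-counter state had already been torn down, dereferencing a stale reset_value. Both entry points now bail out when the state is gone. A userspace model of the guard (illustrative names, not the kernel's MSR bookkeeping):

#include <stdio.h>
#include <stdlib.h>

/* Model: stop/start can race with shutdown, so both check the
 * shared state pointer before touching it. */
static unsigned int *reset_value;	/* per-counter reload values, or NULL */
static const int num_counters = 2;

static void ppro_setup(void)
{
	reset_value = calloc(num_counters, sizeof(*reset_value));
}

static void ppro_shutdown(void)
{
	free(reset_value);
	reset_value = NULL;	/* makes a late stop/start harmless */
}

static void ppro_stop(void)
{
	if (!reset_value)	/* the added guard */
		return;
	for (int i = 0; i < num_counters; i++)
		printf("stop counter %d (reload %u)\n", i, reset_value[i]);
}

int main(void)
{
	ppro_setup();
	ppro_stop();		/* normal path */
	ppro_shutdown();
	ppro_stop();		/* unplug path: guarded, no dereference */
	return 0;
}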