-rw-r--r--  arch/x86/Kconfig                          1
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h    8
-rw-r--r--  arch/x86/kernel/amd_iommu.c             118
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c         34
4 files changed, 120 insertions(+), 41 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 13ffa5df37d7..1d9c18aa17eb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -586,7 +586,6 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
-	select AGP
 	depends on X86_64 && PCI
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 49f7453bff76..86a56b49f2c6 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -202,7 +202,7 @@ extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)					\
 	do {								\
 		if (amd_iommu_dump)					\
-			printk(KERN_INFO "AMD IOMMU: " format, ## arg);	\
+			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
 	} while(0);
 
 /*
@@ -341,6 +341,9 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
+	/* becomes true if a command buffer reset is running */
+	bool reset_in_progress;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
@@ -461,4 +464,7 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
+/* some function prototypes */
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
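
The new reset_in_progress flag above is a plain re-entrancy guard: the command
buffer reset path issues IOMMU commands of its own, so a reset that itself
triggers another ILLEGAL_COMMAND_ERROR would otherwise recurse forever. A
minimal stand-alone sketch of the pattern, assuming nothing from the kernel
(all names here are hypothetical, not driver API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev_state {
    	bool reset_in_progress;	/* set while a recovery reset runs */
    };

    static void reset_device(struct dev_state *dev)
    {
    	if (dev->reset_in_progress) {
    		/* The recovery path failed too: stop, like panic() does. */
    		fprintf(stderr, "reset re-entered, giving up\n");
    		exit(1);
    	}

    	dev->reset_in_progress = true;
    	/*
    	 * Reprogram the hardware here. If this fails and the error
    	 * handler calls reset_device() again, the guard above fires
    	 * instead of recursing.
    	 */
    	dev->reset_in_progress = false;
    }

    int main(void)
    {
    	struct dev_state dev = { .reset_in_progress = false };
    	reset_device(&dev);
    	return 0;
    }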
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 12a541deae5e..dc19ed43b54e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -49,7 +49,6 @@ static struct protection_domain *pt_domain;
 
 #ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
-#endif
 
 /*
  * general struct to manage commands send to an IOMMU
@@ -67,10 +66,7 @@ static u64* alloc_pte(struct protection_domain *dom,
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned long start_page,
 				      unsigned int pages);
-
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
+static void reset_iommu_command_buffer(struct amd_iommu *iommu);
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -144,7 +140,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
  *
  ****************************************************************************/
 
-static void iommu_print_event(void *__evt)
+static void dump_dte_entry(u16 devid)
+{
+	int i;
+
+	for (i = 0; i < 8; ++i)
+		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+			amd_iommu_dev_table[devid].data[i]);
+}
+
+static void dump_command(unsigned long phys_addr)
+{
+	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
+}
+
+static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
 	u32 *event = __evt;
 	int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
@@ -153,7 +167,7 @@ static void iommu_print_event(void *__evt)
 	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 	u64 address = (u64)(((u64)event[3]) << 32) | event[2];
 
-	printk(KERN_ERR "AMD IOMMU: Event logged [");
+	printk(KERN_ERR "AMD-Vi: Event logged [");
 
 	switch (type) {
 	case EVENT_TYPE_ILL_DEV:
@@ -161,6 +175,7 @@ static void iommu_print_event(void *__evt)
 		       "address=0x%016llx flags=0x%04x]\n",
 		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 		       address, flags);
+		dump_dte_entry(devid);
 		break;
 	case EVENT_TYPE_IO_FAULT:
 		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
@@ -182,6 +197,8 @@ static void iommu_print_event(void *__evt)
 		break;
 	case EVENT_TYPE_ILL_CMD:
 		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+		reset_iommu_command_buffer(iommu);
+		dump_command(address);
 		break;
 	case EVENT_TYPE_CMD_HARD_ERR:
 		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
@@ -215,7 +232,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 
 	while (head != tail) {
-		iommu_print_event(iommu->evt_buf + head);
+		iommu_print_event(iommu, iommu->evt_buf + head);
 		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
 	}
 
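
iommu_poll_events() above treats the event log as a ring buffer: hardware
advances the tail register as it logs events, software consumes entries at
the head and wraps modulo the buffer size, then writes the new head back.
A stand-alone sketch of the same traversal (entry and buffer sizes here are
illustrative, not taken from the hardware spec):

    #include <stdio.h>

    #define ENTRY_SIZE	16			/* bytes per log entry */
    #define BUF_SIZE	(32 * ENTRY_SIZE)	/* whole ring */

    /* Consume all entries between head and tail, wrapping at the end. */
    static unsigned int poll_events(unsigned int head, unsigned int tail)
    {
    	while (head != tail) {
    		printf("event at offset %u\n", head);
    		head = (head + ENTRY_SIZE) % BUF_SIZE;
    	}
    	return head;	/* the driver writes this back via MMIO */
    }

    int main(void)
    {
    	poll_events(0, 2 * ENTRY_SIZE);	/* e.g. two pending entries */
    	return 0;
    }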
@@ -302,8 +319,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely(i == EXIT_LOOP_COUNT))
-		panic("AMD IOMMU: Completion wait loop failed\n");
+	if (unlikely(i == EXIT_LOOP_COUNT)) {
+		spin_unlock(&iommu->lock);
+		reset_iommu_command_buffer(iommu);
+		spin_lock(&iommu->lock);
+	}
 }
 
 /*
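
The hunk above is careful about locking: __iommu_wait_for_completion() runs
with iommu->lock held, but reset_iommu_command_buffer() flushes devices and
domains and therefore takes that same non-recursive lock internally.
Dropping the lock around the call avoids a self-deadlock, and the caller
still sees the lock held on return. The shape of the pattern, sketched with
pthreads rather than kernel spinlocks:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void recover(void)
    {
    	pthread_mutex_lock(&lock);	/* like the real reset path */
    	/* ... requeue commands, flush ... */
    	pthread_mutex_unlock(&lock);
    }

    /* Called with `lock` held, like __iommu_wait_for_completion(). */
    static void wait_for_completion_locked(int timed_out)
    {
    	if (timed_out) {
    		pthread_mutex_unlock(&lock);	/* avoid self-deadlock */
    		recover();
    		pthread_mutex_lock(&lock);	/* restore the invariant */
    	}
    }

    int main(void)
    {
    	pthread_mutex_lock(&lock);
    	wait_for_completion_locked(1);
    	pthread_mutex_unlock(&lock);
    	return 0;
    }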
@@ -451,37 +471,67 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
 }
 
 /*
+ * This function flushes one domain on one IOMMU
+ */
+static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+{
+	struct iommu_cmd cmd;
+	unsigned long flags;
+
+	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+				      domid, 1, 1);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	__iommu_queue_command(iommu, &cmd);
+	__iommu_completion_wait(iommu);
+	__iommu_wait_for_completion(iommu);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
+{
+	int i;
+
+	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
+		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+			continue;
+		flush_domain_on_iommu(iommu, i);
+	}
+
+}
+
+/*
  * This function is used to flush the IO/TLB for a given protection domain
  * on every IOMMU in the system
  */
 static void iommu_flush_domain(u16 domid)
 {
-	unsigned long flags;
 	struct amd_iommu *iommu;
-	struct iommu_cmd cmd;
 
 	INC_STATS_COUNTER(domain_flush_all);
 
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	for_each_iommu(iommu) {
-		spin_lock_irqsave(&iommu->lock, flags);
-		__iommu_queue_command(iommu, &cmd);
-		__iommu_completion_wait(iommu);
-		__iommu_wait_for_completion(iommu);
-		spin_unlock_irqrestore(&iommu->lock, flags);
-	}
+	for_each_iommu(iommu)
+		flush_domain_on_iommu(iommu, domid);
 }
 
 void amd_iommu_flush_all_domains(void)
 {
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu)
+		flush_all_domains_on_iommu(iommu);
+}
+
+static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
+{
 	int i;
 
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (iommu != amd_iommu_rlookup_table[i])
 			continue;
-		iommu_flush_domain(i);
+
+		iommu_queue_inv_dev_entry(iommu, i);
+		iommu_completion_wait(iommu);
 	}
 }
 
@@ -491,8 +541,6 @@ void amd_iommu_flush_all_devices(void)
 	int i;
 
 	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-		if (amd_iommu_pd_table[i] == NULL)
-			continue;
 
 		iommu = amd_iommu_rlookup_table[i];
 		if (!iommu)
@@ -503,6 +551,22 @@ void amd_iommu_flush_all_devices(void)
 	}
 }
 
+static void reset_iommu_command_buffer(struct amd_iommu *iommu)
+{
+	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
+
+	if (iommu->reset_in_progress)
+		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
+
+	iommu->reset_in_progress = true;
+
+	amd_iommu_reset_cmd_buffer(iommu);
+	flush_all_devices_for_iommu(iommu);
+	flush_all_domains_on_iommu(iommu);
+
+	iommu->reset_in_progress = false;
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
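
flush_all_domains_on_iommu() above walks the protection-domain allocation
bitmap and re-flushes every live domain, which is what makes the command
buffer reset safe: invalidations lost in the dead ring are simply issued
again. A stand-alone sketch of that bitmap walk, with test_bit() spelled
out in plain C (the domain count is illustrative, not the driver's value):

    #include <limits.h>

    #define MAX_DOMAIN_ID	512	/* illustrative */
    #define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

    static unsigned long pd_alloc_bitmap[MAX_DOMAIN_ID / BITS_PER_LONG];

    static int test_bit_plain(unsigned int nr, const unsigned long *addr)
    {
    	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
    }

    static void flush_domain(unsigned int domid)
    {
    	(void)domid;	/* stand-in for flush_domain_on_iommu() */
    }

    static void flush_all_domains(void)
    {
    	unsigned int i;

    	/* domain id 0 is skipped, as in the driver's loop */
    	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
    		if (!test_bit_plain(i, pd_alloc_bitmap))
    			continue;
    		flush_domain(i);
    	}
    }

    int main(void)
    {
    	pd_alloc_bitmap[0] |= 1UL << 3;	/* pretend domain 3 is live */
    	flush_all_domains();
    	return 0;
    }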
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index f00f489ab150..b4b61d462dcc 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
-	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
+	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
 	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
 
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 }
 
 /*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
+/*
  * This function writes the command buffer address to the hardware and
  * enables it.
  */
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
-	/* set head and tail to zero manually */
-	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
-	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+	amd_iommu_reset_cmd_buffer(iommu);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
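
The new amd_iommu_reset_cmd_buffer() captures the whole trick in three
steps: stop the hardware command fetcher, zero both ring pointers so that
head == tail (an empty ring), then re-enable fetching. A hedged sketch of
that sequence against a memory-mapped register window (the offsets, the
fake register array, and the enable/disable helpers are illustrative, not
the real MMIO layout):

    #include <stdint.h>

    #define CMD_HEAD_OFFSET	0x2000	/* illustrative offsets */
    #define CMD_TAIL_OFFSET	0x2008

    static uint32_t fake_regs[0x1000];	/* stands in for the MMIO window */
    static volatile uint8_t *mmio_base = (volatile uint8_t *)fake_regs;

    static void write_reg32(unsigned int off, uint32_t val)
    {
    	*(volatile uint32_t *)(mmio_base + off) = val;
    }

    static void noop(void) { }

    static void reset_cmd_ring(void (*disable_fetch)(void),
    			       void (*enable_fetch)(void))
    {
    	disable_fetch();			/* CMDBUF_EN off: hw stops fetching */
    	write_reg32(CMD_HEAD_OFFSET, 0);	/* hardware read pointer */
    	write_reg32(CMD_TAIL_OFFSET, 0);	/* software write pointer */
    	enable_fetch();				/* CMDBUF_EN on: ring is empty */
    }

    int main(void)
    {
    	reset_cmd_ring(noop, noop);
    	return 0;
    }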
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	switch (*p) {
 	case ACPI_IVHD_TYPE:
 
-		DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x "
+		DUMP_printk("device: %02x:%02x.%01x cap: %04x "
 			    "seg: %d flags: %01x info %04x\n",
 			    PCI_BUS(h->devid), PCI_SLOT(h->devid),
 			    PCI_FUNC(h->devid), h->cap_ptr,
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 
 	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
 			IRQF_SAMPLE_RANDOM,
-			"AMD IOMMU",
+			"AMD-Vi",
 			NULL);
 
 	if (r) {
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void)
 
 
 	if (no_iommu) {
-		printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
+		printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
 		return 0;
 	}
 
@@ -1254,16 +1264,16 @@ int __init amd_iommu_init(void)
 	if (iommu_pass_through)
 		goto out;
 
-	printk(KERN_INFO "AMD IOMMU: device isolation ");
+	printk(KERN_INFO "AMD-Vi: device isolation ");
 	if (amd_iommu_isolate)
 		printk("enabled\n");
 	else
 		printk("disabled\n");
 
 	if (amd_iommu_unmap_flush)
-		printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
+		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
-		printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
+		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
 out:
 	return ret;