-rw-r--r--  arch/x86/Kconfig                        |   1
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h  |   6
-rw-r--r--  arch/x86/kernel/amd_iommu.c             | 109
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c        |  20
4 files changed, 111 insertions, 25 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 13ffa5df37d7..1d9c18aa17eb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -586,7 +586,6 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
-	select AGP
 	depends on X86_64 && PCI
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 106e1305ef86..3a6a3259e1eb 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -337,6 +337,9 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
+	/* becomes true if a command buffer reset is running */
+	bool reset_in_progress;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
@@ -457,4 +460,7 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
+/* some function prototypes */
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 009d722af009..8c93b7c7735e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -59,6 +59,7 @@ static u64* alloc_pte(struct protection_domain *dom,
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned long start_page,
 				      unsigned int pages);
+static void reset_iommu_command_buffer(struct amd_iommu *iommu);
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -132,7 +133,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
  *
  ****************************************************************************/
 
-static void iommu_print_event(void *__evt)
+static void dump_dte_entry(u16 devid)
+{
+	int i;
+
+	for (i = 0; i < 8; ++i)
+		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+			amd_iommu_dev_table[devid].data[i]);
+}
+
+static void dump_command(unsigned long phys_addr)
+{
+	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
+}
+
+static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
 	u32 *event = __evt;
 	int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
@@ -149,6 +168,7 @@ static void iommu_print_event(void *__evt)
149 "address=0x%016llx flags=0x%04x]\n", 168 "address=0x%016llx flags=0x%04x]\n",
150 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), 169 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
151 address, flags); 170 address, flags);
171 dump_dte_entry(devid);
152 break; 172 break;
153 case EVENT_TYPE_IO_FAULT: 173 case EVENT_TYPE_IO_FAULT:
154 printk("IO_PAGE_FAULT device=%02x:%02x.%x " 174 printk("IO_PAGE_FAULT device=%02x:%02x.%x "
@@ -170,6 +190,8 @@ static void iommu_print_event(void *__evt)
 		break;
 	case EVENT_TYPE_ILL_CMD:
 		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+		reset_iommu_command_buffer(iommu);
+		dump_command(address);
 		break;
 	case EVENT_TYPE_CMD_HARD_ERR:
 		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
@@ -203,7 +225,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 
 	while (head != tail) {
-		iommu_print_event(iommu->evt_buf + head);
+		iommu_print_event(iommu, iommu->evt_buf + head);
 		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
 	}
 
@@ -290,8 +312,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely(i == EXIT_LOOP_COUNT))
-		panic("AMD IOMMU: Completion wait loop failed\n");
+	if (unlikely(i == EXIT_LOOP_COUNT)) {
+		spin_unlock(&iommu->lock);
+		reset_iommu_command_buffer(iommu);
+		spin_lock(&iommu->lock);
+	}
 }
 
 /*
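The point of this hunk is that a completion-wait timeout no longer panics the machine; the command buffer is reset instead. Note the unlock/lock pair: reset_iommu_command_buffer() re-issues commands through helpers that take iommu->lock themselves, so calling it with the lock held would self-deadlock. A minimal userspace sketch of that locking pattern, with a pthread mutex standing in for the kernel spinlock (illustrative only, not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* stands in for reset_iommu_command_buffer(): takes the lock internally */
static void recover(void)
{
	pthread_mutex_lock(&lock);
	puts("recovering: reset command buffer, re-issue flushes");
	pthread_mutex_unlock(&lock);
}

/* caller holds `lock`, mirroring __iommu_wait_for_completion() */
static void wait_for_completion_locked(int timed_out)
{
	if (timed_out) {
		pthread_mutex_unlock(&lock);	/* avoid self-deadlock */
		recover();
		pthread_mutex_lock(&lock);	/* restore the caller's locking invariant */
	}
}

int main(void)
{
	pthread_mutex_lock(&lock);
	wait_for_completion_locked(1);
	pthread_mutex_unlock(&lock);
	return 0;
}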
@@ -439,37 +464,67 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
 }
 
 /*
+ * This function flushes one domain on one IOMMU
+ */
+static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+{
+	struct iommu_cmd cmd;
+	unsigned long flags;
+
+	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+				      domid, 1, 1);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	__iommu_queue_command(iommu, &cmd);
+	__iommu_completion_wait(iommu);
+	__iommu_wait_for_completion(iommu);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
+{
+	int i;
+
+	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
+		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+			continue;
+		flush_domain_on_iommu(iommu, i);
+	}
+
+}
+
+/*
  * This function is used to flush the IO/TLB for a given protection domain
  * on every IOMMU in the system
  */
 static void iommu_flush_domain(u16 domid)
 {
-	unsigned long flags;
 	struct amd_iommu *iommu;
-	struct iommu_cmd cmd;
 
 	INC_STATS_COUNTER(domain_flush_all);
 
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	for_each_iommu(iommu) {
-		spin_lock_irqsave(&iommu->lock, flags);
-		__iommu_queue_command(iommu, &cmd);
-		__iommu_completion_wait(iommu);
-		__iommu_wait_for_completion(iommu);
-		spin_unlock_irqrestore(&iommu->lock, flags);
-	}
+	for_each_iommu(iommu)
+		flush_domain_on_iommu(iommu, domid);
 }
 
 void amd_iommu_flush_all_domains(void)
 {
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu)
+		flush_all_domains_on_iommu(iommu);
+}
+
+static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
+{
 	int i;
 
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (iommu != amd_iommu_rlookup_table[i])
 			continue;
-		iommu_flush_domain(i);
+
+		iommu_queue_inv_dev_entry(iommu, i);
+		iommu_completion_wait(iommu);
 	}
 }
 
@@ -489,6 +544,22 @@ void amd_iommu_flush_all_devices(void)
 	}
 }
 
+static void reset_iommu_command_buffer(struct amd_iommu *iommu)
+{
+	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
+
+	if (iommu->reset_in_progress)
+		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
+
+	iommu->reset_in_progress = true;
+
+	amd_iommu_reset_cmd_buffer(iommu);
+	flush_all_devices_for_iommu(iommu);
+	flush_all_domains_on_iommu(iommu);
+
+	iommu->reset_in_progress = false;
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
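Taken together, the amd_iommu.c changes turn two previously fatal conditions (a completion-wait timeout and an ILLEGAL_COMMAND_ERROR event) into a recovery path: reset the command buffer, then re-flush all device table entries and all domain TLBs on the affected IOMMU. The reset_in_progress flag keeps the old panic behaviour for the case where recovery itself faults, so the driver cannot recurse forever. A small userspace model of that guard (the names and the injected failure are invented for illustration; exit() stands in for panic()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct model_iommu {
	bool reset_in_progress;
};

static void reset_command_buffer(struct model_iommu *iommu, bool fault_during_reset)
{
	if (iommu->reset_in_progress) {
		fprintf(stderr, "ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
		exit(1);	/* stands in for panic() */
	}
	iommu->reset_in_progress = true;

	/* the real code resets the ring and re-issues invalidations here;
	 * if one of those commands faults, the event handler re-enters */
	if (fault_during_reset)
		reset_command_buffer(iommu, false);

	iommu->reset_in_progress = false;
}

int main(void)
{
	struct model_iommu iommu = { .reset_in_progress = false };

	reset_command_buffer(&iommu, false);	/* normal recovery succeeds */
	puts("recovered once");
	reset_command_buffer(&iommu, true);	/* fault during recovery: model "panics" */
	return 0;
}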
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 264b3ef9dd6e..779ace292475 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 }
 
 /*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
+/*
  * This function writes the command buffer address to the hardware and
  * enables it.
  */
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
-	/* set head and tail to zero manually */
-	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
-	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+	amd_iommu_reset_cmd_buffer(iommu);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
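amd_iommu_reset_cmd_buffer() first disables command fetching (CONTROL_CMDBUF_EN), only then zeroes the MMIO head and tail pointers, and re-enables fetching last, so the hardware never walks the ring while its state changes; iommu_enable_command_buffer() now reuses this helper instead of open-coding the same writes. A self-contained sketch of what zeroing both pointers means for a producer/consumer ring (plain C model, not kernel code; the struct and field names are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* toy model of the command ring: hardware consumes at head, driver produces at tail */
struct cmd_ring {
	uint32_t head;
	uint32_t tail;
	int	 enabled;
};

static void ring_reset(struct cmd_ring *ring)
{
	ring->enabled = 0;	/* like clearing CONTROL_CMDBUF_EN: stop fetching first */
	ring->head = 0;		/* like writel(0x00, ... + MMIO_CMD_HEAD_OFFSET) */
	ring->tail = 0;		/* like writel(0x00, ... + MMIO_CMD_TAIL_OFFSET) */
	ring->enabled = 1;	/* re-enable: head == tail means the ring is empty again */
}

int main(void)
{
	struct cmd_ring ring = { .head = 0x40, .tail = 0x80, .enabled = 1 };

	ring_reset(&ring);
	printf("head=%u tail=%u enabled=%d\n",
	       (unsigned)ring.head, (unsigned)ring.tail, ring.enabled);
	return 0;
}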