diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2011-11-10 13:13:51 -0500 |
---|---|---|
committer | Joerg Roedel <joerg.roedel@amd.com> | 2011-12-12 08:55:04 -0500 |
commit | 72e1dcc4192288ad5e37888aa1dbb23b3ef4aa9a (patch) | |
tree | 7e06c2f6ddf2f03dfd6c845f05cf9ce092ceacc2 /drivers/iommu/amd_iommu.c | |
parent | 5abcdba4fa535c29f736455e37229ee97e0e7f5d (diff) |
iommu/amd: Implement notifier for PPR faults
Add a notifier to which a module can attach to get informed
about incoming PPR faults.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r-- | drivers/iommu/amd_iommu.c | 90 |
1 files changed, 89 insertions, 1 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 7ccfc80ceb7a..db9b788c28ba 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/ratelimit.h> | ||
20 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
21 | #include <linux/pci-ats.h> | 22 | #include <linux/pci-ats.h> |
22 | #include <linux/bitmap.h> | 23 | #include <linux/bitmap.h> |
@@ -28,6 +29,8 @@ | |||
28 | #include <linux/iommu.h> | 29 | #include <linux/iommu.h> |
29 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
30 | #include <linux/amd-iommu.h> | 31 | #include <linux/amd-iommu.h> |
32 | #include <linux/notifier.h> | ||
33 | #include <linux/export.h> | ||
31 | #include <asm/msidef.h> | 34 | #include <asm/msidef.h> |
32 | #include <asm/proto.h> | 35 | #include <asm/proto.h> |
33 | #include <asm/iommu.h> | 36 | #include <asm/iommu.h> |
@@ -59,6 +62,8 @@ static struct protection_domain *pt_domain; | |||
59 | 62 | ||
60 | static struct iommu_ops amd_iommu_ops; | 63 | static struct iommu_ops amd_iommu_ops; |
61 | 64 | ||
65 | static ATOMIC_NOTIFIER_HEAD(ppr_notifier); | ||
66 | |||
62 | /* | 67 | /* |
63 | * general struct to manage commands send to an IOMMU | 68 | * general struct to manage commands send to an IOMMU |
64 | */ | 69 | */ |
@@ -488,12 +493,82 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
488 | spin_unlock_irqrestore(&iommu->lock, flags); | 493 | spin_unlock_irqrestore(&iommu->lock, flags); |
489 | } | 494 | } |
490 | 495 | ||
496 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | ||
497 | { | ||
498 | struct amd_iommu_fault fault; | ||
499 | volatile u64 *raw; | ||
500 | int i; | ||
501 | |||
502 | raw = (u64 *)(iommu->ppr_log + head); | ||
503 | |||
504 | /* | ||
505 | * Hardware bug: Interrupt may arrive before the entry is written to | ||
506 | * memory. If this happens we need to wait for the entry to arrive. | ||
507 | */ | ||
508 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
509 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
510 | break; | ||
511 | udelay(1); | ||
512 | } | ||
513 | |||
514 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { | ||
515 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); | ||
516 | return; | ||
517 | } | ||
518 | |||
519 | fault.address = raw[1]; | ||
520 | fault.pasid = PPR_PASID(raw[0]); | ||
521 | fault.device_id = PPR_DEVID(raw[0]); | ||
522 | fault.tag = PPR_TAG(raw[0]); | ||
523 | fault.flags = PPR_FLAGS(raw[0]); | ||
524 | |||
525 | /* | ||
526 | * To detect the hardware bug we need to clear the entry | ||
527 | * to back to zero. | ||
528 | */ | ||
529 | raw[0] = raw[1] = 0; | ||
530 | |||
531 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); | ||
532 | } | ||
533 | |||
534 | static void iommu_poll_ppr_log(struct amd_iommu *iommu) | ||
535 | { | ||
536 | unsigned long flags; | ||
537 | u32 head, tail; | ||
538 | |||
539 | if (iommu->ppr_log == NULL) | ||
540 | return; | ||
541 | |||
542 | spin_lock_irqsave(&iommu->lock, flags); | ||
543 | |||
544 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | ||
545 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | ||
546 | |||
547 | while (head != tail) { | ||
548 | |||
549 | /* Handle PPR entry */ | ||
550 | iommu_handle_ppr_entry(iommu, head); | ||
551 | |||
552 | /* Update and refresh ring-buffer state*/ | ||
553 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; | ||
554 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | ||
555 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | ||
556 | } | ||
557 | |||
558 | /* enable ppr interrupts again */ | ||
559 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
560 | |||
561 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
562 | } | ||
563 | |||
491 | irqreturn_t amd_iommu_int_thread(int irq, void *data) | 564 | irqreturn_t amd_iommu_int_thread(int irq, void *data) |
492 | { | 565 | { |
493 | struct amd_iommu *iommu; | 566 | struct amd_iommu *iommu; |
494 | 567 | ||
495 | for_each_iommu(iommu) | 568 | for_each_iommu(iommu) { |
496 | iommu_poll_events(iommu); | 569 | iommu_poll_events(iommu); |
570 | iommu_poll_ppr_log(iommu); | ||
571 | } | ||
497 | 572 | ||
498 | return IRQ_HANDLED; | 573 | return IRQ_HANDLED; |
499 | } | 574 | } |
@@ -2888,3 +2963,16 @@ int __init amd_iommu_init_passthrough(void) | |||
2888 | 2963 | ||
2889 | return 0; | 2964 | return 0; |
2890 | } | 2965 | } |
2966 | |||
2967 | /* IOMMUv2 specific functions */ | ||
2968 | int amd_iommu_register_ppr_notifier(struct notifier_block *nb) | ||
2969 | { | ||
2970 | return atomic_notifier_chain_register(&ppr_notifier, nb); | ||
2971 | } | ||
2972 | EXPORT_SYMBOL(amd_iommu_register_ppr_notifier); | ||
2973 | |||
2974 | int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb) | ||
2975 | { | ||
2976 | return atomic_notifier_chain_unregister(&ppr_notifier, nb); | ||
2977 | } | ||
2978 | EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier); | ||