diff options
Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r-- | drivers/pci/dmar.c | 150 |
1 file changed, 150 insertions, 0 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 127764cfbe27..aba151ca6d26 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <linux/dmar.h> | 30 | #include <linux/dmar.h> |
31 | #include <linux/timer.h> | ||
31 | #include "iova.h" | 32 | #include "iova.h" |
32 | #include "intel-iommu.h" | 33 | #include "intel-iommu.h" |
33 | 34 | ||
@@ -509,3 +510,152 @@ void free_iommu(struct intel_iommu *iommu) | |||
509 | iounmap(iommu->reg); | 510 | iounmap(iommu->reg); |
510 | kfree(iommu); | 511 | kfree(iommu); |
511 | } | 512 | } |
513 | |||
514 | /* | ||
515 | * Reclaim all the submitted descriptors which have completed its work. | ||
516 | */ | ||
517 | static inline void reclaim_free_desc(struct q_inval *qi) | ||
518 | { | ||
519 | while (qi->desc_status[qi->free_tail] == QI_DONE) { | ||
520 | qi->desc_status[qi->free_tail] = QI_FREE; | ||
521 | qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; | ||
522 | qi->free_cnt++; | ||
523 | } | ||
524 | } | ||
525 | |||
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 *
 * Each call consumes two queue slots: the caller's descriptor plus a
 * trailing wait descriptor through which the hardware signals
 * completion by storing a status value into qi->desc_status[].
 * Serialized against other submitters by qi->q_lock.
 */
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	/* Queued invalidation not set up on this unit (see dmar_enable_qi). */
	if (!qi)
		return;

	hw = qi->desc;

	/*
	 * We need 2 slots but insist on 3 free so the ring never becomes
	 * completely full. Drop the lock while spinning so that a
	 * concurrent submitter can make progress and reclaim slots.
	 */
	spin_lock(&qi->q_lock);
	while (qi->free_cnt < 3) {
		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	/* Mark both slots busy before publishing the descriptors. */
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	/*
	 * Wait descriptor: tells the hardware to write the status data 2
	 * (presumably the value of QI_DONE -- see the completion loop
	 * below; TODO confirm against intel-iommu.h) to the physical
	 * address of desc_status[wait_index] once all earlier
	 * descriptors in the queue have been processed.
	 */
	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	/* Make both descriptors visible to a possibly non-coherent IOMMU. */
	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	spin_lock_irqsave(&iommu->register_lock, flags);
	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors. The tail is in descriptor units (each
	 * descriptor presumably 16 bytes, hence the << 4 -- confirm
	 * against the VT-d spec's IQT register layout).
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Busy-wait until the hardware stores QI_DONE into the wait
	 * descriptor's status slot, releasing q_lock on each iteration
	 * so other CPUs can submit and reclaim in the meantime.
	 */
	while (qi->desc_status[wait_index] != QI_DONE) {
		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	/* Our own descriptor is done too; let reclaim recycle both slots. */
	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock(&qi->q_lock);
}
586 | |||
587 | /* | ||
588 | * Flush the global interrupt entry cache. | ||
589 | */ | ||
590 | void qi_global_iec(struct intel_iommu *iommu) | ||
591 | { | ||
592 | struct qi_desc desc; | ||
593 | |||
594 | desc.low = QI_IEC_TYPE; | ||
595 | desc.high = 0; | ||
596 | |||
597 | qi_submit_sync(&desc, iommu); | ||
598 | } | ||
599 | |||
600 | /* | ||
601 | * Enable Queued Invalidation interface. This is a must to support | ||
602 | * interrupt-remapping. Also used by DMA-remapping, which replaces | ||
603 | * register based IOTLB invalidation. | ||
604 | */ | ||
605 | int dmar_enable_qi(struct intel_iommu *iommu) | ||
606 | { | ||
607 | u32 cmd, sts; | ||
608 | unsigned long flags; | ||
609 | struct q_inval *qi; | ||
610 | |||
611 | if (!ecap_qis(iommu->ecap)) | ||
612 | return -ENOENT; | ||
613 | |||
614 | /* | ||
615 | * queued invalidation is already setup and enabled. | ||
616 | */ | ||
617 | if (iommu->qi) | ||
618 | return 0; | ||
619 | |||
620 | iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL); | ||
621 | if (!iommu->qi) | ||
622 | return -ENOMEM; | ||
623 | |||
624 | qi = iommu->qi; | ||
625 | |||
626 | qi->desc = (void *)(get_zeroed_page(GFP_KERNEL)); | ||
627 | if (!qi->desc) { | ||
628 | kfree(qi); | ||
629 | iommu->qi = 0; | ||
630 | return -ENOMEM; | ||
631 | } | ||
632 | |||
633 | qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL); | ||
634 | if (!qi->desc_status) { | ||
635 | free_page((unsigned long) qi->desc); | ||
636 | kfree(qi); | ||
637 | iommu->qi = 0; | ||
638 | return -ENOMEM; | ||
639 | } | ||
640 | |||
641 | qi->free_head = qi->free_tail = 0; | ||
642 | qi->free_cnt = QI_LENGTH; | ||
643 | |||
644 | spin_lock_init(&qi->q_lock); | ||
645 | |||
646 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
647 | /* write zero to the tail reg */ | ||
648 | writel(0, iommu->reg + DMAR_IQT_REG); | ||
649 | |||
650 | dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); | ||
651 | |||
652 | cmd = iommu->gcmd | DMA_GCMD_QIE; | ||
653 | iommu->gcmd |= DMA_GCMD_QIE; | ||
654 | writel(cmd, iommu->reg + DMAR_GCMD_REG); | ||
655 | |||
656 | /* Make sure hardware complete it */ | ||
657 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); | ||
658 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
659 | |||
660 | return 0; | ||
661 | } | ||