author     Donald Dutile <ddutile@redhat.com>  2012-06-04 17:29:02 -0400
committer  Ingo Molnar <mingo@kernel.org>      2012-06-08 06:15:43 -0400
commit     6f5cf52114dd87f9ed091678f7dfc8ff21bbe2b3 (patch)
tree       51d0e821a0eae76c34181a1f8286c1480d6bcbc4 /drivers/iommu
parent     bf947fcb77ff858f223c49c76e2d130095fa2585 (diff)
iommu/dmar: Reserve mmio space used by the IOMMU, if the BIOS forgets to
Intel-iommu initialization doesn't currently reserve the memory used
for the IOMMU registers. This can allow the PCI resource allocator
to assign a device BAR to the same address as the IOMMU registers,
which can cause some not-so-nice side effects when the driver
ioremap's that region.

Introduce two helper functions to map & unmap the IOMMU registers,
and use them to simplify the init and exit paths.

Signed-off-by: Donald Dutile <ddutile@redhat.com>
Acked-by: Chris Wright <chrisw@redhat.com>
Cc: iommu@lists.linux-foundation.org
Cc: suresh.b.siddha@intel.com
Cc: dwmw2@infradead.org
Link: http://lkml.kernel.org/r/1338845342-12464-3-git-send-email-ddutile@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
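For context, the fix uses the kernel's standard reserve-then-map sequence: claim the register window in the iomem resource tree with request_mem_region() before ioremap()'ing it, so the resource allocator treats the range as occupied. A minimal sketch of that pattern (hypothetical helper and names, not code from this patch):

#include <linux/io.h>
#include <linux/ioport.h>

/* Hypothetical helper (illustrative, not from this patch): reserve an
 * MMIO window before mapping it, so the PCI resource allocator can no
 * longer hand the same range out as a device BAR. */
static void __iomem *reserve_and_map(phys_addr_t phys, size_t size,
				     const char *name)
{
	void __iomem *regs;

	/* Claim the range in the iomem resource tree first. */
	if (!request_mem_region(phys, size, name))
		return NULL;	/* range already claimed by someone else */

	regs = ioremap(phys, size);
	if (!regs)
		release_mem_region(phys, size);

	return regs;
}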
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/dmar.c  111
1 file changed, 84 insertions(+), 27 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 1e5a10de3471..9ab6ebf46f7a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -570,14 +570,89 @@ int __init detect_intel_iommu(void)
 }
 
 
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+	iounmap(iommu->reg);
+	release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base register
+ *
+ * Memory map the iommu's registers. Start w/ a single page, and
+ * possibly expand if that turns out to be insufficient.
+ */
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+	int map_size, err = 0;
+
+	iommu->reg_phys = phys_addr;
+	iommu->reg_size = VTD_PAGE_SIZE;
+
+	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+		pr_err("IOMMU: can't reserve memory\n");
+		err = -EBUSY;
+		goto out;
+	}
+
+	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+	if (!iommu->reg) {
+		pr_err("IOMMU: can't map the region\n");
+		err = -ENOMEM;
+		goto release;
+	}
+
+	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		err = -EINVAL;
+		warn_invalid_dmar(phys_addr, " returns all ones");
+		goto unmap;
+	}
+
+	/* the registers might be more than one page */
+	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+			 cap_max_fault_reg_offset(iommu->cap));
+	map_size = VTD_PAGE_ALIGN(map_size);
+	if (map_size > iommu->reg_size) {
+		iounmap(iommu->reg);
+		release_mem_region(iommu->reg_phys, iommu->reg_size);
+		iommu->reg_size = map_size;
+		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+					iommu->name)) {
+			pr_err("IOMMU: can't reserve memory\n");
+			err = -EBUSY;
+			goto out;
+		}
+		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+		if (!iommu->reg) {
+			pr_err("IOMMU: can't map the region\n");
+			err = -ENOMEM;
+			goto release;
+		}
+	}
+	err = 0;
+	goto out;
+
+unmap:
+	iounmap(iommu->reg);
+release:
+	release_mem_region(iommu->reg_phys, iommu->reg_size);
+out:
+	return err;
+}
+
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
-	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
 	int agaw = 0;
 	int msagaw = 0;
+	int err;
 
 	if (!drhd->reg_base_addr) {
 		warn_invalid_dmar(0, "");
@@ -591,19 +666,13 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->seq_id = iommu_allocated++;
 	sprintf (iommu->name, "dmar%d", iommu->seq_id);
 
-	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
-	if (!iommu->reg) {
-		pr_err("IOMMU: can't map the region\n");
+	err = map_iommu(iommu, drhd->reg_base_addr);
+	if (err) {
+		pr_err("IOMMU: failed to map %s\n", iommu->name);
 		goto error;
 	}
-	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
-		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
-		goto err_unmap;
-	}
 
+	err = -EINVAL;
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
@@ -621,19 +690,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	iommu->node = -1;
 
-	/* the registers might be more than one page */
-	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-		cap_max_fault_reg_offset(iommu->cap));
-	map_size = VTD_PAGE_ALIGN(map_size);
-	if (map_size > VTD_PAGE_SIZE) {
-		iounmap(iommu->reg);
-		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-		if (!iommu->reg) {
-			pr_err("IOMMU: can't map the region\n");
-			goto error;
-		}
-	}
-
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
 		iommu->seq_id,
@@ -648,10 +704,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	return 0;
 
  err_unmap:
-	iounmap(iommu->reg);
+	unmap_iommu(iommu);
  error:
 	kfree(iommu);
-	return -1;
+	return err;
 }
 
 void free_iommu(struct intel_iommu *iommu)
@@ -662,7 +718,8 @@ void free_iommu(struct intel_iommu *iommu)
 	free_dmar_iommu(iommu);
 
 	if (iommu->reg)
-		iounmap(iommu->reg);
+		unmap_iommu(iommu);
+
 	kfree(iommu);
 }
 
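With the window claimed, it appears in the iomem resource tree under the unit's dmar%d name (e.g. an entry like "fed90000-fed90fff : dmar0" in /proc/iomem; address illustrative), and any overlapping reservation now fails outright. A sketch of that observable effect (hypothetical address and driver name):

#include <linux/io.h>
#include <linux/ioport.h>

/* Illustrative only: once dmar0 owns the range, an overlapping
 * request_mem_region() returns NULL, so a conflicting BAR placement
 * is caught instead of silently aliasing the IOMMU registers. */
static int overlap_check_example(void)
{
	if (!request_mem_region(0xfed90000, 0x1000, "example-driver"))
		return -EBUSY;	/* already claimed, e.g. by dmar0 */
	release_mem_region(0xfed90000, 0x1000);
	return 0;
}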