-rw-r--r--   drivers/pci/intel-iommu.c   25
1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ebc9b8dca881..2314ad7ee5fe 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 			}
 
 			set_bit(num, iommu->domain_ids);
-			set_bit(iommu->seq_id, &domain->iommu_bmp);
 			iommu->domains[num] = domain;
 			id = num;
 		}
@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
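
For context, the helper being moved up here converts a (host address, size) pair into a count of VT-d pages, with the end of the range rounded out to an MM page boundary. A minimal standalone sketch of the same arithmetic, assuming 4 KiB pages on both the MM and VT-d side and the usual PAGE_MASK/PAGE_ALIGN definitions (all constants and names below are illustrative, not taken from the kernel headers):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative constants: 4 KiB MM pages and 4 KiB VT-d pages. */
    #define PAGE_SHIFT     12
    #define PAGE_SIZE      (1UL << PAGE_SHIFT)
    #define PAGE_MASK      (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
    #define VTD_PAGE_SHIFT 12

    /* Mirrors the helper above: keep only the offset within the MM page,
     * add the length, round up to an MM page boundary, and express the
     * result as a number of VT-d pages. */
    static unsigned long nrpages_demo(unsigned long host_addr, size_t size)
    {
            host_addr &= ~PAGE_MASK;
            return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
    }

    int main(void)
    {
            printf("%lu\n", nrpages_demo(0x1008, 0x10));   /* 16 B at offset 8: 1 page */
            printf("%lu\n", nrpages_demo(0x0800, 0x1800)); /* 6 KiB mid-page: 2 pages */
            return 0;
    }

With both page sizes at 4 KiB this rounds exactly like the open-coded expression replaced in the next hunk; the two differ only when the MM page is larger than the 4 KiB VT-d page.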
@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2415,14 +2422,6 @@ error:
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
@@ -2551,6 +2550,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
@@ -2585,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
 
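
With the MM page size equal to the 4 KiB VT-d page size, the old and new expressions above yield the same pfn; they diverge only when MM pages are larger. A hedged sketch of that difference, assuming a 64 KiB MM page and assuming mm_to_dma_pfn() amounts to shifting an MM pfn up to VT-d granularity (both assumptions are illustrative, not read from this diff):

    #include <stdio.h>

    /* Illustrative: 64 KiB MM pages, 4 KiB VT-d pages. */
    #define PAGE_SHIFT     16
    #define VTD_PAGE_SHIFT 12

    /* Assumed behaviour of mm_to_dma_pfn(): first VT-d pfn of an MM page. */
    static unsigned long mm_to_dma_pfn_demo(unsigned long mm_pfn)
    {
            return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long paddr = 0x12345000UL;

            /* Old form keeps the sub-MM-page offset: prints 0x12345. */
            printf("%#lx\n", paddr >> VTD_PAGE_SHIFT);
            /* New form starts at the owning MM page: prints 0x12340. */
            printf("%#lx\n", mm_to_dma_pfn_demo(paddr >> PAGE_SHIFT));
            return 0;
    }

Under that reading, the new form keeps the physical start on the same MM-page granularity as the mm_to_dma_pfn(iova->pfn_lo) it is paired with in the call above.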
@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
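
The mm_to_dma_pfn() dropped from the domain_sg_mapping() call reads as removing a double conversion: if 'size' has already been accumulated in VT-d pages via aligned_nrpages() earlier in intel_map_sg (that loop is outside this excerpt, so this is an inference), converting it again would overstate the mapping length whenever the MM page size exceeds the 4 KiB VT-d page.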
@@ -3408,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
