author     David Woodhouse <David.Woodhouse@intel.com>  2009-08-08 06:25:28 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2009-08-08 06:26:15 -0400
commit     a131bc185528331451a93db6c50a7d2070376a61 (patch)
tree       18cccd206d4835ee8df147ac3b0c0e30cc00680d /drivers/pci/intel-iommu.c
parent     19943b0e30b05d42e494ae6fef78156ebc8c637e (diff)
parent     ff1649ff780fb7c0bfbf42d05ffc9b56336b9aa3 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Pull fixes in from 2.6.31 so that people testing the iommu-2.6.git tree
no longer trip over bugs which were already fixed (sorry, Horms).
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c  28
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 147b3b960b61..3f256b8d83c1 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1503,7 +1503,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                 }
 
                 set_bit(num, iommu->domain_ids);
-                set_bit(iommu->seq_id, &domain->iommu_bmp);
                 iommu->domains[num] = domain;
                 id = num;
         }
@@ -1646,6 +1645,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
                                      tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+                                            size_t size)
+{
+        host_addr &= ~PAGE_MASK;
+        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                             struct scatterlist *sg, unsigned long phys_pfn,
                             unsigned long nr_pages, int prot)
@@ -1673,7 +1680,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 uint64_t tmp;
 
                 if (!sg_res) {
-                        sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+                        sg_res = aligned_nrpages(sg->offset, sg->length);
                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                         sg->dma_length = sg->length;
                         pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2389,14 +2396,6 @@ error:
         return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-                                            size_t size)
-{
-        host_addr &= ~PAGE_MASK;
-        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
                                      struct dmar_domain *domain,
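The hunks above move aligned_nrpages() from its old position next to intel_alloc_iova() up in front of __domain_mapping(), so the scatterlist path can reuse it instead of open-coding a VTD_PAGE_SIZE round-up; the helper rounds to whole MM pages and returns the count in 4KiB VT-d pages. A minimal standalone sketch of what it computes (the 16KiB PAGE_SHIFT below is an assumption picked to show the MM-page vs VT-d-page distinction; on x86 both shifts are 12 and the helper degenerates to a plain 4KiB round-up):

    /* Hypothetical standalone illustration of aligned_nrpages(); not kernel code.
     * PAGE_SHIFT of 14 (16KiB MM pages) is assumed purely for contrast with the
     * fixed 4KiB VT-d page size. */
    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12
    #define PAGE_SHIFT     14
    #define PAGE_SIZE      (1UL << PAGE_SHIFT)
    #define PAGE_MASK      (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a)  (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
    {
            host_addr &= ~PAGE_MASK;
            return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
    }

    int main(void)
    {
            /* A 1-byte buffer at offset 0x100 still spans one whole 16KiB MM page,
             * i.e. four 4KiB VT-d pages, so this prints 4. */
            printf("%lu\n", aligned_nrpages(0x100, 1));
            return 0;
    }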
@@ -2539,6 +2538,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         int prot = 0;
         int ret;
         struct intel_iommu *iommu;
+        unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
         BUG_ON(dir == DMA_NONE);
 
@@ -2573,7 +2573,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
          * is not a big problem
          */
         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-                                 paddr >> VTD_PAGE_SHIFT, size, prot);
+                                 mm_to_dma_pfn(paddr_pfn), size, prot);
         if (ret)
                 goto error;
 
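In __intel_map_single() the physical address is now reduced to an MM page frame number first (paddr_pfn = paddr >> PAGE_SHIFT) and only then converted to a VT-d pfn for domain_pfn_mapping(), so the mapping starts at the MM-page boundary rather than at the 4KiB page containing paddr. The sketch below only illustrates the unit conversion; the body of mm_to_dma_pfn() shown here is an assumption (a plain shift between the two page sizes) and the sample address is made up:

    /* Hypothetical illustration of MM-pfn vs VT-d-pfn arithmetic; not kernel code. */
    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12
    #define PAGE_SHIFT     14   /* again assuming 16KiB MM pages for contrast */

    /* Assumed helper shape: one MM pfn covers 2^(PAGE_SHIFT - VTD_PAGE_SHIFT) VT-d pfns. */
    static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
    {
            return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long paddr = 0x12345678UL;

            /* Old argument: VT-d page containing paddr (0x12345).
             * New argument: first VT-d page of the containing 16KiB MM page (0x12344),
             * presumably lining up with the MM-page-granular IOVA allocation. */
            printf("old=%#lx new=%#lx\n",
                   paddr >> VTD_PAGE_SHIFT,
                   mm_to_dma_pfn(paddr >> PAGE_SHIFT));
            return 0;
    }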
@@ -2864,7 +2864,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-        ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+        ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
         if (unlikely(ret)) {
                 /* clear the page */
                 dma_pte_clear_range(domain, start_vpfn,
@@ -3390,6 +3390,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
         domain->iommu_count = 0;
         domain->iommu_coherency = 0;
+        domain->iommu_snooping = 0;
         domain->max_addr = 0;
 
         /* always allocate the top pgd */
@@ -3582,6 +3583,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 {
         struct dmar_domain *dmar_domain = domain->priv;
 
+        if (!size)
+                return;
+
         dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                             (iova + size - 1) >> VTD_PAGE_SHIFT);
 
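The new early return in intel_iommu_unmap_range() presumably guards against zero-length unmaps: with size == 0 the end-of-range expression iova + size - 1 wraps below iova (or, for iova == 0, all the way around), handing the range-clearing call a nonsensical pfn range. A tiny sketch of that arithmetic (sample values made up):

    /* Hypothetical illustration of the zero-size underflow; not kernel code. */
    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12

    int main(void)
    {
            unsigned long long iova = 0x100000, size = 0;

            /* start pfn is 0x100, but the "last" pfn underflows to 0xff, one page
             * below the start; with iova == 0 it would wrap to a huge value instead. */
            printf("start=%#llx last=%#llx\n",
                   iova >> VTD_PAGE_SHIFT,
                   (iova + size - 1) >> VTD_PAGE_SHIFT);
            return 0;
    }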