author	Ingo Molnar <mingo@elte.hu>	2009-09-02 02:17:56 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-02 02:17:56 -0400
commit	936e894a976dd3b0f07f1f6f43c17b77b7e6146d (patch)
tree	5ed5c1f6735dcd26550594df23c8f7fe2aa21a15 /drivers/pci/intel-iommu.c
parent	69575d388603365f2afbf4166df93152df59b165 (diff)
parent	326ba5010a5429a5a528b268b36a5900d4ab0eba (diff)
Merge commit 'v2.6.31-rc8' into x86/txt
Conflicts:
	arch/x86/kernel/reboot.c
	security/Kconfig

Merge reason: resolve the conflicts, bump up from rc3 to rc8.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	|  25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 833509b53527..562221e11917 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1506,7 +1506,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 		}
 
 		set_bit(num, iommu->domain_ids);
-		set_bit(iommu->seq_id, &domain->iommu_bmp);
 		iommu->domains[num] = domain;
 		id = num;
 	}
@@ -1649,6 +1648,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					  tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
@@ -1676,7 +1683,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2416,14 +2423,6 @@ error:
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
@@ -2552,6 +2551,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
@@ -2586,7 +2586,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
 
@@ -2876,7 +2876,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
@@ -3421,6 +3421,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
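
The helper this diff hoists, aligned_nrpages(), is compact enough to study on
its own: it keeps only the offset of host_addr within its MM page, rounds
offset + size up to a whole number of MM pages, and returns that extent
counted in VT-d pages. The following userspace sketch (not part of the
commit) reproduces the arithmetic under the assumption of 4 KiB MM pages and
4 KiB VT-d pages, i.e. PAGE_SHIFT == VTD_PAGE_SHIFT == 12 as on x86; the
MM_PAGE_* names are illustrative stand-ins for the kernel's PAGE_MASK and
PAGE_ALIGN macros, open-coded so the example builds standalone:

	#include <stdio.h>
	#include <stddef.h>

	#define MM_PAGE_SHIFT	12
	#define MM_PAGE_SIZE	(1UL << MM_PAGE_SHIFT)
	#define MM_PAGE_MASK	(~(MM_PAGE_SIZE - 1))
	#define MM_PAGE_ALIGN(a) (((a) + MM_PAGE_SIZE - 1) & MM_PAGE_MASK)
	#define VTD_PAGE_SHIFT	12

	static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
	{
		/* keep only the offset within the MM page */
		host_addr &= ~MM_PAGE_MASK;
		/* round up to whole MM pages, count the result in VT-d pages */
		return MM_PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
	}

	int main(void)
	{
		/* 0x1000 bytes starting 0x200 into a page straddle two pages */
		printf("%lu\n", aligned_nrpages(0x200, 0x1000));	/* prints 2 */
		/* a page-aligned, page-sized buffer needs exactly one */
		printf("%lu\n", aligned_nrpages(0x1000, 0x1000));	/* prints 1 */
		return 0;
	}

Moving the helper above __domain_mapping() lets the sg_res computation at new
line 1686 reuse it instead of open-coding the same round-up, and the later
hunks keep the units consistent with it: mm_to_dma_pfn() is now applied to
page frame numbers (iova->pfn_lo, paddr_pfn) rather than to a size, as the
__intel_map_single and intel_map_sg changes show.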