author		David Woodhouse <David.Woodhouse@intel.com>	2009-06-27 11:21:20 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-06-29 07:38:11 -0400
commit		dd4e831960e4f0214480fa96a53ca9bb7dd04927 (patch)
tree		740ef8b4c65d9314d337b618900cf22fdfcbdb17 /drivers/pci/intel-iommu.c
parent		c7ab48d2acaf959e4d59c3f55d12fdb7ca9afd7c (diff)
intel-iommu: Change dma_set_pte_addr() to dma_set_pte_pfn()
Add some helpers for converting between VT-d and normal system pfns,
since system pages can be larger than VT-d pages.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
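For concreteness, the arithmetic the new helpers perform: one system page covers (PAGE_SIZE / VTD_PAGE_SIZE) VT-d pages, so the pfn conversion is a shift by the difference of the two page shifts. A minimal standalone sketch, assuming 64KiB system pages and 4KiB VT-d pages (the constants here are illustrative stand-ins for the kernel's macros):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's PAGE_SHIFT/VTD_PAGE_SHIFT,
 * e.g. a 64KiB-page kernel fronting 4KiB VT-d pages. */
#define PAGE_SHIFT	16
#define VTD_PAGE_SHIFT	12

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);	/* 1 MM page = 16 VT-d pages */
}

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	printf("mm pfn 3 -> dma pfn %lu\n", mm_to_dma_pfn(3));	/* 48 */
	printf("dma pfn 63 -> mm pfn %lu\n", dma_to_mm_pfn(63));	/* 3  */
	return 0;
}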
Diffstat (limited to 'drivers/pci/intel-iommu.c')
 drivers/pci/intel-iommu.c | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index fc121967cb5b..852f40a913d4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -65,6 +65,26 @@
 #define PHYSICAL_PAGE_MASK PAGE_MASK
 #endif
 
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+   are never going to work. */
+static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
+{
+	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+
+static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+{
+	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+	return mm_to_dma_pfn(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+	return page_to_dma_pfn(virt_to_page(p));
+}
+
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
 
@@ -207,9 +227,9 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
 	return (pte->val & VTD_PAGE_MASK);
 }
 
-static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
 {
-	pte->val |= (addr & VTD_PAGE_MASK);
+	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
 }
 
 static inline bool dma_pte_present(struct dma_pte *pte)
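The (uint64_t) cast in the new setter is load-bearing on 32-bit kernels: unsigned long is 32 bits there while pte->val is 64, so shifting the pfn before widening would silently drop page frames at or above the 4GiB boundary. A standalone sketch of the failure mode, with uint32_t standing in for a 32-bit unsigned long and an illustrative pfn:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x100001;	/* a frame just above the 4GiB boundary */

	/* Shift performed in 32 bits, then widened: high bits already lost. */
	uint64_t truncated = (uint64_t)(pfn << VTD_PAGE_SHIFT);
	/* Widen first, as dma_set_pte_pfn() does: full address preserved. */
	uint64_t correct = (uint64_t)pfn << VTD_PAGE_SHIFT;

	printf("truncated: 0x%llx\n", (unsigned long long)truncated);	/* 0x1000 */
	printf("correct:   0x%llx\n", (unsigned long long)correct);	/* 0x100001000 */
	return 0;
}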
@@ -702,7 +722,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 				return NULL;
 			}
 			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
-			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
+			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
 			 * table control read/write
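This hunk is behavior-preserving for the page-table case: virt_to_phys(tmp_page) yielded a byte address that the old setter masked down to a page boundary, while virt_to_dma_pfn(tmp_page) goes through virt_to_page() and page_to_pfn() and then applies the mm-to-VT-d shift from the helpers above, handing the setter a ready-made VT-d frame number for the same page-aligned allocation.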
@@ -1648,7 +1668,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(pte));
-		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
+		dma_set_pte_pfn(pte, start_pfn);
 		dma_set_pte_prot(pte, prot);
 		if (prot & DMA_PTE_SNP)
 			dma_set_pte_snp(pte);
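At this last call site the conversion cost moves out of the caller: the old code shifted start_pfn up into an address (in unsigned long width, so with the same 32-bit truncation hazard sketched above) only for dma_set_pte_addr() to mask it back down, while the new code passes the pfn through untouched. Side by side, from the hunk above:

	dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);	/* old: caller shifts, setter masks */
	dma_set_pte_pfn(pte, start_pfn);			/* new: one widening shift in the setter */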