author     Mark McLoughlin <markmc@redhat.com>    2008-11-21 11:56:53 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-01-03 05:57:36 -0500
commit     19c239ce3d089fee339d1ab7e97b43d6f0557ce5 (patch)
tree       712a8dd5e3a42d2e4c2a4514faa649a11e50a658 /drivers/pci/intel-iommu.c
parent     c07e7d217bef198422b7eface456ecfd4bb1ab87 (diff)
intel-iommu: trivially inline DMA PTE macros
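The DMA PTE accessors were statement macros operating on a struct dma_pte
passed by name; this converts them to static inline functions taking a
pointer, so the compiler type-checks the argument and call sites lose the
extra dereference. A minimal standalone sketch of the pattern being applied
(DMA_PTE_READ/DMA_PTE_WRITE and the struct layout are copied from the file;
the main() harness and the _macro suffix are illustrative only, not kernel
code):

    #include <stdint.h>

    /* Bits 0/1 of a PTE are the read/write permission bits, as in
     * drivers/pci/intel-iommu.c. */
    #define DMA_PTE_READ  1
    #define DMA_PTE_WRITE 2

    struct dma_pte {
            uint64_t val;   /* u64 in the kernel */
    };

    /* Before: a statement macro. The argument is substituted textually,
     * so callers pass the struct itself -- dma_set_pte_readable(*pte) --
     * and get no type checking. */
    #define dma_set_pte_readable_macro(p) \
            do { (p).val |= DMA_PTE_READ; } while (0)

    /* After: a static inline taking a pointer. The compiler checks the
     * argument type and the call site drops the dereference. */
    static inline void dma_set_pte_readable(struct dma_pte *pte)
    {
            pte->val |= DMA_PTE_READ;
    }

    int main(void)
    {
            struct dma_pte pte = { 0 };

            dma_set_pte_readable_macro(pte);        /* old call style */
            dma_set_pte_readable(&pte);             /* new call style */

            return (pte.val & DMA_PTE_READ) ? 0 : 1;
    }

For trivial accessors like these, both forms should compile to identical
code at any optimization level; the change is about type safety and
readability, hence "trivially".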
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
 -rw-r--r--  drivers/pci/intel-iommu.c | 71
 1 file changed, 48 insertions(+), 23 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3be931b3bf98..213a5c87fde2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -164,16 +164,41 @@ static inline void context_clear_entry(struct context_entry *context)
 struct dma_pte {
 	u64 val;
 };
-#define dma_clear_pte(p)	do {(p).val = 0;} while (0)
 
-#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
-#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
-#define dma_set_pte_prot(p, prot) \
-		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
-#define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+	pte->val = 0;
+}
+
+static inline void dma_set_pte_readable(struct dma_pte *pte)
+{
+	pte->val |= DMA_PTE_READ;
+}
+
+static inline void dma_set_pte_writable(struct dma_pte *pte)
+{
+	pte->val |= DMA_PTE_WRITE;
+}
+
+static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
+{
+	pte->val = (pte->val & ~3) | (prot & 3);
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+	return (pte->val & VTD_PAGE_MASK);
+}
+
+static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+{
+	pte->val |= (addr & VTD_PAGE_MASK);
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+	return (pte->val & 3) != 0;
+}
 
 struct dmar_domain {
 	int	id;			/* domain id */
@@ -487,7 +512,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 		if (level == 1)
 			break;
 
-		if (!dma_pte_present(*pte)) {
+		if (!dma_pte_present(pte)) {
 			tmp_page = alloc_pgtable_page();
 
 			if (!tmp_page) {
@@ -497,16 +522,16 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 			}
 			__iommu_flush_cache(domain->iommu, tmp_page,
 					PAGE_SIZE);
-			dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
+			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
 			 * table control read/write
 			 */
-			dma_set_pte_readable(*pte);
-			dma_set_pte_writable(*pte);
+			dma_set_pte_readable(pte);
+			dma_set_pte_writable(pte);
 			__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 		}
-		parent = phys_to_virt(dma_pte_addr(*pte));
+		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
 	}
 
@@ -529,9 +554,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
 		if (level == total)
 			return pte;
 
-		if (!dma_pte_present(*pte))
+		if (!dma_pte_present(pte))
 			break;
-		parent = phys_to_virt(dma_pte_addr(*pte));
+		parent = phys_to_virt(dma_pte_addr(pte));
 		total--;
 	}
 	return NULL;
@@ -546,7 +571,7 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 	pte = dma_addr_level_pte(domain, addr, 1);
 
 	if (pte) {
-		dma_clear_pte(*pte);
+		dma_clear_pte(pte);
 		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 	}
 }
@@ -593,8 +618,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 			pte = dma_addr_level_pte(domain, tmp, level);
 			if (pte) {
 				free_pgtable_page(
-					phys_to_virt(dma_pte_addr(*pte)));
-				dma_clear_pte(*pte);
+					phys_to_virt(dma_pte_addr(pte)));
+				dma_clear_pte(pte);
 				__iommu_flush_cache(domain->iommu,
 						pte, sizeof(*pte));
 			}
@@ -1421,9 +1446,9 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
-		BUG_ON(dma_pte_addr(*pte));
-		dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
-		dma_set_pte_prot(*pte, prot);
+		BUG_ON(dma_pte_addr(pte));
+		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
+		dma_set_pte_prot(pte, prot);
 		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 		start_pfn++;
 		index++;
@@ -2584,7 +2609,7 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
 	pte = addr_to_dma_pte(domain, iova);
 
 	if (pte)
-		pfn = dma_pte_addr(*pte);
+		pfn = dma_pte_addr(pte);
 
 	return pfn >> VTD_PAGE_SHIFT;
 }
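As a footnote on the last hunk: dma_pte_addr() masks off the low
permission/flag bits to recover the page-aligned physical address stored in
the PTE, and intel_iommu_iova_to_pfn() then shifts that down to a page frame
number. A small standalone sketch of that arithmetic, assuming VT-d's 4KiB
page constants (VTD_PAGE_SHIFT is 12 in the intel-iommu headers; the example
PTE value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's VT-d page constants. */
    #define VTD_PAGE_SHIFT  12
    #define VTD_PAGE_MASK   (~((1ULL << VTD_PAGE_SHIFT) - 1))

    struct dma_pte {
            uint64_t val;
    };

    /* Mirrors the new inline: clear the low permission/flag bits,
     * keeping the page-aligned address held in the PTE. */
    static inline uint64_t dma_pte_addr(struct dma_pte *pte)
    {
            return pte->val & VTD_PAGE_MASK;
    }

    int main(void)
    {
            /* A PTE pointing at page frame 0x12345 with both R/W bits
             * (the low two bits) set. */
            struct dma_pte pte = { (0x12345ULL << VTD_PAGE_SHIFT) | 3 };

            /* As in intel_iommu_iova_to_pfn(): take the address, then
             * shift it down to a pfn. Prints "pfn = 0x12345". */
            printf("pfn = 0x%llx\n",
                   (unsigned long long)(dma_pte_addr(&pte) >> VTD_PAGE_SHIFT));
            return 0;
    }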