path: root/arch/x86/kernel/amd_iommu.c
author    Joerg Roedel <joerg.roedel@amd.com>  2009-11-24 11:43:32 -0500
committer Joerg Roedel <joerg.roedel@amd.com>  2009-11-27 08:20:30 -0500
commit    308973d3b958b9328a1051642c81ee6dbc5021a4 (patch)
tree      608d27556e7e3677c98555543e407e8bc083236d /arch/x86/kernel/amd_iommu.c
parent    87a64d523825351a23743e69949c2a8c2077cecf (diff)
x86/amd-iommu: Move some pte allocation functions into the right section
This patch moves alloc_pte() and fetch_pte() into the page table handling
code section so that the forward declarations for them can be removed.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
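[Aside, for readers new to this code (not part of the original commit message): each level of the AMD IOMMU IO page table holds 512 eight-byte entries, so it translates 9 bits of the DMA address above the 4 KiB (12-bit) page offset. A table in mode n therefore spans 12 + 9*n address bits: mode 3 gives 39 bits, mode 4 gives 48, and mode 6 would nominally give 12 + 54 = 66, clamped to the full 64-bit space. That is why increase_address_space() in the diff below grows the address space by exactly 9 bits per call and refuses to grow a domain that is already at PAGE_MODE_6_LEVEL.]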
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  193
1 file changed, 94 insertions(+), 99 deletions(-)
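[Aside: to make the page-table walk in the moved fetch_pte()/alloc_pte() below easier to follow, here is a minimal, self-contained C model of the same lookup. Every name in it (toy_fetch_pte, alloc_table, LEVEL_INDEX, PTE_PRESENT) is invented for this sketch and is not the kernel's API; it also stores plain pointers in table entries where the kernel stores physical addresses via virt_to_phys()/IOMMU_PTE_PAGE(), and it omits the large-page (PM_PTE_LEVEL) early exit.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12                      /* 4 KiB leaf pages */
#define LEVEL_SHIFT(lvl)    (PAGE_SHIFT + (lvl) * 9)
#define LEVEL_INDEX(lvl, a) (((a) >> LEVEL_SHIFT(lvl)) & 0x1ffULL)
#define PTE_PRESENT         1ULL

/* One zeroed table: 512 entries of 8 bytes, i.e. one 4 KiB page. */
static uint64_t *alloc_table(void)
{
        return calloc(512, sizeof(uint64_t));
}

/* Toy counterpart of fetch_pte() from the diff: walk 'mode' levels down
 * from 'root' and return the level-0 PTE slot for 'addr', or NULL if an
 * intermediate entry is absent. */
static uint64_t *toy_fetch_pte(uint64_t *root, int mode, uint64_t addr)
{
        int level = mode - 1;
        uint64_t *pte = &root[LEVEL_INDEX(level, addr)];

        while (level > 0) {
                if (!(*pte & PTE_PRESENT))
                        return NULL;
                level -= 1;
                /* The kernel recovers the next table via IOMMU_PTE_PAGE();
                 * this toy stores the pointer directly in the entry. */
                pte = (uint64_t *)(uintptr_t)(*pte & ~PTE_PRESENT);
                pte = &pte[LEVEL_INDEX(level, addr)];
        }
        return pte;
}

int main(void)
{
        uint64_t *root = alloc_table();
        uint64_t *mid  = alloc_table();
        uint64_t addr  = 0x12345000ULL; /* an arbitrary IO virtual address */

        /* Hand-build a two-level (mode 2) table covering 'addr'. */
        root[LEVEL_INDEX(1, addr)] = (uint64_t)(uintptr_t)mid | PTE_PRESENT;
        mid[LEVEL_INDEX(0, addr)]  = 0xdeadb000ULL | PTE_PRESENT;

        uint64_t *pte = toy_fetch_pte(root, 2, addr);
        printf("leaf pte = %#llx\n", pte ? (unsigned long long)*pte : 0ULL);

        free(mid);
        free(root);
        return 0;
}

Built as C99 this prints "leaf pte = 0xdeadb001" for the hand-built two-level table; the shape of the loop -- take 9 index bits per level, stop at a non-present entry, descend until the target level -- is exactly what fetch_pte() in the diff does.]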
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 17e83ecb8b2..90b365024c2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -59,15 +59,10 @@ struct iommu_cmd {
 
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                              struct unity_map_entry *e);
-static u64 *alloc_pte(struct protection_domain *domain,
-                      unsigned long address, int end_lvl,
-                      u64 **pte_page, gfp_t gfp);
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                       unsigned long start_page,
                                       unsigned int pages);
 static void reset_iommu_command_buffer(struct amd_iommu *iommu);
-static u64 *fetch_pte(struct protection_domain *domain,
-                      unsigned long address, int map_size);
 static void update_domain(struct protection_domain *domain);
 
 /****************************************************************************
@@ -665,6 +660,100 @@ void amd_iommu_flush_all_devices(void)
 ****************************************************************************/
 
 /*
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+static bool increase_address_space(struct protection_domain *domain,
+                                   gfp_t gfp)
+{
+        u64 *pte;
+
+        if (domain->mode == PAGE_MODE_6_LEVEL)
+                /* address space already 64 bit large */
+                return false;
+
+        pte = (void *)get_zeroed_page(gfp);
+        if (!pte)
+                return false;
+
+        *pte = PM_LEVEL_PDE(domain->mode,
+                            virt_to_phys(domain->pt_root));
+        domain->pt_root = pte;
+        domain->mode   += 1;
+        domain->updated = true;
+
+        return true;
+}
+
+static u64 *alloc_pte(struct protection_domain *domain,
+                      unsigned long address,
+                      int end_lvl,
+                      u64 **pte_page,
+                      gfp_t gfp)
+{
+        u64 *pte, *page;
+        int level;
+
+        while (address > PM_LEVEL_SIZE(domain->mode))
+                increase_address_space(domain, gfp);
+
+        level = domain->mode - 1;
+        pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+
+        while (level > end_lvl) {
+                if (!IOMMU_PTE_PRESENT(*pte)) {
+                        page = (u64 *)get_zeroed_page(gfp);
+                        if (!page)
+                                return NULL;
+                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+                }
+
+                level -= 1;
+
+                pte = IOMMU_PTE_PAGE(*pte);
+
+                if (pte_page && level == end_lvl)
+                        *pte_page = pte;
+
+                pte = &pte[PM_LEVEL_INDEX(level, address)];
+        }
+
+        return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct protection_domain *domain,
+                      unsigned long address, int map_size)
+{
+        int level;
+        u64 *pte;
+
+        level = domain->mode - 1;
+        pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+
+        while (level > map_size) {
+                if (!IOMMU_PTE_PRESENT(*pte))
+                        return NULL;
+
+                level -= 1;
+
+                pte = IOMMU_PTE_PAGE(*pte);
+                pte = &pte[PM_LEVEL_INDEX(level, address)];
+
+                if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
+                        pte = NULL;
+                        break;
+                }
+        }
+
+        return pte;
+}
+
+/*
  * Generic mapping functions. It maps a physical address into a DMA
  * address space. It allocates the page table pages if necessary.
  * In the future it can be extended to a generic mapping function
@@ -820,37 +909,6 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
 */
 
 /*
- * This function checks if there is a PTE for a given dma address. If
- * there is one, it returns the pointer to it.
- */
-static u64 *fetch_pte(struct protection_domain *domain,
-                      unsigned long address, int map_size)
-{
-        int level;
-        u64 *pte;
-
-        level = domain->mode - 1;
-        pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
-
-        while (level > map_size) {
-                if (!IOMMU_PTE_PRESENT(*pte))
-                        return NULL;
-
-                level -= 1;
-
-                pte = IOMMU_PTE_PAGE(*pte);
-                pte = &pte[PM_LEVEL_INDEX(level, address)];
-
-                if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
-                        pte = NULL;
-                        break;
-                }
-        }
-
-        return pte;
-}
-
-/*
  * This function is used to add a new aperture range to an existing
  * aperture in case of dma_ops domain allocation or address allocation
  * failure.
@@ -1535,69 +1593,6 @@ static void update_domain(struct protection_domain *domain)
 }
 
 /*
- * This function is used to add another level to an IO page table. Adding
- * another level increases the size of the address space by 9 bits to a size up
- * to 64 bits.
- */
-static bool increase_address_space(struct protection_domain *domain,
-                                   gfp_t gfp)
-{
-        u64 *pte;
-
-        if (domain->mode == PAGE_MODE_6_LEVEL)
-                /* address space already 64 bit large */
-                return false;
-
-        pte = (void *)get_zeroed_page(gfp);
-        if (!pte)
-                return false;
-
-        *pte = PM_LEVEL_PDE(domain->mode,
-                            virt_to_phys(domain->pt_root));
-        domain->pt_root = pte;
-        domain->mode   += 1;
-        domain->updated = true;
-
-        return true;
-}
-
-static u64 *alloc_pte(struct protection_domain *domain,
-                      unsigned long address,
-                      int end_lvl,
-                      u64 **pte_page,
-                      gfp_t gfp)
-{
-        u64 *pte, *page;
-        int level;
-
-        while (address > PM_LEVEL_SIZE(domain->mode))
-                increase_address_space(domain, gfp);
-
-        level = domain->mode - 1;
-        pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
-
-        while (level > end_lvl) {
-                if (!IOMMU_PTE_PRESENT(*pte)) {
-                        page = (u64 *)get_zeroed_page(gfp);
-                        if (!page)
-                                return NULL;
-                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
-                }
-
-                level -= 1;
-
-                pte = IOMMU_PTE_PAGE(*pte);
-
-                if (pte_page && level == end_lvl)
-                        *pte_page = pte;
-
-                pte = &pte[PM_LEVEL_INDEX(level, address)];
-        }
-
-        return pte;
-}
-
-/*
  * This function fetches the PTE for a given address in the aperture
  */
 static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
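[Aside: one detail of increase_address_space() that is easy to miss when reading the moved code: the freshly zeroed top-level table gets only its slot 0 populated, with a PDE pointing at the old pt_root. That is sufficient because every address representable under the old mode indexes to 0 at the newly added level. In the toy terms of the sketch above (reusing its invented alloc_table/PTE_PRESENT definitions, still not kernel API):

/* Toy version of increase_address_space(): push a fresh top level. */
static int toy_increase(uint64_t **rootp, int *modep)
{
        uint64_t *new_root;

        if (*modep == 6)        /* already spans the full 64-bit space */
                return 0;

        new_root = alloc_table();       /* zeroed 512-entry table */
        if (!new_root)
                return 0;

        /* Slot 0 inherits the old root: any address valid under the old
         * mode has index 0 at the new top level. */
        new_root[0] = (uint64_t)(uintptr_t)*rootp | PTE_PRESENT;
        *rootp  = new_root;
        *modep += 1;
        return 1;
}

With growth handled this way, alloc_pte() needs no special case: its "while (address > PM_LEVEL_SIZE(domain->mode))" loop simply keeps pushing levels until the requested address fits.]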