 arch/x86/include/asm/amd_iommu_types.h |  38 ++--
 arch/x86/kernel/amd_iommu.c            | 253 ++++++++++++++++-----------
 2 files changed, 178 insertions(+), 113 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 86a56b49f2c6..2a2cc7a78a81 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -147,19 +147,25 @@
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03
-
-#define IOMMU_PDE_NL_0   0x000ULL
-#define IOMMU_PDE_NL_1   0x200ULL
-#define IOMMU_PDE_NL_2   0x400ULL
-#define IOMMU_PDE_NL_3   0x600ULL
-
-#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
-#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
-#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
-
-#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
-#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
-#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
+#define PAGE_MODE_4_LEVEL 0x04
+#define PAGE_MODE_5_LEVEL 0x05
+#define PAGE_MODE_6_LEVEL 0x06
+
+#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
+#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
+				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
+				   (0xffffffffffffffffULL))
+#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
+#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
+#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
+				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
+
+#define PM_MAP_4k		0
+#define PM_ADDR_MASK		0x000ffffffffff000ULL
+#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
+				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
+#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
 
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
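The PM_LEVEL_* macros generalize the fixed L0/L1/L2 helpers they replace: PM_LEVEL_SHIFT(0) is the old 4k shift of 12, PM_LEVEL_SHIFT(1) the old shift of 21, and so on, 9 translation bits per level. A minimal userspace sketch of the decomposition (not kernel code; the macros are copied from the hunk above, the address is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

#define PM_LEVEL_SHIFT(x)    (12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)     (((x) < 6) ? ((1ULL << PM_LEVEL_SHIFT((x))) - 1) \
                                        : (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)

int main(void)
{
	uint64_t iova = 0x0000004030201000ULL; /* arbitrary example address */
	int lvl;

	/* each level adds 9 bits: shifts are 12, 21, 30, 39, 48, 57 */
	for (lvl = 0; lvl < 6; ++lvl)
		printf("level %d: shift %2d index %3llu (space size 0x%llx)\n",
		       lvl, PM_LEVEL_SHIFT(lvl),
		       (unsigned long long)PM_LEVEL_INDEX(lvl, iova),
		       (unsigned long long)PM_LEVEL_SIZE(lvl));
	return 0;
}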
@@ -168,11 +174,6 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
-#define IOMMU_L1_PDE(address) \
-	((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define IOMMU_L2_PDE(address) \
-	((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
 #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -230,6 +231,7 @@ struct protection_domain {
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
 	unsigned long flags;	/* flags to find out type of domain */
+	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	void *priv;		/* private data */
 };
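The new updated flag implements a dirty-bit protocol: code that changes the page-table layout (most importantly increase_address_space(), added below in amd_iommu.c) only sets the flag, and update_domain() later rewrites the device table entries and flushes once. A schematic sketch of the protocol, not a verbatim excerpt:

	/* writers mark the domain dirty after changing its layout ... */
	domain->updated = true;		/* e.g. after swapping pt_root */

	/* ... and the map paths end with one conditional, batched flush */
	update_domain(domain);		/* no-op unless ->updated is set */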
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index dc19ed43b54e..98f230f6a28d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -47,7 +47,6 @@ static DEFINE_SPINLOCK(iommu_pd_list_lock);
  */
 static struct protection_domain *pt_domain;
 
-#ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 
 /*
@@ -60,13 +59,16 @@ struct iommu_cmd {
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 			     struct unity_map_entry *e);
 static struct dma_ops_domain *find_protection_domain(u16 devid);
-static u64* alloc_pte(struct protection_domain *dom,
-		      unsigned long address, u64
-		      **pte_page, gfp_t gfp);
+static u64 *alloc_pte(struct protection_domain *domain,
+		      unsigned long address, int end_lvl,
+		      u64 **pte_page, gfp_t gfp);
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned long start_page,
 				      unsigned int pages);
 static void reset_iommu_command_buffer(struct amd_iommu *iommu);
+static u64 *fetch_pte(struct protection_domain *domain,
+		      unsigned long address, int map_size);
+static void update_domain(struct protection_domain *domain);
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -535,12 +537,15 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
 	}
 }
 
-void amd_iommu_flush_all_devices(void)
+static void flush_devices_by_domain(struct protection_domain *domain)
 {
 	struct amd_iommu *iommu;
 	int i;
 
 	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
+		    (amd_iommu_pd_table[i] != domain))
+			continue;
 
 		iommu = amd_iommu_rlookup_table[i];
 		if (!iommu)
@@ -567,6 +572,11 @@ static void reset_iommu_command_buffer(struct amd_iommu *iommu)
 	iommu->reset_in_progress = false;
 }
 
+void amd_iommu_flush_all_devices(void)
+{
+	flush_devices_by_domain(NULL);
+}
+
 /****************************************************************************
  *
  * The functions below are used to create the page table mappings for
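With the domain filter in place, the exported flush-all entry point survives as a thin wrapper, and update_domain() (added further down) reuses the same loop for a targeted flush. The two intended call sites, restated as a sketch:

	flush_devices_by_domain(domain);  /* update_domain(): re-fetch only
					   * DTEs attached to this domain */
	flush_devices_by_domain(NULL);    /* amd_iommu_flush_all_devices() */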
@@ -584,18 +594,21 @@ static void reset_iommu_command_buffer(struct amd_iommu *iommu)
 static int iommu_map_page(struct protection_domain *dom,
 			  unsigned long bus_addr,
 			  unsigned long phys_addr,
-			  int prot)
+			  int prot,
+			  int map_size)
 {
 	u64 __pte, *pte;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
 	phys_addr = PAGE_ALIGN(phys_addr);
 
-	/* only support 512GB address spaces for now */
-	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
+	BUG_ON(!PM_ALIGNED(map_size, bus_addr));
+	BUG_ON(!PM_ALIGNED(map_size, phys_addr));
+
+	if (!(prot & IOMMU_PROT_MASK))
 		return -EINVAL;
 
-	pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
+	pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
 
 	if (IOMMU_PTE_PRESENT(*pte))
 		return -EBUSY;
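The removed 512GB limit (IOMMU_MAP_SIZE_L3) is obsolete because alloc_pte() now grows the table on demand; what remains mandatory is that both addresses be aligned to the requested mapping size. A userspace sketch of the alignment macro (macros copied from the header hunk; the sample addresses are arbitrary):

#include <assert.h>
#include <stdio.h>

#define PM_ADDR_MASK          0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)      (PM_ADDR_MASK & \
                               (~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))

int main(void)
{
	assert( PM_ALIGNED(0, 0x1000ULL));   /* PM_MAP_4k: page aligned   */
	assert(!PM_ALIGNED(0, 0x1234ULL));   /* offset bits set           */
	assert( PM_ALIGNED(1, 0x200000ULL)); /* level-1 (2M) aligned      */
	assert(!PM_ALIGNED(1, 0x1000ULL));   /* 4k-aligned only           */
	printf("all alignment checks hold\n");
	return 0;
}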
@@ -608,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom,
 
 	*pte = __pte;
 
+	update_domain(dom);
+
 	return 0;
 }
 
 static void iommu_unmap_page(struct protection_domain *dom,
-			     unsigned long bus_addr)
+			     unsigned long bus_addr, int map_size)
 {
-	u64 *pte;
-
-	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
-
-	if (!IOMMU_PTE_PRESENT(*pte))
-		return;
-
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
-
-	if (!IOMMU_PTE_PRESENT(*pte))
-		return;
+	u64 *pte = fetch_pte(dom, bus_addr, map_size);
 
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
-
-	*pte = 0;
+	if (pte)
+		*pte = 0;
 }
 
 /*
@@ -685,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 
 	for (addr = e->address_start; addr < e->address_end;
 	     addr += PAGE_SIZE) {
-		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
+		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
+				     PM_MAP_4k);
 		if (ret)
 			return ret;
 		/*
@@ -740,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-static u64* fetch_pte(struct protection_domain *domain,
-		      unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+		      unsigned long address, int map_size)
 {
+	int level;
 	u64 *pte;
 
-	pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)];
+	level = domain->mode - 1;
+	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 
-	if (!IOMMU_PTE_PRESENT(*pte))
-		return NULL;
+	while (level > map_size) {
+		if (!IOMMU_PTE_PRESENT(*pte))
+			return NULL;
 
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+		level -= 1;
 
-	if (!IOMMU_PTE_PRESENT(*pte))
-		return NULL;
+		pte = IOMMU_PTE_PAGE(*pte);
+		pte = &pte[PM_LEVEL_INDEX(level, address)];
 
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+		if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
+			pte = NULL;
+			break;
+		}
+	}
 
 	return pte;
 }
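The fixed three-level walk becomes a loop that starts at the domain's current top level and descends until it reaches map_size, bailing out on a non-present entry or on a PTE whose level field says the translation ends above the requested level. A userspace mock of the same loop shape (a sketch: a plain pointer cast stands in for phys_to_virt(), and the toy table is built by hand):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PM_LEVEL_SHIFT(x)    (12 + ((x) * 9))
#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_PTE_LEVEL(pte)    (((pte) >> 9) & 0x7ULL)
#define IOMMU_PTE_P          (1ULL << 0)
#define IOMMU_PTE_PRESENT(p) ((p) & IOMMU_PTE_P)
/* toy stand-in for IOMMU_PTE_PAGE(): entries store virtual addresses */
#define PTE_PAGE(pte)        ((uint64_t *)(uintptr_t)((pte) & ~0xfffULL))

static uint64_t *fetch_pte(uint64_t *pt_root, int mode,
                           uint64_t address, int map_size)
{
	int level = mode - 1;
	uint64_t *pte = &pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > map_size) {
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		level -= 1;
		pte = PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];

		/* a level-0 PTE above the requested level ends the walk */
		if (PM_PTE_LEVEL(*pte) == 0 && level != map_size)
			return NULL;
	}
	return pte;
}

int main(void)
{
	uint64_t *l1 = aligned_alloc(4096, 4096);
	uint64_t *l0 = aligned_alloc(4096, 4096);
	uint64_t iova = 0x201000;	/* level-1 index 1, level-0 index 1 */
	uint64_t *pte;

	memset(l1, 0, 4096);
	memset(l0, 0, 4096);
	l1[PM_LEVEL_INDEX(1, iova)] = (uintptr_t)l0 | IOMMU_PTE_P;
	l0[PM_LEVEL_INDEX(0, iova)] = 0xabc000ULL | IOMMU_PTE_P;

	pte = fetch_pte(l1, 2, iova, 0);	/* mode 2, map_size PM_MAP_4k */
	printf("pte = %#llx\n", pte ? (unsigned long long)*pte : 0ULL);
	free(l0);
	free(l1);
	return 0;
}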
@@ -797,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu,
 		u64 *pte, *pte_page;
 
 		for (i = 0; i < num_ptes; ++i) {
-			pte = alloc_pte(&dma_dom->domain, address,
+			pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
 					&pte_page, gfp);
 			if (!pte)
 				goto out_free;
@@ -830,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu,
 	for (i = dma_dom->aperture[index]->offset;
 	     i < dma_dom->aperture_size;
 	     i += PAGE_SIZE) {
-		u64 *pte = fetch_pte(&dma_dom->domain, i);
+		u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
 
 		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
 	}
 
+	update_domain(&dma_dom->domain);
+
 	return 0;
 
 out_free:
+	update_domain(&dma_dom->domain);
+
 	free_page((unsigned long)dma_dom->aperture[index]->bitmap);
 
 	kfree(dma_dom->aperture[index]);
@@ -1079,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 	dma_dom->domain.id = domain_id_alloc();
 	if (dma_dom->domain.id == 0)
 		goto free_dma_dom;
-	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
+	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
 	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
 	dma_dom->domain.flags = PD_DMA_OPS_MASK;
 	dma_dom->domain.priv = dma_dom;
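Shrinking the default dma-ops domain from three levels to two is safe only because of the on-demand growth this patch introduces; the trade-off, as a worked calculation:

/* coverage of an n-level AMD IOMMU page table (9 translation bits per
 * level on top of the 12-bit page offset):
 *
 *   PM_LEVEL_SIZE(2) = 2^(12 + 2*9) - 1 = 2^30 - 1  ->   1 GB (new default)
 *   PM_LEVEL_SIZE(3) = 2^(12 + 3*9) - 1 = 2^39 - 1  -> 512 GB (old default)
 *
 * Starting at two levels saves one memory access per table walk; if a
 * domain ever needs an address above 1 GB, alloc_pte() grows the table
 * via increase_address_space(), 9 bits at a time. */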
@@ -1133,20 +1145,9 @@ static struct protection_domain *domain_for_device(u16 devid)
 	return dom;
 }
 
-/*
- * If a device is not yet associated with a domain, this function does
- * assigns it visible for the hardware
- */
-static void __attach_device(struct amd_iommu *iommu,
-			    struct protection_domain *domain,
-			    u16 devid)
+static void set_dte_entry(u16 devid, struct protection_domain *domain)
 {
-	u64 pte_root;
-
-	/* lock domain */
-	spin_lock(&domain->lock);
-
-	pte_root = virt_to_phys(domain->pt_root);
+	u64 pte_root = virt_to_phys(domain->pt_root);
 
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
@@ -1157,6 +1158,21 @@ static void __attach_device(struct amd_iommu *iommu,
 	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 
 	amd_iommu_pd_table[devid] = domain;
+}
+
+/*
+ * If a device is not yet associated with a domain, this function
+ * associates it with the domain and makes it visible to the hardware
+ */
+static void __attach_device(struct amd_iommu *iommu,
+			    struct protection_domain *domain,
+			    u16 devid)
+{
+	/* lock domain */
+	spin_lock(&domain->lock);
+
+	/* update DTE entry */
+	set_dte_entry(devid, domain);
 
 	domain->dev_cnt += 1;
 
@@ -1164,6 +1180,10 @@ static void __attach_device(struct amd_iommu *iommu,
 	spin_unlock(&domain->lock);
 }
 
+/*
+ * If a device is not yet associated with a domain, this function
+ * associates it with the domain and makes it visible to the hardware
+ */
 static void attach_device(struct amd_iommu *iommu,
 			  struct protection_domain *domain,
 			  u16 devid)
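After this refactor every device-table write funnels through one helper, which is what makes the lazy update below possible:

/* DTE writers after this patch:
 *
 *   __attach_device()     -> set_dte_entry(devid, domain)
 *   update_device_table() -> set_dte_entry(devid, domain)   (added below)
 *
 * so a domain whose pt_root or mode changed can be re-propagated to all
 * attached devices without re-running the attach logic. */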
@@ -1389,39 +1409,91 @@ static int get_device_resources(struct device *dev,
 	return 1;
 }
 
+static void update_device_table(struct protection_domain *domain)
+{
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (amd_iommu_pd_table[i] != domain)
+			continue;
+		write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+		set_dte_entry(i, domain);
+		write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	}
+}
+
+static void update_domain(struct protection_domain *domain)
+{
+	if (!domain->updated)
+		return;
+
+	update_device_table(domain);
+	flush_devices_by_domain(domain);
+	iommu_flush_domain(domain->id);
+
+	domain->updated = false;
+}
+
 /*
- * If the pte_page is not yet allocated this function is called
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
  */
-static u64* alloc_pte(struct protection_domain *dom,
-		      unsigned long address, u64 **pte_page, gfp_t gfp)
+static bool increase_address_space(struct protection_domain *domain,
+				   gfp_t gfp)
+{
+	u64 *pte;
+
+	if (domain->mode == PAGE_MODE_6_LEVEL)
+		/* address space already 64 bit large */
+		return false;
+
+	pte = (void *)get_zeroed_page(gfp);
+	if (!pte)
+		return false;
+
+	*pte = PM_LEVEL_PDE(domain->mode,
+			    virt_to_phys(domain->pt_root));
+	domain->pt_root = pte;
+	domain->mode   += 1;
+	domain->updated = true;
+
+	return true;
+}
+
+static u64 *alloc_pte(struct protection_domain *domain,
+		      unsigned long address,
+		      int end_lvl,
+		      u64 **pte_page,
+		      gfp_t gfp)
 {
 	u64 *pte, *page;
+	int level;
 
-	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
+	while (address > PM_LEVEL_SIZE(domain->mode))
+		increase_address_space(domain, gfp);
 
-	if (!IOMMU_PTE_PRESENT(*pte)) {
-		page = (u64 *)get_zeroed_page(gfp);
-		if (!page)
-			return NULL;
-		*pte = IOMMU_L2_PDE(virt_to_phys(page));
-	}
+	level = domain->mode - 1;
+	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+	while (level > end_lvl) {
+		if (!IOMMU_PTE_PRESENT(*pte)) {
+			page = (u64 *)get_zeroed_page(gfp);
+			if (!page)
+				return NULL;
+			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+		}
 
-	if (!IOMMU_PTE_PRESENT(*pte)) {
-		page = (u64 *)get_zeroed_page(gfp);
-		if (!page)
-			return NULL;
-		*pte = IOMMU_L1_PDE(virt_to_phys(page));
-	}
+		level -= 1;
 
-	pte = IOMMU_PTE_PAGE(*pte);
+		pte = IOMMU_PTE_PAGE(*pte);
 
-	if (pte_page)
-		*pte_page = pte;
+		if (pte_page && level == end_lvl)
+			*pte_page = pte;
 
-	pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+		pte = &pte[PM_LEVEL_INDEX(level, address)];
+	}
 
 	return pte;
 }
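increase_address_space() grows the table without invalidating existing mappings: the old root becomes entry 0 of a freshly allocated top level, which is correct because every already-mapped address has only zero bits above the old limit. A userspace sketch of the mechanism (toy code: virtual addresses instead of virt_to_phys()/phys_to_virt(), and the old root is intentionally kept alive as the new second level):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_MODE_6_LEVEL  0x06
#define IOMMU_PTE_P        (1ULL << 0)
#define IOMMU_PTE_IR       (1ULL << 61)
#define IOMMU_PTE_IW       (1ULL << 62)
#define PM_LEVEL_ENC(x)    (((uint64_t)(x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC(x) | IOMMU_PTE_P | \
                            IOMMU_PTE_IR | IOMMU_PTE_IW)

struct toy_domain {
	int mode;
	uint64_t *pt_root;
	bool updated;
};

static bool increase_address_space(struct toy_domain *d)
{
	uint64_t *pte;

	if (d->mode == PAGE_MODE_6_LEVEL)
		return false;		/* already covers 64 bits */

	pte = aligned_alloc(4096, 4096);
	if (!pte)
		return false;
	memset(pte, 0, 4096);

	/* slot 0 is correct: existing addresses have only zero bits
	 * above the old top level, so their new top-level index is 0 */
	pte[0] = PM_LEVEL_PDE(d->mode, (uint64_t)(uintptr_t)d->pt_root);
	d->pt_root = pte;
	d->mode += 1;
	d->updated = true;	/* DTEs and IOTLBs must be refreshed */
	return true;
}

int main(void)
{
	struct toy_domain d = { 2, aligned_alloc(4096, 4096), false };

	memset(d.pt_root, 0, 4096);
	increase_address_space(&d);
	printf("mode=%d root[0]=%#llx updated=%d\n", d.mode,
	       (unsigned long long)d.pt_root[0], d.updated);
	return 0;
}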
@@ -1441,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 
 	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
 	if (!pte) {
-		pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
+		pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
+				GFP_ATOMIC);
 		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
 	} else
-		pte += IOMMU_PTE_L0_INDEX(address);
+		pte += PM_LEVEL_INDEX(0, address);
+
+	update_domain(&dom->domain);
 
 	return pte;
 }
@@ -1506,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (!pte)
 		return;
 
-	pte += IOMMU_PTE_L0_INDEX(address);
+	pte += PM_LEVEL_INDEX(0, address);
 
 	WARN_ON(!*pte);
 
@@ -2240,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	paddr &= PAGE_MASK;
 
 	for (i = 0; i < npages; ++i) {
-		ret = iommu_map_page(domain, iova, paddr, prot);
+		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)
 			return ret;
 
@@ -2261,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 	iova &= PAGE_MASK;
 
 	for (i = 0; i < npages; ++i) {
-		iommu_unmap_page(domain, iova);
+		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova += PAGE_SIZE;
 	}
 
@@ -2276,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	phys_addr_t paddr;
 	u64 *pte;
 
-	pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)];
-
-	if (!IOMMU_PTE_PRESENT(*pte))
-		return 0;
-
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(iova)];
-
-	if (!IOMMU_PTE_PRESENT(*pte))
-		return 0;
-
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L0_INDEX(iova)];
+	pte = fetch_pte(domain, iova, PM_MAP_4k);
 
-	if (!IOMMU_PTE_PRESENT(*pte))
+	if (!pte || !IOMMU_PTE_PRESENT(*pte))
 		return 0;
 
 	paddr = *pte & IOMMU_PAGE_MASK;
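The final physical address is the page frame taken from the PTE plus the page-offset bits of the iova, which the tail of the function (outside this hunk) ORs back in. A worked example, assuming a 4k PTE:

/* iova = 0x201234, PTE maps the page to 0xabc000:
 *
 *   paddr  = *pte & IOMMU_PAGE_MASK;   ->  0xabc000
 *   paddr |= iova & ~PAGE_MASK;        ->  0xabc234
 */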