diff options
author | Joerg Roedel <jroedel@suse.de> | 2015-04-01 08:58:48 -0400 |
---|---|---|
committer | Joerg Roedel <jroedel@suse.de> | 2015-04-02 07:31:07 -0400 |
commit | 3039ca1b1c37e61cc9239dbb3903db55141ecabd (patch) | |
tree | 33d23e9b45abb6dcfb7fe1189688ebf0135cb3a6 | |
parent | 3b839a57998515bb44c091bbcb8ea0da9d2adef4 (diff) |
iommu/amd: Return the pte page-size in fetch_pte
Extend the fetch_pte function to also return the page-size
that is mapped by the returned pte.
Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r-- | drivers/iommu/amd_iommu.c | 52 | ||||
-rw-r--r-- | drivers/iommu/amd_iommu_types.h | 6 |
2 files changed, 36 insertions(+), 22 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 49ecf003f7ca..24ef9e600289 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1322,7 +1322,9 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
1322 | * This function checks if there is a PTE for a given dma address. If | 1322 | * This function checks if there is a PTE for a given dma address. If |
1323 | * there is one, it returns the pointer to it. | 1323 | * there is one, it returns the pointer to it. |
1324 | */ | 1324 | */ |
1325 | static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | 1325 | static u64 *fetch_pte(struct protection_domain *domain, |
1326 | unsigned long address, | ||
1327 | unsigned long *page_size) | ||
1326 | { | 1328 | { |
1327 | int level; | 1329 | int level; |
1328 | u64 *pte; | 1330 | u64 *pte; |
@@ -1330,8 +1332,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | |||
1330 | if (address > PM_LEVEL_SIZE(domain->mode)) | 1332 | if (address > PM_LEVEL_SIZE(domain->mode)) |
1331 | return NULL; | 1333 | return NULL; |
1332 | 1334 | ||
1333 | level = domain->mode - 1; | 1335 | level = domain->mode - 1; |
1334 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | 1336 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
1337 | *page_size = PTE_LEVEL_PAGE_SIZE(level); | ||
1335 | 1338 | ||
1336 | while (level > 0) { | 1339 | while (level > 0) { |
1337 | 1340 | ||
@@ -1340,19 +1343,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | |||
1340 | return NULL; | 1343 | return NULL; |
1341 | 1344 | ||
1342 | /* Large PTE */ | 1345 | /* Large PTE */ |
1343 | if (PM_PTE_LEVEL(*pte) == 0x07) { | 1346 | if (PM_PTE_LEVEL(*pte) == 7 || |
1344 | unsigned long pte_mask, __pte; | 1347 | PM_PTE_LEVEL(*pte) == 0) |
1345 | 1348 | break; | |
1346 | /* | ||
1347 | * If we have a series of large PTEs, make | ||
1348 | * sure to return a pointer to the first one. | ||
1349 | */ | ||
1350 | pte_mask = PTE_PAGE_SIZE(*pte); | ||
1351 | pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); | ||
1352 | __pte = ((unsigned long)pte) & pte_mask; | ||
1353 | |||
1354 | return (u64 *)__pte; | ||
1355 | } | ||
1356 | 1349 | ||
1357 | /* No level skipping support yet */ | 1350 | /* No level skipping support yet */ |
1358 | if (PM_PTE_LEVEL(*pte) != level) | 1351 | if (PM_PTE_LEVEL(*pte) != level) |
@@ -1361,8 +1354,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | |||
1361 | level -= 1; | 1354 | level -= 1; |
1362 | 1355 | ||
1363 | /* Walk to the next level */ | 1356 | /* Walk to the next level */ |
1364 | pte = IOMMU_PTE_PAGE(*pte); | 1357 | pte = IOMMU_PTE_PAGE(*pte); |
1365 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | 1358 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
1359 | *page_size = PTE_LEVEL_PAGE_SIZE(level); | ||
1360 | } | ||
1361 | |||
1362 | if (PM_PTE_LEVEL(*pte) == 0x07) { | ||
1363 | unsigned long pte_mask; | ||
1364 | |||
1365 | /* | ||
1366 | * If we have a series of large PTEs, make | ||
1367 | * sure to return a pointer to the first one. | ||
1368 | */ | ||
1369 | *page_size = pte_mask = PTE_PAGE_SIZE(*pte); | ||
1370 | pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); | ||
1371 | pte = (u64 *)(((unsigned long)pte) & pte_mask); | ||
1366 | } | 1372 | } |
1367 | 1373 | ||
1368 | return pte; | 1374 | return pte; |
@@ -1423,6 +1429,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, | |||
1423 | unsigned long page_size) | 1429 | unsigned long page_size) |
1424 | { | 1430 | { |
1425 | unsigned long long unmap_size, unmapped; | 1431 | unsigned long long unmap_size, unmapped; |
1432 | unsigned long pte_pgsize; | ||
1426 | u64 *pte; | 1433 | u64 *pte; |
1427 | 1434 | ||
1428 | BUG_ON(!is_power_of_2(page_size)); | 1435 | BUG_ON(!is_power_of_2(page_size)); |
@@ -1431,7 +1438,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, | |||
1431 | 1438 | ||
1432 | while (unmapped < page_size) { | 1439 | while (unmapped < page_size) { |
1433 | 1440 | ||
1434 | pte = fetch_pte(dom, bus_addr); | 1441 | pte = fetch_pte(dom, bus_addr, &pte_pgsize); |
1435 | 1442 | ||
1436 | if (!pte) { | 1443 | if (!pte) { |
1437 | /* | 1444 | /* |
@@ -1674,7 +1681,8 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
1674 | for (i = dma_dom->aperture[index]->offset; | 1681 | for (i = dma_dom->aperture[index]->offset; |
1675 | i < dma_dom->aperture_size; | 1682 | i < dma_dom->aperture_size; |
1676 | i += PAGE_SIZE) { | 1683 | i += PAGE_SIZE) { |
1677 | u64 *pte = fetch_pte(&dma_dom->domain, i); | 1684 | unsigned long pte_pgsize; |
1685 | u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); | ||
1678 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 1686 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
1679 | continue; | 1687 | continue; |
1680 | 1688 | ||
@@ -3382,14 +3390,14 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | |||
3382 | dma_addr_t iova) | 3390 | dma_addr_t iova) |
3383 | { | 3391 | { |
3384 | struct protection_domain *domain = dom->priv; | 3392 | struct protection_domain *domain = dom->priv; |
3385 | unsigned long offset_mask; | 3393 | unsigned long offset_mask, pte_pgsize; |
3386 | phys_addr_t paddr; | 3394 | phys_addr_t paddr; |
3387 | u64 *pte, __pte; | 3395 | u64 *pte, __pte; |
3388 | 3396 | ||
3389 | if (domain->mode == PAGE_MODE_NONE) | 3397 | if (domain->mode == PAGE_MODE_NONE) |
3390 | return iova; | 3398 | return iova; |
3391 | 3399 | ||
3392 | pte = fetch_pte(domain, iova); | 3400 | pte = fetch_pte(domain, iova, &pte_pgsize); |
3393 | 3401 | ||
3394 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 3402 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
3395 | return 0; | 3403 | return 0; |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c4fffb710c58..60e87d2e140a 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -282,6 +282,12 @@ | |||
282 | #define PTE_PAGE_SIZE(pte) \ | 282 | #define PTE_PAGE_SIZE(pte) \ |
283 | (1ULL << (1 + ffz(((pte) | 0xfffULL)))) | 283 | (1ULL << (1 + ffz(((pte) | 0xfffULL)))) |
284 | 284 | ||
285 | /* | ||
286 | * Takes a page-table level and returns the default page-size for this level | ||
287 | */ | ||
288 | #define PTE_LEVEL_PAGE_SIZE(level) \ | ||
289 | (1ULL << (12 + (9 * (level)))) | ||
290 | |||
285 | #define IOMMU_PTE_P (1ULL << 0) | 291 | #define IOMMU_PTE_P (1ULL << 0) |
286 | #define IOMMU_PTE_TV (1ULL << 1) | 292 | #define IOMMU_PTE_TV (1ULL << 1) |
287 | #define IOMMU_PTE_U (1ULL << 59) | 293 | #define IOMMU_PTE_U (1ULL << 59) |