author	Ohad Ben-Cohen <ohad@wizery.com>	2011-11-10 04:32:26 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-11-10 05:40:37 -0500
commit	7d3002cc8c160dbda0e6ab9cd66dc6eb401b8b70 (patch)
tree	453bad0319d12fc0a3fe6594b8e212615e7e2a70 /drivers/iommu/omap-iovmm.c
parent	5009065d38c95455bd2d27c2838313e3dd0c5bc7 (diff)
iommu/core: split mapping to page sizes as supported by the hardware
When mapping a memory region, split it into page sizes as supported by the IOMMU hardware. Always prefer bigger pages, when possible, in order to reduce TLB pressure.

The logic to do that is now added to the IOMMU core, so neither the IOMMU drivers themselves nor users of the IOMMU API have to duplicate it.

This allows a more lenient granularity of mappings: traditionally the IOMMU API took 'order' (of a page) as a mapping size and directly let the low-level IOMMU drivers handle the mapping. Now that the IOMMU core can split arbitrary memory regions into pages, that limitation can be removed, so users don't have to split those regions by themselves.

Currently the supported page sizes are advertised once and then remain static. That works well for OMAP and MSM, but it would probably not fly well with Intel's hardware, where the page size capabilities seem to have the potential to differ between several DMA remapping devices.

register_iommu() currently sets a default pgsize behavior, so we can convert the IOMMU drivers in subsequent patches. After all the drivers are converted, the temporary default settings will be removed.

Mainline users of the IOMMU API (kvm and omap-iovmm) are adapted to deal with bytes instead of page order.

Many thanks to Joerg Roedel <Joerg.Roedel@amd.com> for significant review!

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Cc: David Brown <davidb@codeaurora.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <Joerg.Roedel@amd.com>
Cc: Stepan Moskovchenko <stepanm@codeaurora.org>
Cc: KyongHo Cho <pullip.cho@samsung.com>
Cc: Hiroshi DOYU <hdoyu@nvidia.com>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: kvm@vger.kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
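To illustrate the splitting logic described above, here is a condensed sketch of the mapping loop the core gains. This is not the verbatim patch: the local names (domain, ops, prot, ret), and the assumption that the driver's ->map callback takes the chosen page size in bytes, are illustrative. pgsize_bitmap is assumed to be a bitmap in which bit N set means 2^N bytes is a page size the hardware supports:

	/*
	 * Sketch of the page-size splitting loop (assumptions as noted
	 * above).  iova, paddr and size are assumed to already be
	 * aligned to the smallest supported page size.
	 */
	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* biggest page size that still fits into the remaining 'size' */
		pgsize_idx = __fls(size);

		/* the alignment of iova and paddr also bounds the page size */
		if (addr_merge) {
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* mask of all page sizes not exceeding that bound ... */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* ... intersected with what the hardware actually supports */
		pgsize &= domain->ops->pgsize_bitmap;
		BUG_ON(!pgsize);

		/* prefer the biggest remaining page, to reduce TLB pressure */
		pgsize = 1UL << __fls(pgsize);

		/* assumed callback convention: page size in bytes */
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

Picking the highest set bit first is what implements the "always prefer bigger pages" policy, and the iova/paddr alignment bound guarantees each chunk is naturally aligned for the page size chosen.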
Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r--	drivers/iommu/omap-iovmm.c	17
1 file changed, 6 insertions, 11 deletions
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index e8fdb8830f69..0b7b14cb030b 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -409,7 +409,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
-	int order;
 
 	if (!domain || !sgt)
 		return -EINVAL;
@@ -428,12 +427,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
 
-		order = get_order(bytes);
-
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		err = iommu_map(domain, da, pa, order, flags);
+		err = iommu_map(domain, da, pa, bytes, flags);
 		if (err)
 			goto err_out;
 
@@ -448,10 +445,9 @@ err_out:
 		size_t bytes;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
-		iommu_unmap(domain, da, order);
+		iommu_unmap(domain, da, bytes);
 
 		da += bytes;
 	}
@@ -466,7 +462,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	size_t total = area->da_end - area->da_start;
 	const struct sg_table *sgt = area->sgt;
 	struct scatterlist *sg;
-	int i, err;
+	int i;
+	size_t unmapped;
 
 	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -474,13 +471,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	start = area->da_start;
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
-		int order;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
-		err = iommu_unmap(domain, start, order);
-		if (err < 0)
+		unmapped = iommu_unmap(domain, start, bytes);
+		if (unmapped < bytes)
 			break;
 
 		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
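For context, the unmapped < bytes check above relies on the return convention this patch gives iommu_unmap(): rather than an error code for a single page order, it now returns the number of bytes actually unmapped. A hedged sketch of the core-side loop follows (not the verbatim patch; the ->unmap callback here is assumed to tear down one mapping at iova and return its size in bytes, or 0 on failure):

	/*
	 * Sketch of the core unmap loop (assumptions as noted above).
	 * The region may have been mapped with a mix of page sizes, so
	 * each iteration removes whatever page the driver finds at iova.
	 */
	size_t unmapped = 0;

	while (unmapped < size) {
		size_t unmapped_page;

		unmapped_page = domain->ops->unmap(domain, iova, size - unmapped);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;	/* callers compare this against the requested size */

Anything short of the requested size therefore signals a failure, which is exactly why unmap_iovm_area() above stops iterating its scatterlist at that point.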