author		Alex Waterman <alexw@nvidia.com>	2017-09-19 18:28:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-04 05:29:53 -0400
commit		7a3dbdd43f142f7f94a19ff6a320e589f0b23324 (patch)
tree		8d2341fea4c6e146dae8fdaa69845b86e9c6a7e6 /drivers/gpu/nvgpu/common/mm/gmmu.c
parent		0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 (diff)
gpu: nvgpu: Add for_each construct for nvgpu_sgts
Add a macro to iterate across nvgpu_sgts. This makes it easier on
developers who may accidentally forget to move to the next SGL.

JIRA NVGPU-243

Change-Id: I90154a5d23f0014cb79bbcd5b6e8d8dbda303820
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566627
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
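The diff below is limited to gmmu.c, so it only shows the macro's call
sites; the definition itself lands elsewhere in the nvgpu SGT headers.
A minimal sketch of what it plausibly looks like, reconstructed from the
open-coded loop this patch removes (start at sgt->sgl, advance with
nvgpu_sgt_get_next(), stop at NULL); the parameter names here are
assumptions, not the verified header text:

	/*
	 * Sketch only: rebuilt from the removed pattern
	 *   sgl = sgt->sgl;
	 *   while (sgl) { ...; sgl = nvgpu_sgt_get_next(sgt, sgl); }
	 * The real header may spell this differently.
	 */
	#define nvgpu_sgt_for_each_sgl(__sgl__, __sgt__)		\
		for ((__sgl__) = (__sgt__)->sgl;			\
		     (__sgl__) != NULL;					\
		     (__sgl__) = nvgpu_sgt_get_next((__sgt__), (__sgl__)))

Folding the advance step into the for-loop header is what removes the
failure mode the message describes: a loop body can no longer forget
(or, via continue, skip) the move to the next SGL.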
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c	13
1 file changed, 4 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index ab9d0d41..cad53fa1 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -497,8 +497,7 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm,
 	 * Otherwise iterate across all the chunks in this allocation and
 	 * map them.
 	 */
-	sgl = sgt->sgl;
-	while (sgl) {
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		if (space_to_skip &&
 		    space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
 			space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
@@ -526,7 +525,6 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm,
 		 */
 		virt_addr += chunk_length;
 		length -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
 
 		if (length == 0)
 			break;
@@ -544,7 +542,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	void *sgl;
-	int err;
+	int err = 0;
 
 	if (!sgt) {
 		/*
@@ -567,10 +565,8 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	 * mapping is simple since the "physical" address is actually a virtual
 	 * IO address and will be contiguous.
 	 */
-	sgl = sgt->sgl;
-
 	if (!g->mm.bypass_smmu) {
-		u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgl, attrs);
+		u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgt->sgl, attrs);
 
 		io_addr += space_to_skip;
 
@@ -588,7 +584,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	 * Finally: last possible case: do the no-IOMMU mapping. In this case we
 	 * really are mapping physical pages directly.
 	 */
-	while (sgl) {
+	nvgpu_sgt_for_each_sgl(sgl, sgt) {
 		u64 phys_addr;
 		u64 chunk_length;
 
@@ -616,7 +612,6 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 		space_to_skip = 0;
 		virt_addr += chunk_length;
 		length -= chunk_length;
-		sgl = nvgpu_sgt_get_next(sgt, sgl);
 
 		if (length == 0)
 			break;