diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-09-19 18:28:00 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-10-04 05:29:53 -0400 |
commit | 7a3dbdd43f142f7f94a19ff6a320e589f0b23324 (patch) | |
tree | 8d2341fea4c6e146dae8fdaa69845b86e9c6a7e6 /drivers/gpu/nvgpu | |
parent | 0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 (diff) |
gpu: nvgpu: Add for_each construct for nvgpu_sgts
Add a macro to iterate across nvgpu_sgts. This helps developers who
might otherwise accidentally forget to advance to the next SGL.
JIRA NVGPU-243
Change-Id: I90154a5d23f0014cb79bbcd5b6e8d8dbda303820
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566627
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/gmmu.c | 13 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pramin.c | 3 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h | 8 |
3 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c index ab9d0d41..cad53fa1 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu.c | |||
@@ -497,8 +497,7 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm, | |||
497 | * Otherwise iterate across all the chunks in this allocation and | 497 | * Otherwise iterate across all the chunks in this allocation and |
498 | * map them. | 498 | * map them. |
499 | */ | 499 | */ |
500 | sgl = sgt->sgl; | 500 | nvgpu_sgt_for_each_sgl(sgl, sgt) { |
501 | while (sgl) { | ||
502 | if (space_to_skip && | 501 | if (space_to_skip && |
503 | space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) { | 502 | space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) { |
504 | space_to_skip -= nvgpu_sgt_get_length(sgt, sgl); | 503 | space_to_skip -= nvgpu_sgt_get_length(sgt, sgl); |
@@ -526,7 +525,6 @@ static int __nvgpu_gmmu_update_page_table_vidmem(struct vm_gk20a *vm, | |||
526 | */ | 525 | */ |
527 | virt_addr += chunk_length; | 526 | virt_addr += chunk_length; |
528 | length -= chunk_length; | 527 | length -= chunk_length; |
529 | sgl = nvgpu_sgt_get_next(sgt, sgl); | ||
530 | 528 | ||
531 | if (length == 0) | 529 | if (length == 0) |
532 | break; | 530 | break; |
@@ -544,7 +542,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm, | |||
544 | { | 542 | { |
545 | struct gk20a *g = gk20a_from_vm(vm); | 543 | struct gk20a *g = gk20a_from_vm(vm); |
546 | void *sgl; | 544 | void *sgl; |
547 | int err; | 545 | int err = 0; |
548 | 546 | ||
549 | if (!sgt) { | 547 | if (!sgt) { |
550 | /* | 548 | /* |
@@ -567,10 +565,8 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm, | |||
567 | * mapping is simple since the "physical" address is actually a virtual | 565 | * mapping is simple since the "physical" address is actually a virtual |
568 | * IO address and will be contiguous. | 566 | * IO address and will be contiguous. |
569 | */ | 567 | */ |
570 | sgl = sgt->sgl; | ||
571 | |||
572 | if (!g->mm.bypass_smmu) { | 568 | if (!g->mm.bypass_smmu) { |
573 | u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgl, attrs); | 569 | u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgt->sgl, attrs); |
574 | 570 | ||
575 | io_addr += space_to_skip; | 571 | io_addr += space_to_skip; |
576 | 572 | ||
@@ -588,7 +584,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm, | |||
588 | * Finally: last possible case: do the no-IOMMU mapping. In this case we | 584 | * Finally: last possible case: do the no-IOMMU mapping. In this case we |
589 | * really are mapping physical pages directly. | 585 | * really are mapping physical pages directly. |
590 | */ | 586 | */ |
591 | while (sgl) { | 587 | nvgpu_sgt_for_each_sgl(sgl, sgt) { |
592 | u64 phys_addr; | 588 | u64 phys_addr; |
593 | u64 chunk_length; | 589 | u64 chunk_length; |
594 | 590 | ||
@@ -616,7 +612,6 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm, | |||
616 | space_to_skip = 0; | 612 | space_to_skip = 0; |
617 | virt_addr += chunk_length; | 613 | virt_addr += chunk_length; |
618 | length -= chunk_length; | 614 | length -= chunk_length; |
619 | sgl = nvgpu_sgt_get_next(sgt, sgl); | ||
620 | 615 | ||
621 | if (length == 0) | 616 | if (length == 0) |
622 | break; | 617 | break; |
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c index 9d59f61d..4f7d6248 100644 --- a/drivers/gpu/nvgpu/common/pramin.c +++ b/drivers/gpu/nvgpu/common/pramin.c | |||
@@ -105,7 +105,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem, | |||
105 | 105 | ||
106 | alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl); | 106 | alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl); |
107 | sgt = &alloc->sgt; | 107 | sgt = &alloc->sgt; |
108 | for (sgl = sgt->sgl; sgl; sgl = nvgpu_sgt_get_next(sgt, sgl)) { | 108 | |
109 | nvgpu_sgt_for_each_sgl(sgl, sgt) { | ||
109 | if (offset >= nvgpu_sgt_get_length(sgt, sgl)) | 110 | if (offset >= nvgpu_sgt_get_length(sgt, sgl)) |
110 | offset -= nvgpu_sgt_get_length(sgt, sgl); | 111 | offset -= nvgpu_sgt_get_length(sgt, sgl); |
111 | else | 112 | else |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h index c2f0e37b..23a1bad7 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h +++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h | |||
@@ -104,6 +104,14 @@ struct nvgpu_mem_sgl { | |||
104 | u64 length; | 104 | u64 length; |
105 | }; | 105 | }; |
106 | 106 | ||
107 | /* | ||
108 | * Iterate over the SGL entries in an SGT. | ||
109 | */ | ||
110 | #define nvgpu_sgt_for_each_sgl(__sgl__, __sgt__) \ | ||
111 | for ((__sgl__) = (__sgt__)->sgl; \ | ||
112 | (__sgl__) != NULL; \ | ||
113 | (__sgl__) = nvgpu_sgt_get_next(__sgt__, __sgl__)) | ||
114 | |||
107 | struct nvgpu_mem { | 115 | struct nvgpu_mem { |
108 | /* | 116 | /* |
109 | * Populated for all nvgpu_mem structs - vidmem or system. | 117 | * Populated for all nvgpu_mem structs - vidmem or system. |