diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-04-10 16:51:43 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-25 17:26:00 -0400 |
commit | 126c735d3015f515bde9f26d10b4e34d6e194e36 (patch) | |
tree | e3c034e2b854d71b356e2701382d40dce70e1a6d /drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |
parent | 6a14d980cfdce5609c0eb7b20e2da3d98fbbccb8 (diff) |
gpu: nvgpu: Move and rename gk20a_sgtable*
Move and rename the functions that build sgtables for nvgpu_mems into
the Linux specific DMA code.
One place outside of the Linux code does include the Linux DMA header.
That will be fixed in a subsequent patch.
JIRA NVGPU-12
JIRA NVGPU-30
Change-Id: Ie43c752b8f998f122af70f7c7eb727af0b0d98df
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464078
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 73 |
1 file changed, 3 insertions, 70 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 69e00c5e..79aa44a5 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -38,6 +38,8 @@ | |||
38 | #include <nvgpu/bug.h> | 38 | #include <nvgpu/bug.h> |
39 | #include <nvgpu/log2.h> | 39 | #include <nvgpu/log2.h> |
40 | 40 | ||
41 | #include <nvgpu/linux/dma.h> | ||
42 | |||
41 | #include "gk20a.h" | 43 | #include "gk20a.h" |
42 | #include "mm_gk20a.h" | 44 | #include "mm_gk20a.h" |
43 | #include "fence_gk20a.h" | 45 | #include "fence_gk20a.h" |
@@ -2621,7 +2623,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work) | |||
2621 | gk20a_gmmu_clear_vidmem_mem(g, mem); | 2623 | gk20a_gmmu_clear_vidmem_mem(g, mem); |
2622 | nvgpu_free(mem->allocator, | 2624 | nvgpu_free(mem->allocator, |
2623 | (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl)); | 2625 | (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl)); |
2624 | gk20a_free_sgtable(g, &mem->priv.sgt); | 2626 | nvgpu_free_sgtable(g, &mem->priv.sgt); |
2625 | 2627 | ||
2626 | WARN_ON(atomic64_sub_return(mem->size, | 2628 | WARN_ON(atomic64_sub_return(mem->size, |
2627 | &g->mm.vidmem.bytes_pending) < 0); | 2629 | &g->mm.vidmem.bytes_pending) < 0); |
@@ -2668,75 +2670,6 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm, | |||
2668 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 2670 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2669 | } | 2671 | } |
2670 | 2672 | ||
2671 | /* get sg_table from already allocated buffer */ | ||
2672 | int gk20a_get_sgtable(struct device *d, struct sg_table **sgt, | ||
2673 | void *cpuva, u64 iova, | ||
2674 | size_t size) | ||
2675 | { | ||
2676 | struct gk20a *g = get_gk20a(d); | ||
2677 | |||
2678 | int err = 0; | ||
2679 | *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table)); | ||
2680 | if (!(*sgt)) { | ||
2681 | nvgpu_err(g, "failed to allocate memory\n"); | ||
2682 | err = -ENOMEM; | ||
2683 | goto fail; | ||
2684 | } | ||
2685 | err = dma_get_sgtable(d, *sgt, | ||
2686 | cpuva, iova, | ||
2687 | size); | ||
2688 | if (err) { | ||
2689 | nvgpu_err(g, "failed to create sg table\n"); | ||
2690 | goto fail; | ||
2691 | } | ||
2692 | sg_dma_address((*sgt)->sgl) = iova; | ||
2693 | |||
2694 | return 0; | ||
2695 | fail: | ||
2696 | if (*sgt) { | ||
2697 | nvgpu_kfree(g, *sgt); | ||
2698 | *sgt = NULL; | ||
2699 | } | ||
2700 | return err; | ||
2701 | } | ||
2702 | |||
2703 | int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt, | ||
2704 | struct page **pages, u64 iova, | ||
2705 | size_t size) | ||
2706 | { | ||
2707 | int err = 0; | ||
2708 | struct gk20a *g = get_gk20a(d); | ||
2709 | |||
2710 | *sgt = nvgpu_kzalloc(g, sizeof(struct sg_table)); | ||
2711 | if (!(*sgt)) { | ||
2712 | nvgpu_err(g, "failed to allocate memory\n"); | ||
2713 | err = -ENOMEM; | ||
2714 | goto fail; | ||
2715 | } | ||
2716 | err = sg_alloc_table_from_pages(*sgt, pages, | ||
2717 | DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL); | ||
2718 | if (err) { | ||
2719 | nvgpu_err(g, "failed to allocate sg_table\n"); | ||
2720 | goto fail; | ||
2721 | } | ||
2722 | sg_dma_address((*sgt)->sgl) = iova; | ||
2723 | |||
2724 | return 0; | ||
2725 | fail: | ||
2726 | if (*sgt) { | ||
2727 | nvgpu_kfree(get_gk20a(d), *sgt); | ||
2728 | *sgt = NULL; | ||
2729 | } | ||
2730 | return err; | ||
2731 | } | ||
2732 | |||
2733 | void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt) | ||
2734 | { | ||
2735 | sg_free_table(*sgt); | ||
2736 | nvgpu_kfree(g, *sgt); | ||
2737 | *sgt = NULL; | ||
2738 | } | ||
2739 | |||
2740 | u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova) | 2673 | u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova) |
2741 | { | 2674 | { |
2742 | /* ensure it is not vidmem allocation */ | 2675 | /* ensure it is not vidmem allocation */ |