author | Alex Waterman <alexw@nvidia.com> | 2017-03-15 19:42:12 -0400
---|---|---
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-06 21:14:48 -0400
commit | b69020bff5dfa69cad926c9374cdbe9a62509ffd (patch) |
tree | 222f6b6bc23561a38004a257cbac401e431ff3be /drivers/gpu/nvgpu/gk20a/mm_gk20a.c |
parent | fa4ecf5730a75269e85cc41c2ad2ee61307e72a9 (diff) |
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*.
JIRA NVGPU-12
Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1323325
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 57 |
1 file changed, 29 insertions(+), 28 deletions(-)
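The rename is mechanical: the mem_desc accessors and the aperture enum keep their signatures, and only the prefix changes (gk20a_mem_begin/rd_n/wr32/end become nvgpu_mem_begin/rd_n/wr32/end, enum gk20a_aperture becomes enum nvgpu_aperture, gk20a_aperture_mask/str become nvgpu_aperture_mask/str). A minimal sketch of what this means for a call site follows; the helper write_magic() is hypothetical and not part of this patch, while the nvgpu_mem_* calls and the <nvgpu/mem_desc.h> include are taken from the diff below.

```c
/*
 * Illustration only -- not part of the patch. write_magic() is a
 * hypothetical caller; the nvgpu_mem_begin()/nvgpu_mem_wr32()/
 * nvgpu_mem_end() accessors are the renamed API used in the diff.
 */
#include <nvgpu/mem_desc.h>

static int write_magic(struct gk20a *g, struct mem_desc *mem)
{
	/* was: gk20a_mem_begin(g, mem) */
	int err = nvgpu_mem_begin(g, mem);

	if (err)
		return err;

	/* was: gk20a_mem_wr32(g, mem, 0, 0xd1ab10) */
	nvgpu_mem_wr32(g, mem, 0, 0xd1ab10);

	/* was: gk20a_mem_end(g, mem) */
	nvgpu_mem_end(g, mem);

	return 0;
}
```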
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e78eb941..9c9fad1b 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -34,6 +34,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/pramin.h>
 #include <nvgpu/list.h>
+#include <nvgpu/mem_desc.h>
 #include <nvgpu/allocator.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/page_allocator.h>
@@ -139,7 +140,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 				   bool umapped_pte, int rw_flag,
 				   bool sparse,
 				   bool priv,
-				   enum gk20a_aperture aperture);
+				   enum nvgpu_aperture aperture);
 static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm);
 static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm);
 static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm);
@@ -945,7 +946,7 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 			sg_phys(entry->mem.sgt->sgl),
 			entry->mem.size);
 	} else {
-		int err = gk20a_mem_begin(g, &entry->mem);
+		int err = nvgpu_mem_begin(g, &entry->mem);
 
 		if (err)
 			return err;
@@ -971,7 +972,7 @@ void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
 			sg_phys(entry->mem.sgt->sgl),
 			entry->mem.size);
 	} else {
-		gk20a_mem_end(g, &entry->mem);
+		nvgpu_mem_end(g, &entry->mem);
 	}
 }
 
@@ -1510,7 +1511,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			bool sparse,
 			bool priv,
 			struct vm_gk20a_mapping_batch *batch,
-			enum gk20a_aperture aperture)
+			enum nvgpu_aperture aperture)
 {
 	int err = 0;
 	bool allocated = false;
@@ -1543,7 +1544,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		sgt ? lo32((u64)sg_phys(sgt->sgl)) : 0,
 		vm->gmmu_page_sizes[pgsz_idx] >> 10, vm_aspace_id(vm),
 		ctag_lines, ctag_offset,
-		kind_v, flags, gk20a_aperture_str(aperture));
+		kind_v, flags, nvgpu_aperture_str(aperture));
 
 	err = update_gmmu_ptes_locked(vm, pgsz_idx,
 				      sgt,
@@ -1634,7 +1635,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	}
 }
 
-static enum gk20a_aperture gk20a_dmabuf_aperture(struct gk20a *g,
+static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 			struct dma_buf *dmabuf)
 {
 	struct gk20a *buf_owner = gk20a_vidmem_buf_owner(dmabuf);
@@ -1723,7 +1724,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
 		vm_aspace_id(vm),
 		mapped_buffer->ctag_lines, mapped_buffer->ctag_offset,
 		mapped_buffer->flags,
-		gk20a_aperture_str(gk20a_dmabuf_aperture(g, dmabuf)));
+		nvgpu_aperture_str(gk20a_dmabuf_aperture(g, dmabuf)));
 
 	if (sgt)
 		*sgt = mapped_buffer->sgt;
@@ -1941,11 +1942,11 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 
 	switch (cmd) {
 	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ:
-		gk20a_mem_rd_n(g, mem, offset, buffer, size);
+		nvgpu_mem_rd_n(g, mem, offset, buffer, size);
 		break;
 
 	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE:
-		gk20a_mem_wr_n(g, mem, offset, buffer, size);
+		nvgpu_mem_wr_n(g, mem, offset, buffer, size);
 		break;
 
 	default:
@@ -1959,7 +1960,7 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 }
 
 static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
-			enum gk20a_aperture aperture)
+			enum nvgpu_aperture aperture)
 {
 	u64 align = 0, chunk_align = 0;
 	u64 buf_addr;
@@ -2030,7 +2031,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	u32 ctag_map_win_ctagline = 0;
 	struct vm_reserved_va_node *va_node = NULL;
 	u32 ctag_offset;
-	enum gk20a_aperture aperture;
+	enum nvgpu_aperture aperture;
 
 	if (user_mapped && vm->userspace_managed &&
 	    !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
@@ -2462,7 +2463,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
 			u32 flags,
 			int rw_flag,
 			bool priv,
-			enum gk20a_aperture aperture)
+			enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	u64 vaddr;
@@ -2496,7 +2497,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
 		u32 flags,
 		int rw_flag,
 		bool priv,
-		enum gk20a_aperture aperture)
+		enum nvgpu_aperture aperture)
 {
 	return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv,
 			aperture);
@@ -2512,7 +2513,7 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 		u32 flags,
 		int rw_flag,
 		bool priv,
-		enum gk20a_aperture aperture)
+		enum nvgpu_aperture aperture)
 {
 	return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv,
 			aperture);
@@ -2851,7 +2852,7 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
 			schedule_work(&g->mm.vidmem.clear_mem_worker);
 		}
 	} else {
-		gk20a_memset(g, mem, 0, 0, mem->size);
+		nvgpu_memset(g, mem, 0, 0, mem->size);
 		nvgpu_free(mem->allocator,
 			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
 		gk20a_free_sgtable(g, &mem->sgt);
@@ -3170,7 +3171,7 @@ u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
 void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
 		size_t w, size_t data)
 {
-	gk20a_mem_wr32(g, &entry->mem, entry->woffset + w, data);
+	nvgpu_mem_wr32(g, &entry->mem, entry->woffset + w, data);
 }
 
 u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -3191,7 +3192,7 @@ static inline u32 big_valid_pde0_bits(struct gk20a *g,
 {
 	u64 pte_addr = gk20a_pde_addr(g, entry);
 	u32 pde0_bits =
-		gk20a_aperture_mask(g, &entry->mem,
+		nvgpu_aperture_mask(g, &entry->mem,
 			gmmu_pde_aperture_big_sys_mem_ncoh_f(),
 			gmmu_pde_aperture_big_video_memory_f()) |
 		gmmu_pde_address_big_sys_f(
@@ -3205,7 +3206,7 @@ static inline u32 small_valid_pde1_bits(struct gk20a *g,
 {
 	u64 pte_addr = gk20a_pde_addr(g, entry);
 	u32 pde1_bits =
-		gk20a_aperture_mask(g, &entry->mem,
+		nvgpu_aperture_mask(g, &entry->mem,
 			gmmu_pde_aperture_small_sys_mem_ncoh_f(),
 			gmmu_pde_aperture_small_video_memory_f()) |
 		gmmu_pde_vol_small_true_f() | /* tbd: why? */
@@ -3230,7 +3231,7 @@ static int update_gmmu_pde_locked(struct vm_gk20a *vm,
 			   u32 kind_v, u64 *ctag,
 			   bool cacheable, bool unammped_pte,
 			   int rw_flag, bool sparse, bool priv,
-			   enum gk20a_aperture aperture)
+			   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	bool small_valid, big_valid;
@@ -3275,7 +3276,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 			   u32 kind_v, u64 *ctag,
 			   bool cacheable, bool unmapped_pte,
 			   int rw_flag, bool sparse, bool priv,
-			   enum gk20a_aperture aperture)
+			   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));
@@ -3296,7 +3297,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 	if (priv)
 		pte_w[0] |= gmmu_pte_privilege_true_f();
 
-	pte_w[1] = __gk20a_aperture_mask(g, aperture,
+	pte_w[1] = __nvgpu_aperture_mask(g, aperture,
 			gmmu_pte_aperture_sys_mem_ncoh_f(),
 			gmmu_pte_aperture_video_memory_f()) |
 		gmmu_pte_kind_f(kind_v) |
@@ -3379,7 +3380,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
 			   bool sparse,
 			   int lvl,
 			   bool priv,
-			   enum gk20a_aperture aperture)
+			   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl];
@@ -3477,7 +3478,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 				   int rw_flag,
 				   bool sparse,
 				   bool priv,
-				   enum gk20a_aperture aperture)
+				   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	int ctag_granularity = g->ops.fb.compression_page_size(g);
@@ -4735,14 +4736,14 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
 
 	gk20a_dbg_info("pde pa=0x%llx", pdb_addr);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
-		gk20a_aperture_mask(g, &vm->pdb.mem,
+	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
+		nvgpu_aperture_mask(g, &vm->pdb.mem,
 			ram_in_page_dir_base_target_sys_mem_ncoh_f(),
 			ram_in_page_dir_base_target_vid_mem_f()) |
 		ram_in_page_dir_base_vol_true_f() |
 		ram_in_page_dir_base_lo_f(pdb_addr_lo));
 
-	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
+	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
 		ram_in_page_dir_base_hi_f(pdb_addr_hi));
 }
 
@@ -4756,10 +4757,10 @@ void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm,
 
 	g->ops.mm.init_pdb(g, inst_block, vm);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(),
+	nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(),
 		u64_lo32(vm->va_limit - 1) & ~0xfff);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
+	nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
 		ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));
 
 	if (big_page_size && g->ops.mm.set_big_page_size)