diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-08-16 19:19:53 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-10-04 05:21:47 -0400 |
commit | edb116661348f1bc843849cdcc318fa47cf9724a (patch) | |
tree | 61d978a3518a51bdb82e2d3681abf5fc9c75821e /drivers/gpu/nvgpu/gk20a | |
parent | 2559fa295d0c478466e47496174fa2108ab01c33 (diff) |
gpu: nvgpu: rename ops.mm.get_physical_addr_bits
Rename get_physical_addr_bits and related functions to something that
more clearly conveys what they are doing. The basic idea of these
functions is to translate from a physical GPU address to an IOMMU GPU
address. To do that, a particular bit (which varies from chip to chip)
is added to the physical address.
JIRA NVGPU-68
Change-Id: I536cc595c4397aad69a24f740bc74db03f52bc0a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1542966
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 2 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 13 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 4 |
3 files changed, 3 insertions, 16 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 8dabee63..db38fae4 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -741,7 +741,7 @@ struct gpu_ops { | |||
741 | struct nvgpu_mem *mem, int size); | 741 | struct nvgpu_mem *mem, int size); |
742 | u32 (*get_big_page_sizes)(void); | 742 | u32 (*get_big_page_sizes)(void); |
743 | u32 (*get_default_big_page_size)(void); | 743 | u32 (*get_default_big_page_size)(void); |
744 | u32 (*get_physical_addr_bits)(struct gk20a *g); | 744 | u32 (*get_iommu_bit)(struct gk20a *g); |
745 | int (*init_mm_setup_hw)(struct gk20a *g); | 745 | int (*init_mm_setup_hw)(struct gk20a *g); |
746 | bool (*is_bar1_supported)(struct gk20a *g); | 746 | bool (*is_bar1_supported)(struct gk20a *g); |
747 | int (*init_bar2_vm)(struct gk20a *g); | 747 | int (*init_bar2_vm)(struct gk20a *g); |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 3d1f8d28..795f7bda 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -1271,17 +1271,6 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr) | |||
1271 | return addr; | 1271 | return addr; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, u64 iova) | ||
1275 | { | ||
1276 | /* ensure it is not vidmem allocation */ | ||
1277 | WARN_ON(is_vidmem_page_alloc(iova)); | ||
1278 | |||
1279 | if (nvgpu_iommuable(g) && g->ops.mm.get_physical_addr_bits) | ||
1280 | return iova | 1ULL << g->ops.mm.get_physical_addr_bits(g); | ||
1281 | |||
1282 | return iova; | ||
1283 | } | ||
1284 | |||
1285 | /* for gk20a the "video memory" apertures here are misnomers. */ | 1274 | /* for gk20a the "video memory" apertures here are misnomers. */ |
1286 | static inline u32 big_valid_pde0_bits(struct gk20a *g, | 1275 | static inline u32 big_valid_pde0_bits(struct gk20a *g, |
1287 | struct nvgpu_gmmu_pd *pd, u64 addr) | 1276 | struct nvgpu_gmmu_pd *pd, u64 addr) |
@@ -2170,7 +2159,7 @@ int gk20a_mm_suspend(struct gk20a *g) | |||
2170 | return 0; | 2159 | return 0; |
2171 | } | 2160 | } |
2172 | 2161 | ||
2173 | u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g) | 2162 | u32 gk20a_mm_get_iommu_bit(struct gk20a *g) |
2174 | { | 2163 | { |
2175 | return 34; | 2164 | return 34; |
2176 | } | 2165 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index 13a3dcd0..9f03a495 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -342,8 +342,6 @@ void gk20a_mm_dump_vm(struct vm_gk20a *vm, | |||
342 | 342 | ||
343 | int gk20a_mm_suspend(struct gk20a *g); | 343 | int gk20a_mm_suspend(struct gk20a *g); |
344 | 344 | ||
345 | u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova); | ||
346 | |||
347 | void gk20a_mm_ltc_isr(struct gk20a *g); | 345 | void gk20a_mm_ltc_isr(struct gk20a *g); |
348 | 346 | ||
349 | bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g); | 347 | bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g); |
@@ -420,7 +418,7 @@ void pde_range_from_vaddr_range(struct vm_gk20a *vm, | |||
420 | u64 addr_lo, u64 addr_hi, | 418 | u64 addr_lo, u64 addr_hi, |
421 | u32 *pde_lo, u32 *pde_hi); | 419 | u32 *pde_lo, u32 *pde_hi); |
422 | int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm); | 420 | int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm); |
423 | u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g); | 421 | u32 gk20a_mm_get_iommu_bit(struct gk20a *g); |
424 | 422 | ||
425 | const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, | 423 | const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, |
426 | u32 big_page_size); | 424 | u32 big_page_size); |