summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 37
1 files changed, 18 insertions, 19 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 7660c949..37813ad3 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/mm_gk20a.c
3 *
4 * GK20A memory management 2 * GK20A memory management
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,9 +12,8 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 13 * more details.
16 * 14 *
17 * You should have received a copy of the GNU General Public License along with 15 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 17 */
21 18
22#include <linux/delay.h> 19#include <linux/delay.h>
@@ -29,6 +26,7 @@
29#include <linux/tegra-soc.h> 26#include <linux/tegra-soc.h>
30#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
31#include <linux/dma-buf.h> 28#include <linux/dma-buf.h>
29#include <linux/nvhost_as_ioctl.h>
32 30
33#include "gk20a.h" 31#include "gk20a.h"
34#include "mm_gk20a.h" 32#include "mm_gk20a.h"
@@ -42,6 +40,7 @@
42#include "hw_ltc_gk20a.h" 40#include "hw_ltc_gk20a.h"
43 41
44#include "kind_gk20a.h" 42#include "kind_gk20a.h"
43#include "semaphore_gk20a.h"
45 44
46/* 45/*
47 * GPU mapping life cycle 46 * GPU mapping life cycle
@@ -819,7 +818,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
819 return; 818 return;
820 } 819 }
821 820
822 if (mapped_buffer->flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 821 if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
823 mutex_unlock(&vm->update_gmmu_lock); 822 mutex_unlock(&vm->update_gmmu_lock);
824 823
825 if (tegra_platform_is_silicon()) 824 if (tegra_platform_is_silicon())
@@ -1175,7 +1174,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1175 kind_v, 1174 kind_v,
1176 ctag_offset, 1175 ctag_offset,
1177 flags & 1176 flags &
1178 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 1177 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
1179 rw_flag); 1178 rw_flag);
1180 if (err) { 1179 if (err) {
1181 gk20a_err(d, "failed to update ptes on map"); 1180 gk20a_err(d, "failed to update ptes on map");
@@ -1256,7 +1255,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
1256 if (mapped_buffer->flags != flags) 1255 if (mapped_buffer->flags != flags)
1257 return 0; 1256 return 0;
1258 1257
1259 if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET && 1258 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET &&
1260 mapped_buffer->addr != offset_align) 1259 mapped_buffer->addr != offset_align)
1261 return 0; 1260 return 0;
1262 1261
@@ -1303,7 +1302,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
1303u64 gk20a_vm_map(struct vm_gk20a *vm, 1302u64 gk20a_vm_map(struct vm_gk20a *vm,
1304 struct dma_buf *dmabuf, 1303 struct dma_buf *dmabuf,
1305 u64 offset_align, 1304 u64 offset_align,
1306 u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/, 1305 u32 flags /*NVGPU_AS_MAP_BUFFER_FLAGS_*/,
1307 int kind, 1306 int kind,
1308 struct sg_table **sgt, 1307 struct sg_table **sgt,
1309 bool user_mapped, 1308 bool user_mapped,
@@ -1364,7 +1363,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
1364 1363
1365 /* If FIX_OFFSET is set, pgsz is determined. Otherwise, select 1364 /* If FIX_OFFSET is set, pgsz is determined. Otherwise, select
1366 * page size according to memory alignment */ 1365 * page size according to memory alignment */
1367 if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 1366 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
1368 bfr.pgsz_idx = NV_GMMU_VA_IS_UPPER(offset_align) ? 1367 bfr.pgsz_idx = NV_GMMU_VA_IS_UPPER(offset_align) ?
1369 gmmu_page_size_big : gmmu_page_size_small; 1368 gmmu_page_size_big : gmmu_page_size_small;
1370 } else { 1369 } else {
@@ -1390,7 +1389,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
1390 1389
1391 /* Check if we should use a fixed offset for mapping this buffer */ 1390 /* Check if we should use a fixed offset for mapping this buffer */
1392 1391
1393 if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 1392 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
1394 err = validate_fixed_buffer(vm, &bfr, 1393 err = validate_fixed_buffer(vm, &bfr,
1395 offset_align, mapping_size); 1394 offset_align, mapping_size);
1396 if (err) 1395 if (err)
@@ -1996,7 +1995,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
1996 for (i = 0; i < num_pages; i++) { 1995 for (i = 0; i < num_pages; i++) {
1997 u64 page_vaddr = g->ops.mm.gmmu_map(vm, vaddr, 1996 u64 page_vaddr = g->ops.mm.gmmu_map(vm, vaddr,
1998 vm->zero_page_sgt, 0, pgsz, pgsz_idx, 0, 0, 1997 vm->zero_page_sgt, 0, pgsz, pgsz_idx, 0, 0,
1999 NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET, 1998 NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
2000 gk20a_mem_flag_none, false); 1999 gk20a_mem_flag_none, false);
2001 2000
2002 if (!page_vaddr) { 2001 if (!page_vaddr) {
@@ -2322,7 +2321,7 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share)
2322 2321
2323 2322
2324int gk20a_vm_alloc_space(struct gk20a_as_share *as_share, 2323int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2325 struct nvhost_as_alloc_space_args *args) 2324 struct nvgpu_as_alloc_space_args *args)
2326 2325
2327{ int err = -ENOMEM; 2326{ int err = -ENOMEM;
2328 int pgsz_idx; 2327 int pgsz_idx;
@@ -2356,7 +2355,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2356 goto clean_up; 2355 goto clean_up;
2357 } 2356 }
2358 2357
2359 if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_SPARSE && 2358 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE &&
2360 pgsz_idx != gmmu_page_size_big) { 2359 pgsz_idx != gmmu_page_size_big) {
2361 err = -ENOSYS; 2360 err = -ENOSYS;
2362 kfree(va_node); 2361 kfree(va_node);
@@ -2364,7 +2363,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2364 } 2363 }
2365 2364
2366 start_page_nr = 0; 2365 start_page_nr = 0;
2367 if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) 2366 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
2368 start_page_nr = (u32)(args->o_a.offset >> 2367 start_page_nr = (u32)(args->o_a.offset >>
2369 gmmu_page_shifts[pgsz_idx]); 2368 gmmu_page_shifts[pgsz_idx]);
2370 2369
@@ -2386,7 +2385,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2386 mutex_lock(&vm->update_gmmu_lock); 2385 mutex_lock(&vm->update_gmmu_lock);
2387 2386
2388 /* mark that we need to use sparse mappings here */ 2387 /* mark that we need to use sparse mappings here */
2389 if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_SPARSE) { 2388 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) {
2390 err = g->ops.mm.set_sparse(vm, vaddr_start, args->pages, 2389 err = g->ops.mm.set_sparse(vm, vaddr_start, args->pages,
2391 pgsz_idx, true); 2390 pgsz_idx, true);
2392 if (err) { 2391 if (err) {
@@ -2409,7 +2408,7 @@ clean_up:
2409} 2408}
2410 2409
2411int gk20a_vm_free_space(struct gk20a_as_share *as_share, 2410int gk20a_vm_free_space(struct gk20a_as_share *as_share,
2412 struct nvhost_as_free_space_args *args) 2411 struct nvgpu_as_free_space_args *args)
2413{ 2412{
2414 int err = -ENOMEM; 2413 int err = -ENOMEM;
2415 int pgsz_idx; 2414 int pgsz_idx;
@@ -2580,7 +2579,7 @@ static int gk20a_dmabuf_get_kind(struct dma_buf *dmabuf)
2580int gk20a_vm_map_buffer(struct gk20a_as_share *as_share, 2579int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
2581 int dmabuf_fd, 2580 int dmabuf_fd,
2582 u64 *offset_align, 2581 u64 *offset_align,
2583 u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/ 2582 u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
2584 int kind, 2583 int kind,
2585 u64 buffer_offset, 2584 u64 buffer_offset,
2586 u64 mapping_size) 2585 u64 mapping_size)
@@ -3147,7 +3146,7 @@ bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g)
3147 3146
3148void gk20a_init_mm(struct gpu_ops *gops) 3147void gk20a_init_mm(struct gpu_ops *gops)
3149{ 3148{
3150 /* remember to remove NVHOST_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS in 3149 /* remember to remove NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS in
3151 * characteristics flags if sparse support is removed */ 3150 * characteristics flags if sparse support is removed */
3152 gops->mm.set_sparse = gk20a_vm_put_sparse; 3151 gops->mm.set_sparse = gk20a_vm_put_sparse;
3153 gops->mm.put_empty = gk20a_vm_put_empty; 3152 gops->mm.put_empty = gk20a_vm_put_empty;