author     Deepak Nibade <dnibade@nvidia.com>                    2018-01-16 06:07:37 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-02-07 18:35:47 -0500
commit     f0cbe19b12524f5df6466eaf86acbfb349def6b1 (patch)
tree       bed8a312e29592d41d9de4afb331756c2d38fb96  /drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c
parent     0c8deb74aff6d0781cdf3278f56d7bce42b16a67 (diff)
gpu: nvgpu: add user API to get read-only syncpoint address map
Add the user space API NVGPU_AS_IOCTL_GET_SYNC_RO_MAP to get the read-only syncpoint address map in user space.

We already map the whole syncpoint shim into each address space, with the base address being vm->syncpt_ro_map_gpu_va. This new API exposes that base GPU_VA address of the syncpoint map, along with the unit size of each syncpoint, to user space. User space can then calculate the address of each syncpoint as

    syncpoint_address = base_gpu_va + (syncpoint_id * syncpoint_unit_size)

Note that this syncpoint address is read-only and should only be used for inserting semaphore acquires. Adding a semaphore release through this address would result in an MMU_FAULT.

Define the new HAL g->ops.fifo.get_sync_ro_map and set it for all GPUs supported on the Xavier SoC.

Bug 200327559

Change-Id: Ica0db48fc28fdd0ff2a5eb09574dac843dc5e4fd
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649365
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
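As a rough illustration of how user space might consume the new query, here is a minimal sketch. Only the ioctl name NVGPU_AS_IOCTL_GET_SYNC_RO_MAP and the address formula come from the commit message; the UAPI header path, device node, and args struct/field names are assumptions made for illustration.

/*
 * Hypothetical user-space usage sketch. Header path, device node, and
 * struct/field names are assumed, not taken from this commit.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed location of the nvgpu UAPI header */

int main(void)
{
	/* assumed: the GPU address-space device node */
	int as_fd = open("/dev/nvhost-as-gpu", O_RDWR);
	/* assumed struct/field names for the new ioctl's arguments */
	struct nvgpu_as_get_sync_ro_map_args args = {0};
	uint32_t syncpt_id = 42;	/* example syncpoint ID */

	if (as_fd < 0)
		return 1;
	if (ioctl(as_fd, NVGPU_AS_IOCTL_GET_SYNC_RO_MAP, &args) != 0)
		return 1;

	/* syncpoint_address = base_gpu_va + (syncpoint_id * syncpoint_unit_size) */
	uint64_t syncpt_va = args.base_gpuva +
			(uint64_t)syncpt_id * args.sync_size;

	/*
	 * The mapping is read-only: use syncpt_va only for semaphore
	 * acquires; a semaphore release through it would MMU-fault.
	 */
	printf("syncpoint %u RO GPU VA: 0x%llx\n", syncpt_id,
	       (unsigned long long)syncpt_va);
	return 0;
}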
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c  93
1 file changed, 64 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c
index 134ca67a..af25e486 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/gv11b/vgpu_fifo_gv11b.c
@@ -23,12 +23,52 @@
 #include <linux/tegra_vgpu.h>
 
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+
+static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
+{
+	int err;
+	struct gk20a *g = gk20a_from_vm(vm);
+	struct tegra_vgpu_cmd_msg msg = {};
+	struct tegra_vgpu_map_syncpt_params *p = &msg.params.map_syncpt;
+
+	if (vm->syncpt_ro_map_gpu_va)
+		return 0;
+
+	vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
+			g->syncpt_unit_size,
+			gmmu_page_size_kernel);
+	if (!vm->syncpt_ro_map_gpu_va) {
+		nvgpu_err(g, "allocating read-only va space failed");
+		return -ENOMEM;
+	}
+
+	msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
+	msg.handle = vgpu_get_handle(g);
+	p->as_handle = vm->handle;
+	p->gpu_va = vm->syncpt_ro_map_gpu_va;
+	p->len = g->syncpt_unit_size;
+	p->offset = 0;
+	p->prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+	if (err) {
+		nvgpu_err(g,
+			"mapping read-only va space failed err %d",
+			err);
+		__nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
+				gmmu_page_size_kernel);
+		vm->syncpt_ro_map_gpu_va = 0;
+		return err;
+	}
+
+	return 0;
+}
+
 int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 		u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
 {
 	int err;
 	struct gk20a *g = c->g;
-	struct vm_gk20a *vm = c->vm;
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_map_syncpt_params *p = &msg.params.map_syncpt;
 
@@ -37,34 +77,11 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 	 * All channels sharing same vm will share same ro mapping.
 	 * Create rw map for current channel sync point.
 	 */
-	if (!vm->syncpt_ro_map_gpu_va) {
-		vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
-				g->syncpt_unit_size,
-				gmmu_page_size_kernel);
-		if (!vm->syncpt_ro_map_gpu_va) {
-			nvgpu_err(g, "allocating read-only va space failed");
-			return -ENOMEM;
-		}
-
-		msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
-		msg.handle = vgpu_get_handle(g);
-		p->as_handle = c->vm->handle;
-		p->gpu_va = vm->syncpt_ro_map_gpu_va;
-		p->len = g->syncpt_unit_size;
-		p->offset = 0;
-		p->prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
-		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-		err = err ? err : msg.ret;
-		if (err) {
-			nvgpu_err(g,
-				"mapping read-only va space failed err %d",
-				err);
-			__nvgpu_vm_free_va(c->vm, vm->syncpt_ro_map_gpu_va,
-					gmmu_page_size_kernel);
-			vm->syncpt_ro_map_gpu_va = 0;
-			return err;
-		}
-	}
+	nvgpu_mutex_acquire(&c->vm->syncpt_ro_map_lock);
+	err = set_syncpt_ro_map_gpu_va_locked(c->vm);
+	nvgpu_mutex_release(&c->vm->syncpt_ro_map_lock);
+	if (err)
+		return err;
 
 	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
 			gmmu_page_size_kernel);
@@ -92,6 +109,24 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 
 	return 0;
 }
+
+int vgpu_gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm,
+		u64 *base_gpuva, u32 *sync_size)
+{
+	struct gk20a *g = gk20a_from_vm(vm);
+	int err;
+
+	nvgpu_mutex_acquire(&vm->syncpt_ro_map_lock);
+	err = set_syncpt_ro_map_gpu_va_locked(vm);
+	nvgpu_mutex_release(&vm->syncpt_ro_map_lock);
+	if (err)
+		return err;
+
+	*base_gpuva = vm->syncpt_ro_map_gpu_va;
+	*sync_size = g->syncpt_size;
+
+	return 0;
+}
 #endif /* CONFIG_TEGRA_GK20A_NVHOST */
 
 int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g)
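For context, vgpu_gv11b_fifo_get_sync_ro_map is the vGPU backing for the g->ops.fifo.get_sync_ro_map HAL op named in the commit message. The actual wiring lives in the vGPU HAL initialization outside this file; a minimal sketch of what that assignment might look like, with the surrounding structure and other member names assumed, is:

/*
 * Sketch only: the HAL initialization is in a separate file and is not part
 * of this diff; the gpu_ops layout shown here is assumed.
 */
static const struct gpu_ops vgpu_gv11b_ops = {
	.fifo = {
		/* ... other fifo ops ... */
		.alloc_syncpt_buf = vgpu_gv11b_fifo_alloc_syncpt_buf,
		.get_sync_ro_map = vgpu_gv11b_fifo_get_sync_ro_map,
	},
};

The NVGPU_AS_IOCTL_GET_SYNC_RO_MAP handler can then presumably reach this implementation through g->ops.fifo.get_sync_ro_map(vm, &base_gpuva, &sync_size) without needing to know whether it is running on native gv11b or on the virtualized path.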