summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c11
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.c40
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.h3
-rw-r--r--include/uapi/linux/nvgpu.h20
4 files changed, 72 insertions, 2 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
index 3b5ca298..0b6b5913 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
@@ -514,6 +514,13 @@ static int gk20a_ctrl_vsm_mapping(struct gk20a *g,
514 return err; 514 return err;
515} 515}
516 516
517static int gk20a_ctrl_get_buffer_info(
518 struct gk20a *g, struct nvgpu_gpu_get_buffer_info_args *args)
519{
520 return gk20a_mm_get_buffer_info(dev_from_gk20a(g), args->in.dmabuf_fd,
521 &args->out.id, &args->out.length);
522}
523
517long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 524long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
518{ 525{
519 struct platform_device *dev = filp->private_data; 526 struct platform_device *dev = filp->private_data;
@@ -729,6 +736,10 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
729 (struct nvgpu_gpu_vsms_mapping *)buf); 736 (struct nvgpu_gpu_vsms_mapping *)buf);
730 break; 737 break;
731 738
739 case NVGPU_GPU_IOCTL_GET_BUFFER_INFO:
740 err = gk20a_ctrl_get_buffer_info(g,
741 (struct nvgpu_gpu_get_buffer_info_args *)buf);
742 break;
732 743
733 default: 744 default:
734 dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd); 745 dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 1bc35597..141a37af 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -120,6 +120,8 @@ struct gk20a_dmabuf_priv {
120 int pin_count; 120 int pin_count;
121 121
122 struct list_head states; 122 struct list_head states;
123
124 u64 buffer_id;
123}; 125};
124 126
125static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm); 127static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm);
@@ -3044,6 +3046,7 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
3044{ 3046{
3045 struct gk20a_dmabuf_priv *priv; 3047 struct gk20a_dmabuf_priv *priv;
3046 static DEFINE_MUTEX(priv_lock); 3048 static DEFINE_MUTEX(priv_lock);
3049 static u64 priv_count = 0;
3047 3050
3048 priv = dma_buf_get_drvdata(dmabuf, dev); 3051 priv = dma_buf_get_drvdata(dmabuf, dev);
3049 if (likely(priv)) 3052 if (likely(priv))
@@ -3060,6 +3063,7 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
3060 } 3063 }
3061 mutex_init(&priv->lock); 3064 mutex_init(&priv->lock);
3062 INIT_LIST_HEAD(&priv->states); 3065 INIT_LIST_HEAD(&priv->states);
3066 priv->buffer_id = ++priv_count;
3063 dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv); 3067 dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);
3064priv_exist_or_err: 3068priv_exist_or_err:
3065 mutex_unlock(&priv_lock); 3069 mutex_unlock(&priv_lock);
@@ -3145,8 +3149,11 @@ int gk20a_vm_map_buffer(struct vm_gk20a *vm,
3145 3149
3146 /* get ref to the mem handle (released on unmap_locked) */ 3150 /* get ref to the mem handle (released on unmap_locked) */
3147 dmabuf = dma_buf_get(dmabuf_fd); 3151 dmabuf = dma_buf_get(dmabuf_fd);
3148 if (IS_ERR(dmabuf)) 3152 if (IS_ERR(dmabuf)) {
3153 dev_warn(dev_from_vm(vm), "%s: fd %d is not a dmabuf",
3154 __func__, dmabuf_fd);
3149 return PTR_ERR(dmabuf); 3155 return PTR_ERR(dmabuf);
3156 }
3150 3157
3151 err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev_from_vm(vm)); 3158 err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev_from_vm(vm));
3152 if (err) { 3159 if (err) {
@@ -3653,6 +3660,37 @@ const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
3653 gk20a_mm_levels_64k : gk20a_mm_levels_128k; 3660 gk20a_mm_levels_64k : gk20a_mm_levels_128k;
3654} 3661}
3655 3662
3663int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
3664 u64 *buffer_id, u64 *buffer_len)
3665{
3666 struct dma_buf *dmabuf;
3667 struct gk20a_dmabuf_priv *priv;
3668 int err = 0;
3669
3670 dmabuf = dma_buf_get(dmabuf_fd);
3671 if (IS_ERR(dmabuf)) {
3672 dev_warn(dev, "%s: fd %d is not a dmabuf", __func__, dmabuf_fd);
3673 return PTR_ERR(dmabuf);
3674 }
3675
3676 err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
3677 if (err) {
3678 dev_warn(dev, "Failed to allocate dmabuf drvdata (err = %d)",
3679 err);
3680 goto clean_up;
3681 }
3682
3683 priv = dma_buf_get_drvdata(dmabuf, dev);
3684 if (likely(priv)) {
3685 *buffer_id = priv->buffer_id;
3686 *buffer_len = dmabuf->size;
3687 }
3688
3689clean_up:
3690 dma_buf_put(dmabuf);
3691 return err;
3692}
3693
3656void gk20a_init_mm(struct gpu_ops *gops) 3694void gk20a_init_mm(struct gpu_ops *gops)
3657{ 3695{
3658 gops->mm.is_debug_mode_enabled = gk20a_mm_mmu_debug_mode_enabled; 3696 gops->mm.is_debug_mode_enabled = gk20a_mm_mmu_debug_mode_enabled;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 7bbaf283..7be4383b 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -688,4 +688,7 @@ static inline void nvgpu_free(void *p)
688 vfree(p); 688 vfree(p);
689} 689}
690 690
691int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
692 u64 *buffer_id, u64 *buffer_len);
693
691#endif /* MM_GK20A_H */ 694#endif /* MM_GK20A_H */
diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h
index 6024edee..6a8e44c5 100644
--- a/include/uapi/linux/nvgpu.h
+++ b/include/uapi/linux/nvgpu.h
@@ -314,6 +314,22 @@ struct nvgpu_gpu_vsms_mapping {
314 __u64 vsms_map_buf_addr; 314 __u64 vsms_map_buf_addr;
315}; 315};
316 316
/*
 * Argument block for NVGPU_GPU_IOCTL_GET_BUFFER_INFO.
 *
 * in and out share storage (anonymous union): userspace sets in.dmabuf_fd,
 * the kernel consumes it and then overwrites the union with the out view.
 * Field layout is uapi ABI and must not change; reserved words pad the
 * out view for future extension.
 */
struct nvgpu_gpu_get_buffer_info_args {
	union {
		struct {
			__u32 dmabuf_fd;	/* dma-buf fd to query */
		} in;
		struct {
			__u64 id;		/* unique among live buffers */
			__u64 length;		/* allocated buffer length */
			__u64 reserved0;
			__u64 reserved1;
		} out;
	};
};
332
317#define NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE \ 333#define NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE \
318 _IOR(NVGPU_GPU_IOCTL_MAGIC, 1, struct nvgpu_gpu_zcull_get_ctx_size_args) 334 _IOR(NVGPU_GPU_IOCTL_MAGIC, 1, struct nvgpu_gpu_zcull_get_ctx_size_args)
319#define NVGPU_GPU_IOCTL_ZCULL_GET_INFO \ 335#define NVGPU_GPU_IOCTL_ZCULL_GET_INFO \
@@ -352,9 +368,11 @@ struct nvgpu_gpu_vsms_mapping {
352 _IOWR(NVGPU_GPU_IOCTL_MAGIC, 18, struct nvgpu_gpu_num_vsms) 368 _IOWR(NVGPU_GPU_IOCTL_MAGIC, 18, struct nvgpu_gpu_num_vsms)
353#define NVGPU_GPU_IOCTL_VSMS_MAPPING \ 369#define NVGPU_GPU_IOCTL_VSMS_MAPPING \
354 _IOWR(NVGPU_GPU_IOCTL_MAGIC, 19, struct nvgpu_gpu_vsms_mapping) 370 _IOWR(NVGPU_GPU_IOCTL_MAGIC, 19, struct nvgpu_gpu_vsms_mapping)
371#define NVGPU_GPU_IOCTL_GET_BUFFER_INFO \
372 _IOWR(NVGPU_GPU_IOCTL_MAGIC, 20, struct nvgpu_gpu_get_buffer_info_args)
355 373
356#define NVGPU_GPU_IOCTL_LAST \ 374#define NVGPU_GPU_IOCTL_LAST \
357 _IOC_NR(NVGPU_GPU_IOCTL_VSMS_MAPPING) 375 _IOC_NR(NVGPU_GPU_IOCTL_GET_BUFFER_INFO)
358#define NVGPU_GPU_IOCTL_MAX_ARG_SIZE \ 376#define NVGPU_GPU_IOCTL_MAX_ARG_SIZE \
359 sizeof(struct nvgpu_gpu_prepare_compressible_read_args) 377 sizeof(struct nvgpu_gpu_prepare_compressible_read_args)
360 378