author     Deepak Nibade <dnibade@nvidia.com>                    2017-11-23 06:59:14 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2017-11-28 12:46:54 -0500
commit     ce06f74d6ba9eb495661c29eabcd6da2f52c7c8b (patch)
tree       307c045b4df7861d20f46a120a3726e274f6e472 /drivers/gpu/nvgpu/common/linux
parent     861b11a968b1f51f45832486e62bfe23fc29fc19 (diff)
gpu: nvgpu: move cycle state buffer handler to linux
We use the dma_buf pointer cyclestate_buffer_handler in common code. But
since this is Linux specific, we need to move it out of common code and
into Linux specific code.

Move the dma_buf pointer cyclestate_buffer_handler from the common channel
code to struct nvgpu_channel_linux, and fix all pointer accesses to this
handle.

Move gk20a_channel_free_cycle_stats_buffer() to ioctl_channel.c since it is
mostly Linux specific. And since gk20a_channel_free_cycle_stats_buffer()
needs to be called while closing the channel, call it from
nvgpu_channel_close_linux().

Jira NVGPU-397
Jira NVGPU-415

Change-Id: Ifb429e49b8f7a1c9e2bc757f3efdd50b28ceca1f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1603909
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
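For orientation, the teardown added here (dma_buf_vunmap() plus dma_buf_put() in gk20a_channel_free_cycle_stats_buffer()) undoes an enable path built on dma_buf_get() and dma_buf_vmap(). Below is a minimal sketch of that enable side, using the field names introduced by this patch; the wrapper name setup_cycle_stats_buffer() is illustrative only and not part of the driver, and error handling is kept to the essentials.

/* Sketch only: enable side of the cycle-stats buffer, paired with the
 * dma_buf_vunmap()/dma_buf_put() teardown in
 * gk20a_channel_free_cycle_stats_buffer(). The wrapper name is hypothetical. */
#include <linux/dma-buf.h>
#include <linux/err.h>

static int setup_cycle_stats_buffer(struct channel_gk20a *ch, int dmabuf_fd)
{
        struct nvgpu_channel_linux *priv = ch->os_priv;
        struct dma_buf *dmabuf;
        void *virtual_address;

        dmabuf = dma_buf_get(dmabuf_fd);        /* take a reference on the fd's dma-buf */
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        virtual_address = dma_buf_vmap(dmabuf); /* CPU mapping used by the sampler */
        if (!virtual_address) {
                dma_buf_put(dmabuf);            /* drop the reference if mapping fails */
                return -ENOMEM;
        }

        priv->cyclestate_buffer_handler = dmabuf;               /* Linux-only handle (this patch) */
        ch->cyclestate.cyclestate_buffer = virtual_address;     /* stays in common code */
        ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
        return 0;
}

Keeping the struct dma_buf pointer in nvgpu_channel_linux while the vmap'd address and size stay in the common cyclestate struct is what lets common code keep using the buffer without pulling in <linux/dma-buf.h>.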
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
-rw-r--r--   drivers/gpu/nvgpu/common/linux/channel.c         1
-rw-r--r--   drivers/gpu/nvgpu/common/linux/channel.h         3
-rw-r--r--   drivers/gpu/nvgpu/common/linux/ioctl_channel.c   28
-rw-r--r--   drivers/gpu/nvgpu/common/linux/ioctl_channel.h   1
4 files changed, 27 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index 0ed596ac..a725cd6b 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -245,6 +245,7 @@ static void nvgpu_channel_close_linux(struct channel_gk20a *ch)
 	nvgpu_channel_work_completion_clear(ch);
 
 #if defined(CONFIG_GK20A_CYCLE_STATS)
+	gk20a_channel_free_cycle_stats_buffer(ch);
 	gk20a_channel_free_cycle_stats_snapshot(ch);
 #endif
 }
diff --git a/drivers/gpu/nvgpu/common/linux/channel.h b/drivers/gpu/nvgpu/common/linux/channel.h
index a4df75d6..35be425a 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.h
+++ b/drivers/gpu/nvgpu/common/linux/channel.h
@@ -17,6 +17,7 @@
 #define __NVGPU_CHANNEL_H__
 
 #include <linux/workqueue.h>
+#include <linux/dma-buf.h>
 
 #include <nvgpu/types.h>
 
@@ -56,6 +57,8 @@ struct nvgpu_channel_linux {
 
 	struct nvgpu_channel_completion_cb completion_cb;
 	struct nvgpu_error_notifier error_notifier;
+
+	struct dma_buf *cyclestate_buffer_handler;
 };
 
 int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l);
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 13355605..3a2f7b5e 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -102,17 +102,35 @@ struct channel_priv {
 
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 
+void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch)
+{
+	struct nvgpu_channel_linux *priv = ch->os_priv;
+
+	/* disable existing cyclestats buffer */
+	nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
+	if (priv->cyclestate_buffer_handler) {
+		dma_buf_vunmap(priv->cyclestate_buffer_handler,
+				ch->cyclestate.cyclestate_buffer);
+		dma_buf_put(priv->cyclestate_buffer_handler);
+		priv->cyclestate_buffer_handler = NULL;
+		ch->cyclestate.cyclestate_buffer = NULL;
+		ch->cyclestate.cyclestate_buffer_size = 0;
+	}
+	nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
+}
+
 static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 			struct nvgpu_cycle_stats_args *args)
 {
 	struct dma_buf *dmabuf;
 	void *virtual_address;
+	struct nvgpu_channel_linux *priv = ch->os_priv;
 
 	/* is it allowed to handle calls for current GPU? */
 	if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS))
 		return -ENOSYS;
 
-	if (args->dmabuf_fd && !ch->cyclestate.cyclestate_buffer_handler) {
+	if (args->dmabuf_fd && !priv->cyclestate_buffer_handler) {
 
 		/* set up new cyclestats buffer */
 		dmabuf = dma_buf_get(args->dmabuf_fd);
@@ -122,18 +140,16 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 		if (!virtual_address)
 			return -ENOMEM;
 
-		ch->cyclestate.cyclestate_buffer_handler = dmabuf;
+		priv->cyclestate_buffer_handler = dmabuf;
 		ch->cyclestate.cyclestate_buffer = virtual_address;
 		ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
 		return 0;
 
-	} else if (!args->dmabuf_fd &&
-			ch->cyclestate.cyclestate_buffer_handler) {
+	} else if (!args->dmabuf_fd && priv->cyclestate_buffer_handler) {
 		gk20a_channel_free_cycle_stats_buffer(ch);
 		return 0;
 
-	} else if (!args->dmabuf_fd &&
-			!ch->cyclestate.cyclestate_buffer_handler) {
+	} else if (!args->dmabuf_fd && !priv->cyclestate_buffer_handler) {
 		/* no requst from GL */
 		return 0;
 
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.h b/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
index 3ea8d765..c37108c4 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.h
@@ -37,6 +37,7 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
 		struct nvgpu_channel_open_args *args);
 
 int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
+void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
 
 extern const struct file_operations gk20a_event_id_ops;
 extern const struct file_operations gk20a_channel_ops;
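For completeness, the ioctl handler shown above is driven from userspace by passing a dma-buf fd to install a cycle-stats buffer and 0 to release it. A hedged sketch follows; it assumes the request is exposed as NVGPU_IOCTL_CHANNEL_CYCLE_STATS in the nvgpu uapi header, which is not part of this diff (only struct nvgpu_cycle_stats_args and its dmabuf_fd field appear above).

/* Userspace sketch; NVGPU_IOCTL_CHANNEL_CYCLE_STATS and the uapi header path
 * are assumptions, only struct nvgpu_cycle_stats_args.dmabuf_fd is confirmed
 * by the diff above. */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int set_cycle_stats_buffer(int channel_fd, int dmabuf_fd)
{
        struct nvgpu_cycle_stats_args args = {
                /* a non-zero fd installs a buffer, 0 tears it down */
                .dmabuf_fd = dmabuf_fd,
        };

        return ioctl(channel_fd, NVGPU_IOCTL_CHANNEL_CYCLE_STATS, &args);
}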