Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/channel.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/channel.c | 157
1 file changed, 157 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index 40b11b86..8366ed88 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -27,6 +27,9 @@
 
 #include "gk20a/gk20a.h"
 
+#include "channel.h"
+#include "os_linux.h"
+
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
 
 #include <linux/uaccess.h>
@@ -34,6 +37,160 @@
 #include <trace/events/gk20a.h>
 #include <uapi/linux/nvgpu.h>
 
+static void gk20a_channel_update_runcb_fn(struct work_struct *work)
+{
+	struct nvgpu_channel_completion_cb *completion_cb =
+		container_of(work, struct nvgpu_channel_completion_cb, work);
+	struct nvgpu_channel_linux *priv =
+		container_of(completion_cb,
+			     struct nvgpu_channel_linux, completion_cb);
+	struct channel_gk20a *ch = priv->ch;
+	void (*fn)(struct channel_gk20a *, void *);
+	void *user_data;
+
+	nvgpu_spinlock_acquire(&completion_cb->lock);
+	fn = completion_cb->fn;
+	user_data = completion_cb->user_data;
+	nvgpu_spinlock_release(&completion_cb->lock);
+
+	if (fn)
+		fn(ch, user_data);
+}
+
+static void nvgpu_channel_work_completion_init(struct channel_gk20a *ch)
+{
+	struct nvgpu_channel_linux *priv = ch->os_priv;
+
+	priv->completion_cb.fn = NULL;
+	priv->completion_cb.user_data = NULL;
+	nvgpu_spinlock_init(&priv->completion_cb.lock);
+	INIT_WORK(&priv->completion_cb.work, gk20a_channel_update_runcb_fn);
+}
+
+static void nvgpu_channel_work_completion_clear(struct channel_gk20a *ch)
+{
+	struct nvgpu_channel_linux *priv = ch->os_priv;
+
+	nvgpu_spinlock_acquire(&priv->completion_cb.lock);
+	priv->completion_cb.fn = NULL;
+	priv->completion_cb.user_data = NULL;
+	nvgpu_spinlock_release(&priv->completion_cb.lock);
+	cancel_work_sync(&priv->completion_cb.work);
+}
+
+static void nvgpu_channel_work_completion_signal(struct channel_gk20a *ch)
+{
+	struct nvgpu_channel_linux *priv = ch->os_priv;
+
+	if (priv->completion_cb.fn)
+		schedule_work(&priv->completion_cb.work);
+}
+
+static void nvgpu_channel_work_completion_cancel_sync(struct channel_gk20a *ch)
+{
+	struct nvgpu_channel_linux *priv = ch->os_priv;
+
+	if (priv->completion_cb.fn)
+		cancel_work_sync(&priv->completion_cb.work);
+}
+
+struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
+		void (*update_fn)(struct channel_gk20a *, void *),
+		void *update_fn_data,
+		int runlist_id,
+		bool is_privileged_channel)
+{
+	struct channel_gk20a *ch;
+	struct nvgpu_channel_linux *priv;
+
+	ch = gk20a_open_new_channel(g, runlist_id, is_privileged_channel);
+
+	if (ch) {
+		priv = ch->os_priv;
+		nvgpu_spinlock_acquire(&priv->completion_cb.lock);
+		priv->completion_cb.fn = update_fn;
+		priv->completion_cb.user_data = update_fn_data;
+		nvgpu_spinlock_release(&priv->completion_cb.lock);
+	}
+
+	return ch;
+}
+
+static void nvgpu_channel_open_linux(struct channel_gk20a *ch)
+{
+}
+
+static void nvgpu_channel_close_linux(struct channel_gk20a *ch)
+{
+	nvgpu_channel_work_completion_clear(ch);
+}
+
+static int nvgpu_channel_alloc_linux(struct gk20a *g, struct channel_gk20a *ch)
+{
+	struct nvgpu_channel_linux *priv;
+
+	priv = nvgpu_kzalloc(g, sizeof(*priv));
+	if (!priv)
+		return -ENOMEM;
+
+	ch->os_priv = priv;
+	priv->ch = ch;
+
+	nvgpu_channel_work_completion_init(ch);
+
+	return 0;
+}
+
+static void nvgpu_channel_free_linux(struct gk20a *g, struct channel_gk20a *ch)
+{
+	nvgpu_kfree(g, ch->os_priv);
+}
+
+int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l)
+{
+	struct gk20a *g = &l->g;
+	struct fifo_gk20a *f = &g->fifo;
+	int chid;
+	int err;
+
+	for (chid = 0; chid < (int)f->num_channels; chid++) {
+		struct channel_gk20a *ch = &f->channel[chid];
+
+		err = nvgpu_channel_alloc_linux(g, ch);
+		if (err)
+			goto err_clean;
+	}
+
+	g->os_channel.open = nvgpu_channel_open_linux;
+	g->os_channel.close = nvgpu_channel_close_linux;
+	g->os_channel.work_completion_signal =
+		nvgpu_channel_work_completion_signal;
+	g->os_channel.work_completion_cancel_sync =
+		nvgpu_channel_work_completion_cancel_sync;
+	return 0;
+
+err_clean:
+	for (; chid >= 0; chid--) {
+		struct channel_gk20a *ch = &f->channel[chid];
+
+		nvgpu_channel_free_linux(g, ch);
+	}
+	return err;
+}
+
+void nvgpu_remove_channel_support_linux(struct nvgpu_os_linux *l)
+{
+	struct gk20a *g = &l->g;
+	struct fifo_gk20a *f = &g->fifo;
+	unsigned int chid;
+
+	for (chid = 0; chid < f->num_channels; chid++) {
+		struct channel_gk20a *ch = &f->channel[chid];
+
+		nvgpu_channel_free_linux(g, ch);
+	}
+}
+
 u32 nvgpu_get_gpfifo_entry_size(void)
 {
 	return sizeof(struct nvgpu_gpfifo);
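
As a quick, hypothetical illustration of the callback hook this change adds (not part of the commit): a caller registers an update function at channel-open time, and work_completion_signal later schedules it on the shared workqueue. The names my_completion_cb and my_open_channel, and the runlist/privilege argument values, are assumptions for the sketch; only gk20a_open_new_channel_with_cb() and the callback signature come from the diff above.

/*
 * Hypothetical usage sketch, in-kernel caller. Everything prefixed "my_"
 * is invented for illustration and does not appear in the commit.
 */
static void my_completion_cb(struct channel_gk20a *ch, void *user_data)
{
	/* Runs in workqueue context via gk20a_channel_update_runcb_fn(). */
}

static struct channel_gk20a *my_open_channel(struct gk20a *g, void *data)
{
	return gk20a_open_new_channel_with_cb(g, my_completion_cb, data,
					      -1 /* assumed: default runlist */,
					      false /* not privileged */);
}

Note the locking design in gk20a_channel_update_runcb_fn(): the fn/user_data pair is snapshotted under completion_cb.lock but invoked with the lock dropped, so the callback runs without the spinlock held and may block.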