Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
 -rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 87
 1 file changed, 26 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index d4ece147..42d92f43 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -30,63 +30,27 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 }
 
 /*
- * API to add channel to runnable list of TSG.
- *
- * After this call, a channel will be scheduled as TSG channel
- * in runlist
- */
-int gk20a_bind_runnable_channel_to_tsg(struct channel_gk20a *ch, int tsgid)
-{
-	struct gk20a *g = ch->g;
-	struct tsg_gk20a *tsg = NULL;
-
-	if (ch->tsgid != tsgid)
-		return -EINVAL;
-
-	tsg = &g->fifo.tsg[tsgid];
-
-	mutex_lock(&tsg->ch_list_lock);
-	list_add_tail(&ch->ch_entry, &tsg->ch_runnable_list);
-	tsg->num_runnable_channels += 1;
-	mutex_unlock(&tsg->ch_list_lock);
-
-	return tsg->num_runnable_channels;
-}
-
-int gk20a_unbind_channel_from_tsg(struct channel_gk20a *ch, int tsgid)
-{
-	struct gk20a *g = ch->g;
-	struct tsg_gk20a *tsg = NULL;
-
-	if (ch->tsgid != tsgid)
-		return -EINVAL;
-
-	tsg = &g->fifo.tsg[tsgid];
-
-	mutex_lock(&tsg->ch_list_lock);
-	list_del_init(&ch->ch_entry);
-	tsg->num_runnable_channels -= 1;
-	mutex_unlock(&tsg->ch_list_lock);
-
-	return tsg->num_runnable_channels;
-}
-
-/*
  * API to mark channel as part of TSG
  *
  * Note that channel is not runnable when we bind it to TSG
  */
-static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
+static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
 {
 	struct file *f = fget(ch_fd);
 	struct channel_gk20a *ch = f->private_data;
 
 	/* check if channel is already bound to some TSG */
-	if (gk20a_is_channel_marked_as_tsg(ch))
+	if (gk20a_is_channel_marked_as_tsg(ch)) {
+		fput(f);
 		return -EINVAL;
+	}
 
 	ch->tsgid = tsg->tsgid;
 
+	mutex_lock(&tsg->ch_list_lock);
+	list_add_tail(&ch->ch_entry, &tsg->ch_list);
+	mutex_unlock(&tsg->ch_list_lock);
+
 	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
 		tsg->tsgid, ch->hw_chid);
 
@@ -95,11 +59,17 @@ static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
 	return 0;
 }
 
-static int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, int ch_fd)
+int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 {
-	/* We do not support explicitly unbinding channel from TSG.
-	 * Channel will be unbounded from TSG when it is closed.
-	 */
+	struct fifo_gk20a *f = &ch->g->fifo;
+	struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
+
+	mutex_lock(&tsg->ch_list_lock);
+	list_del_init(&ch->ch_entry);
+	mutex_unlock(&tsg->ch_list_lock);
+
+	ch->tsgid = NVGPU_INVALID_TSG_ID;
+
 	return 0;
 }
 
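With the explicit-unbind ioctl gone (see the last hunk below), the new
gk20a_tsg_unbind_channel() is intended to be called from the driver's
channel-close path instead. A minimal sketch of such a call site follows;
the function name remove_channel_support() is hypothetical, and the real
caller (in channel_gk20a.c) is not part of this diff:

	/* Hypothetical close-path caller; illustrative only. */
	static void remove_channel_support(struct channel_gk20a *ch)
	{
		/* Detach from the TSG only if the channel was ever bound;
		 * gk20a_tsg_unbind_channel() assumes a valid ch->tsgid. */
		if (gk20a_is_channel_marked_as_tsg(ch))
			gk20a_tsg_unbind_channel(ch);

		/* ... remaining channel teardown ... */
	}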
@@ -115,7 +85,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	tsg->in_use = false;
 	tsg->tsgid = tsgid;
 
-	INIT_LIST_HEAD(&tsg->ch_runnable_list);
+	INIT_LIST_HEAD(&tsg->ch_list);
 	mutex_init(&tsg->ch_list_lock);
 
 	return 0;
@@ -163,7 +133,7 @@ int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
 		return -ENOMEM;
 
 	tsg->g = g;
-	tsg->num_runnable_channels = 0;
+	tsg->num_active_channels = 0;
 
 	tsg->tsg_gr_ctx = NULL;
 	tsg->vm = NULL;
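For orientation, these are the tsg_gk20a members this commit touches, shown
as a partial sketch reconstructed from the diff alone; the authoritative
definition lives in tsg_gk20a.h and contains further fields:

	/* Partial sketch of struct tsg_gk20a, from fields used in this diff. */
	struct tsg_gk20a {
		struct gk20a *g;
		bool in_use;
		int tsgid;

		struct list_head ch_list;	/* was ch_runnable_list */
		struct mutex ch_list_lock;	/* protects ch_list */
		int num_active_channels;	/* was num_runnable_channels */

		/* ... tsg_gr_ctx, vm, and other members ... */
	};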
@@ -181,10 +151,10 @@ int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
 	struct gk20a *g = container_of(inode->i_cdev,
 		struct gk20a, tsg.cdev);
 
-	if (tsg->num_runnable_channels) {
+	if (tsg->num_active_channels) {
 		gk20a_err(dev_from_gk20a(g),
 			"Trying to free TSG %d with active channels %d\n",
-			tsg->tsgid, tsg->num_runnable_channels);
+			tsg->tsgid, tsg->num_active_channels);
 		return -EBUSY;
 	}
 
@@ -240,20 +210,15 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			err = -EINVAL;
 			break;
 		}
-		err = nvgpu_tsg_bind_channel(tsg, ch_fd);
+		err = gk20a_tsg_bind_channel(tsg, ch_fd);
 		break;
 	}
 
 	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
-	{
-		int ch_fd = *(int *)buf;
-		if (ch_fd < 0) {
-			err = -EINVAL;
-			break;
-		}
-		err = nvgpu_tsg_unbind_channel(tsg, ch_fd);
+		/* We do not support explicitly unbinding channel from TSG.
+		 * Channel will be unbounded from TSG when it is closed.
+		 */
 		break;
-	}
 
 	default:
 		gk20a_err(dev_from_gk20a(g),
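Finally, a minimal userspace sketch of the one bind path that remains. The
device node path is an assumption (the TSG node is typically something like
/dev/nvhost-tsg-gpu), and the int-sized ioctl payload follows from the
handler's "int ch_fd = *(int *)buf;" above. Since there is no unbind ioctl
any more, the channel is unbound implicitly when its fd is closed:

	/* Hypothetical userspace usage; device path and error handling
	 * are assumptions, not part of this commit. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nvgpu.h>	/* NVGPU_TSG_IOCTL_BIND_CHANNEL */

	int bind_channel_to_tsg(const char *tsg_dev, int channel_fd)
	{
		int tsg_fd = open(tsg_dev, O_RDWR);

		if (tsg_fd < 0)
			return -1;

		/* The kernel side reads the payload as a plain int fd. */
		if (ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL,
			  &channel_fd) < 0) {
			close(tsg_fd);
			return -1;
		}

		/* Closing channel_fd later unbinds it from the TSG. */
		return tsg_fd;
	}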