path: root/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
author    Richard Zhao <rizhao@nvidia.com>          2016-03-31 14:16:23 -0400
committer Terje Bergstrom <tbergstrom@nvidia.com>   2016-05-31 13:47:22 -0400
commit    d707c5a444e024e1184213a75f44a73dbb1707d2 (patch)
tree      09711370df9d9078e4f604e60983877bbf30b9de /drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent    a71ce831fbbca3ba8602e0b07ecd630c4a39f376 (diff)
gpu: nvgpu: add tsg support for vgpu
- make tsg_gk20a.c call HAL for enable/disable channels
- add preempt_tsg HAL callbacks
- add tsg bind/unbind channel HAL callbacks
- add corresponding tsg callbacks for vgpu

Bug 1702773
JIRA VFND-1003

Change-Id: I2cba74b3ebd3920ef09219a168e6433d9574dbe8
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1144932
(cherry picked from commit c3787de7d38651d46969348f5acae2ba86b31ec7)
Reviewed-on: http://git-master/r/1126942
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/tsg_gk20a.c   48
1 file changed, 28 insertions(+), 20 deletions(-)
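Note: the call sites in this change assume the following callbacks exist on gpu_ops.fifo. The sketch below is inferred from the diff and is not copied from gk20a.h; in particular, the tsg_unbind_channel signature is an assumption.

/* Sketch of the gpu_ops.fifo callbacks this change relies on
 * (inferred from the call sites in the diff below). */
struct fifo_ops_sketch {
	void (*enable_channel)(struct channel_gk20a *ch);
	void (*disable_channel)(struct channel_gk20a *ch);
	int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
	int (*tsg_bind_channel)(struct tsg_gk20a *tsg,
				struct channel_gk20a *ch);
	/* exact unbind signature is not visible in this diff; assumed */
	int (*tsg_unbind_channel)(struct channel_gk20a *ch);
};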
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 5b77bf80..1e479395 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -37,13 +37,12 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
 
 int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 {
+	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
 	mutex_lock(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-		gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-			gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
-			| ccsr_channel_enable_set_true_f());
+		g->ops.fifo.enable_channel(ch);
 	}
 	mutex_unlock(&tsg->ch_list_lock);
 
@@ -52,13 +51,12 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 
 int gk20a_disable_tsg(struct tsg_gk20a *tsg)
 {
+	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
 	mutex_lock(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-		gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-			gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
-			| ccsr_channel_enable_clr_true_f());
+		g->ops.fifo.disable_channel(ch);
 	}
 	mutex_unlock(&tsg->ch_list_lock);
 
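For the native gk20a path, the enable_channel/disable_channel callbacks end up performing the same register read-modify-write that the loops above used to open-code. A sketch reconstructed from the removed lines; the function names here are illustrative, not the actual callback names.

/* Illustrative only: reconstructed from the lines removed above. */
static void sketch_gk20a_enable_channel(struct channel_gk20a *ch)
{
	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
		gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) |
		ccsr_channel_enable_set_true_f());
}

static void sketch_gk20a_disable_channel(struct channel_gk20a *ch)
{
	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
		gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) |
		ccsr_channel_enable_clr_true_f());
}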
@@ -80,31 +78,37 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 	return false;
 }
 
-/*
- * API to mark channel as part of TSG
- *
- * Note that channel is not runnable when we bind it to TSG
- */
-static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
+static int gk20a_tsg_bind_channel_fd(struct tsg_gk20a *tsg, int ch_fd)
 {
 	struct file *f = fget(ch_fd);
 	struct channel_gk20a *ch;
-
-	gk20a_dbg_fn("");
+	int err;
 
 	ch = gk20a_get_channel_from_file(ch_fd);
 	if (!ch)
 		return -EINVAL;
+	err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
+	fput(f);
+	return err;
+}
+
+/*
+ * API to mark channel as part of TSG
+ *
+ * Note that channel is not runnable when we bind it to TSG
+ */
+int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
+		struct channel_gk20a *ch)
+{
+	gk20a_dbg_fn("");
 
 	/* check if channel is already bound to some TSG */
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		fput(f);
 		return -EINVAL;
 	}
 
 	/* channel cannot be bound to TSG if it is already active */
 	if (gk20a_is_channel_active(tsg->g, ch)) {
-		fput(f);
 		return -EINVAL;
 	}
 
@@ -119,8 +123,6 @@ static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
 	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
 			tsg->tsgid, ch->hw_chid);
 
-	fput(f);
-
 	gk20a_dbg_fn("done");
 	return 0;
 }
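Splitting fd resolution (gk20a_tsg_bind_channel_fd) from the bind logic (gk20a_tsg_bind_channel) lets the latter sit behind the HAL, so a virtualized backend can plug in its own handlers. A hypothetical sketch of the vgpu side of that wiring; the callback names and location are assumptions, since the vgpu files are not part of this diff.

/* Hypothetical: how the vgpu backend would register its own TSG
 * callbacks (actual names/bodies live under drivers/gpu/nvgpu/vgpu/). */
void sketch_vgpu_init_tsg_ops(struct gpu_ops *gops)
{
	gops->fifo.tsg_bind_channel = vgpu_tsg_bind_channel;     /* assumed name */
	gops->fifo.tsg_unbind_channel = vgpu_tsg_unbind_channel; /* assumed name */
}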
@@ -494,7 +496,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			err = -EINVAL;
 			break;
 		}
-		err = gk20a_tsg_bind_channel(tsg, ch_fd);
+		err = gk20a_tsg_bind_channel_fd(tsg, ch_fd);
 		break;
 		}
 
@@ -539,7 +541,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			return err;
 		}
 		/* preempt TSG */
-		err = gk20a_fifo_preempt_tsg(g, tsg->tsgid);
+		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
 		gk20a_idle(g->dev);
 		break;
 		}
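The ioctl no longer calls gk20a_fifo_preempt_tsg() directly; it dispatches through g->ops.fifo.preempt_tsg so vgpu can substitute its own preempt path. The native assignment is presumably made where the gk20a fifo ops are initialized; the placement below is an assumption, not shown in this diff.

/* Presumed native wiring (exact location in fifo_gk20a.c is assumed). */
static void sketch_gk20a_init_fifo_preempt_op(struct gpu_ops *gops)
{
	gops->fifo.preempt_tsg = gk20a_fifo_preempt_tsg;
}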
@@ -600,3 +602,9 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 
 	return err;
 }
+
+void gk20a_init_tsg_ops(struct gpu_ops *gops)
+{
+	gops->fifo.tsg_bind_channel = gk20a_tsg_bind_channel;
+	gops->fifo.tsg_unbind_channel = gk20a_tsg_unbind_channel;
+}
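gk20a_init_tsg_ops() itself has to be invoked from the per-chip HAL setup so the bind/unbind pointers are populated before the ioctl path runs. A sketch of a plausible caller; the surrounding function is illustrative and not part of this diff.

/* Illustrative caller: a per-chip HAL init registering the TSG ops. */
static void sketch_init_hal(struct gpu_ops *gops)
{
	gk20a_init_tsg_ops(gops);
	/* other gops->fifo initialization would follow here */
}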