summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2014-08-04 07:28:56 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:10:45 -0400
commit76993ba18c6969cd26bb500eee4ecf734deb7bcb (patch)
tree780194830cf7a2537cf06f4fd6bfcd33ade64247 /drivers
parentb33020008b727d75827d670ca7a6c969769ca1a0 (diff)
gpu: nvgpu: rework TSG's channel list
Modify TSG's channel list as "ch_list" for all channels, instead of "ch_runnable_list" for only runnable channels. We can traverse this list and check the runnable status of each channel in active_channels to get the runnable channels. Remove the below APIs as they are no longer required: gk20a_bind_runnable_channel_to_tsg() gk20a_unbind_channel_from_tsg() While closing the channel, call gk20a_tsg_unbind_channel() to unbind the channel from the TSG. bug 1470692 Change-Id: I0178fa74b3e8bb4e5c0b3e3b2b2f031491761ba7 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: http://git-master/r/449227 Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c3
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c43
-rw-r--r--drivers/gpu/nvgpu/gk20a/tsg_gk20a.c87
-rw-r--r--drivers/gpu/nvgpu/gk20a/tsg_gk20a.h7
4 files changed, 50 insertions, 90 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 9f8876c3..5bb62dd3 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -680,6 +680,9 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
680 gk20a_vm_put(ch_vm); 680 gk20a_vm_put(ch_vm);
681 681
682unbind: 682unbind:
683 if (gk20a_is_channel_marked_as_tsg(ch))
684 gk20a_tsg_unbind_channel(ch);
685
683 channel_gk20a_unbind(ch); 686 channel_gk20a_unbind(ch);
684 channel_gk20a_free_inst(g, ch); 687 channel_gk20a_free_inst(g, ch);
685 688
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c8cc4373..a41955bd 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1722,41 +1722,32 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
1722 phys_addr_t runlist_pa; 1722 phys_addr_t runlist_pa;
1723 u32 old_buf, new_buf; 1723 u32 old_buf, new_buf;
1724 u32 chid, tsgid; 1724 u32 chid, tsgid;
1725 struct channel_gk20a *ch; 1725 struct channel_gk20a *ch = NULL;
1726 struct tsg_gk20a *tsg; 1726 struct tsg_gk20a *tsg = NULL;
1727 u32 count = 0; 1727 u32 count = 0;
1728 int num_ch;
1729 runlist = &f->runlist_info[runlist_id]; 1728 runlist = &f->runlist_info[runlist_id];
1730 1729
1731 /* valid channel, add/remove it from active list. 1730 /* valid channel, add/remove it from active list.
1732 Otherwise, keep active list untouched for suspend/resume. */ 1731 Otherwise, keep active list untouched for suspend/resume. */
1733 if (hw_chid != ~0) { 1732 if (hw_chid != ~0) {
1733 ch = &f->channel[hw_chid];
1734 if (gk20a_is_channel_marked_as_tsg(ch))
1735 tsg = &f->tsg[ch->tsgid];
1736
1734 if (add) { 1737 if (add) {
1735 if (test_and_set_bit(hw_chid, 1738 if (test_and_set_bit(hw_chid,
1736 runlist->active_channels) == 1) 1739 runlist->active_channels) == 1)
1737 return 0; 1740 return 0;
1738 if (gk20a_is_channel_marked_as_tsg( 1741 if (tsg && ++tsg->num_active_channels > 0)
1739 &f->channel[hw_chid])) { 1742 set_bit(f->channel[hw_chid].tsgid,
1740 num_ch = gk20a_bind_runnable_channel_to_tsg( 1743 runlist->active_tsgs);
1741 &f->channel[hw_chid],
1742 f->channel[hw_chid].tsgid);
1743 if (num_ch > 0)
1744 set_bit(f->channel[hw_chid].tsgid,
1745 runlist->active_tsgs);
1746 }
1747 } else { 1744 } else {
1748 if (test_and_clear_bit(hw_chid, 1745 if (test_and_clear_bit(hw_chid,
1749 runlist->active_channels) == 0) 1746 runlist->active_channels) == 0)
1750 return 0; 1747 return 0;
1751 if (gk20a_is_channel_marked_as_tsg( 1748 if (tsg && --tsg->num_active_channels == 0)
1752 &f->channel[hw_chid])) { 1749 clear_bit(f->channel[hw_chid].tsgid,
1753 num_ch = gk20a_unbind_channel_from_tsg( 1750 runlist->active_tsgs);
1754 &f->channel[hw_chid],
1755 f->channel[hw_chid].tsgid);
1756 if (!num_ch)
1757 clear_bit(f->channel[hw_chid].tsgid,
1758 runlist->active_tsgs);
1759 }
1760 } 1751 }
1761 } 1752 }
1762 1753
@@ -1811,15 +1802,17 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
1811 ram_rl_entry_timeslice_timeout_f( 1802 ram_rl_entry_timeslice_timeout_f(
1812 ram_rl_entry_timeslice_timeout_128_f()) | 1803 ram_rl_entry_timeslice_timeout_128_f()) |
1813 ram_rl_entry_tsg_length_f( 1804 ram_rl_entry_tsg_length_f(
1814 tsg->num_runnable_channels); 1805 tsg->num_active_channels);
1815 runlist_entry[1] = 0; 1806 runlist_entry[1] = 0;
1816 runlist_entry += 2; 1807 runlist_entry += 2;
1817 count++; 1808 count++;
1818 1809
1819 /* add channels bound to this TSG */ 1810 /* add runnable channels bound to this TSG */
1820 mutex_lock(&tsg->ch_list_lock); 1811 mutex_lock(&tsg->ch_list_lock);
1821 list_for_each_entry(ch, 1812 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1822 &tsg->ch_runnable_list, ch_entry) { 1813 if (!test_bit(ch->hw_chid,
1814 runlist->active_channels))
1815 continue;
1823 gk20a_dbg_info("add channel %d to runlist", 1816 gk20a_dbg_info("add channel %d to runlist",
1824 ch->hw_chid); 1817 ch->hw_chid);
1825 runlist_entry[0] = 1818 runlist_entry[0] =
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index d4ece147..42d92f43 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -30,63 +30,27 @@ bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
30} 30}
31 31
32/* 32/*
33 * API to add channel to runnable list of TSG.
34 *
35 * After this call, a channel will be scheduled as TSG channel
36 * in runlist
37 */
38int gk20a_bind_runnable_channel_to_tsg(struct channel_gk20a *ch, int tsgid)
39{
40 struct gk20a *g = ch->g;
41 struct tsg_gk20a *tsg = NULL;
42
43 if (ch->tsgid != tsgid)
44 return -EINVAL;
45
46 tsg = &g->fifo.tsg[tsgid];
47
48 mutex_lock(&tsg->ch_list_lock);
49 list_add_tail(&ch->ch_entry, &tsg->ch_runnable_list);
50 tsg->num_runnable_channels += 1;
51 mutex_unlock(&tsg->ch_list_lock);
52
53 return tsg->num_runnable_channels;
54}
55
56int gk20a_unbind_channel_from_tsg(struct channel_gk20a *ch, int tsgid)
57{
58 struct gk20a *g = ch->g;
59 struct tsg_gk20a *tsg = NULL;
60
61 if (ch->tsgid != tsgid)
62 return -EINVAL;
63
64 tsg = &g->fifo.tsg[tsgid];
65
66 mutex_lock(&tsg->ch_list_lock);
67 list_del_init(&ch->ch_entry);
68 tsg->num_runnable_channels -= 1;
69 mutex_unlock(&tsg->ch_list_lock);
70
71 return tsg->num_runnable_channels;
72}
73
74/*
75 * API to mark channel as part of TSG 33 * API to mark channel as part of TSG
76 * 34 *
77 * Note that channel is not runnable when we bind it to TSG 35 * Note that channel is not runnable when we bind it to TSG
78 */ 36 */
79static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd) 37static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
80{ 38{
81 struct file *f = fget(ch_fd); 39 struct file *f = fget(ch_fd);
82 struct channel_gk20a *ch = f->private_data; 40 struct channel_gk20a *ch = f->private_data;
83 41
84 /* check if channel is already bound to some TSG */ 42 /* check if channel is already bound to some TSG */
85 if (gk20a_is_channel_marked_as_tsg(ch)) 43 if (gk20a_is_channel_marked_as_tsg(ch)) {
44 fput(f);
86 return -EINVAL; 45 return -EINVAL;
46 }
87 47
88 ch->tsgid = tsg->tsgid; 48 ch->tsgid = tsg->tsgid;
89 49
50 mutex_lock(&tsg->ch_list_lock);
51 list_add_tail(&ch->ch_entry, &tsg->ch_list);
52 mutex_unlock(&tsg->ch_list_lock);
53
90 gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", 54 gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
91 tsg->tsgid, ch->hw_chid); 55 tsg->tsgid, ch->hw_chid);
92 56
@@ -95,11 +59,17 @@ static int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
95 return 0; 59 return 0;
96} 60}
97 61
98static int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, int ch_fd) 62int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
99{ 63{
100 /* We do not support explicitly unbinding channel from TSG. 64 struct fifo_gk20a *f = &ch->g->fifo;
101 * Channel will be unbounded from TSG when it is closed. 65 struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
102 */ 66
67 mutex_lock(&tsg->ch_list_lock);
68 list_del_init(&ch->ch_entry);
69 mutex_unlock(&tsg->ch_list_lock);
70
71 ch->tsgid = NVGPU_INVALID_TSG_ID;
72
103 return 0; 73 return 0;
104} 74}
105 75
@@ -115,7 +85,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
115 tsg->in_use = false; 85 tsg->in_use = false;
116 tsg->tsgid = tsgid; 86 tsg->tsgid = tsgid;
117 87
118 INIT_LIST_HEAD(&tsg->ch_runnable_list); 88 INIT_LIST_HEAD(&tsg->ch_list);
119 mutex_init(&tsg->ch_list_lock); 89 mutex_init(&tsg->ch_list_lock);
120 90
121 return 0; 91 return 0;
@@ -163,7 +133,7 @@ int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
163 return -ENOMEM; 133 return -ENOMEM;
164 134
165 tsg->g = g; 135 tsg->g = g;
166 tsg->num_runnable_channels = 0; 136 tsg->num_active_channels = 0;
167 137
168 tsg->tsg_gr_ctx = NULL; 138 tsg->tsg_gr_ctx = NULL;
169 tsg->vm = NULL; 139 tsg->vm = NULL;
@@ -181,10 +151,10 @@ int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
181 struct gk20a *g = container_of(inode->i_cdev, 151 struct gk20a *g = container_of(inode->i_cdev,
182 struct gk20a, tsg.cdev); 152 struct gk20a, tsg.cdev);
183 153
184 if (tsg->num_runnable_channels) { 154 if (tsg->num_active_channels) {
185 gk20a_err(dev_from_gk20a(g), 155 gk20a_err(dev_from_gk20a(g),
186 "Trying to free TSG %d with active channels %d\n", 156 "Trying to free TSG %d with active channels %d\n",
187 tsg->tsgid, tsg->num_runnable_channels); 157 tsg->tsgid, tsg->num_active_channels);
188 return -EBUSY; 158 return -EBUSY;
189 } 159 }
190 160
@@ -240,20 +210,15 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
240 err = -EINVAL; 210 err = -EINVAL;
241 break; 211 break;
242 } 212 }
243 err = nvgpu_tsg_bind_channel(tsg, ch_fd); 213 err = gk20a_tsg_bind_channel(tsg, ch_fd);
244 break; 214 break;
245 } 215 }
246 216
247 case NVGPU_TSG_IOCTL_UNBIND_CHANNEL: 217 case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
248 { 218 /* We do not support explicitly unbinding channel from TSG.
249 int ch_fd = *(int *)buf; 219 * Channel will be unbound from TSG when it is closed.
250 if (ch_fd < 0) { 220 */
251 err = -EINVAL;
252 break;
253 }
254 err = nvgpu_tsg_unbind_channel(tsg, ch_fd);
255 break; 221 break;
256 }
257 222
258 default: 223 default:
259 gk20a_err(dev_from_gk20a(g), 224 gk20a_err(dev_from_gk20a(g),
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 63113b60..dd8679be 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -27,8 +27,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp,
27 27
28int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid); 28int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid);
29 29
30int gk20a_bind_runnable_channel_to_tsg(struct channel_gk20a *ch, int tsgid); 30int gk20a_tsg_unbind_channel(struct channel_gk20a *ch);
31int gk20a_unbind_channel_from_tsg(struct channel_gk20a *ch, int tsgid);
32 31
33struct tsg_gk20a { 32struct tsg_gk20a {
34 struct gk20a *g; 33 struct gk20a *g;
@@ -36,8 +35,8 @@ struct tsg_gk20a {
36 bool in_use; 35 bool in_use;
37 int tsgid; 36 int tsgid;
38 37
39 struct list_head ch_runnable_list; 38 struct list_head ch_list;
40 int num_runnable_channels; 39 int num_active_channels;
41 struct mutex ch_list_lock; 40 struct mutex ch_list_lock;
42 41
43 struct gr_ctx_desc *tsg_gr_ctx; 42 struct gr_ctx_desc *tsg_gr_ctx;