author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-12-15 13:25:22 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-12-28 13:01:32 -0500
commit	f19f22fcc8ef21b363b873c499cbd2e690af29f8 (patch)
tree	02d6b8280af305d0339ed860e331ab091d4b49d2 /drivers/gpu/nvgpu/gk20a
parent	aa52601f620423fdd98b79e2c2c5e1d767a5f685 (diff)
gpu: nvgpu: Remove support for channel events
Remove support for events for bare channels. All users have already
moved to TSGs and TSG events.

Bug 1842197

Change-Id: Ib3ff68134ad9515ee761d0f0e19a3150a0b744ab
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1618906
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
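The pattern repeated across the hunks below is the same in each file: the two-way dispatch between bare-channel events and TSG events collapses to the TSG path alone. A condensed before/after sketch of that shape, taken from this diff, with NVGPU_EVENT_ID_BLOCKING_SYNC standing in for the particular event ID:

	/* Before: each event-posting site branched on whether the
	 * channel was bound to a TSG. */
	if (gk20a_is_channel_marked_as_tsg(ch)) {
		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];

		gk20a_tsg_event_id_post_event(tsg,
				NVGPU_EVENT_ID_BLOCKING_SYNC);
	} else {
		gk20a_channel_event_id_post_event(ch,
				NVGPU_EVENT_ID_BLOCKING_SYNC);
	}

	/* After: channels are assumed TSG-bound, so the event always
	 * goes to the channel's TSG. */
	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];

	gk20a_tsg_event_id_post_event(tsg, NVGPU_EVENT_ID_BLOCKING_SYNC);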
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	22
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.h	26
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	1
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	34
-rw-r--r--	drivers/gpu/nvgpu/gk20a/tsg_gk20a.h	19
5 files changed, 29 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e10be3c9..07ae5a16 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -689,7 +689,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch;
-	struct gk20a_event_id_data *event_id_data, *event_id_data_temp;
 
 	/* compatibility with existing code */
 	if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
@@ -730,16 +729,6 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch->pid = current->pid;
 	ch->tgid = current->tgid;	/* process granularity for FECS traces */
 
-	/* unhook all events created on this channel */
-	nvgpu_mutex_acquire(&ch->event_id_list_lock);
-	nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp,
-			&ch->event_id_list,
-			gk20a_event_id_data,
-			event_id_node) {
-		nvgpu_list_del(&event_id_data->event_id_node);
-	}
-	nvgpu_mutex_release(&ch->event_id_list_lock);
-
 	/* By default, channel is regular (non-TSG) channel */
 	ch->tsgid = NVGPU_INVALID_TSG_ID;
 
@@ -2134,7 +2123,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 
 	nvgpu_init_list_node(&c->joblist.dynamic.jobs);
 	nvgpu_init_list_node(&c->dbg_s_list);
-	nvgpu_init_list_node(&c->event_id_list);
 	nvgpu_init_list_node(&c->worker_item);
 
 	err = nvgpu_mutex_init(&c->ioctl_lock);
@@ -2157,19 +2145,14 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	if (err)
 		goto fail_5;
 #endif
-	err = nvgpu_mutex_init(&c->event_id_list_lock);
-	if (err)
-		goto fail_6;
 	err = nvgpu_mutex_init(&c->dbg_s_lock);
 	if (err)
-		goto fail_7;
+		goto fail_6;
 
 	nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
 	return 0;
 
-fail_7:
-	nvgpu_mutex_destroy(&c->event_id_list_lock);
 fail_6:
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 	nvgpu_mutex_destroy(&c->cs_client_mutex);
@@ -2286,9 +2269,6 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 
 			gk20a_tsg_event_id_post_event(tsg,
 				NVGPU_EVENT_ID_BLOCKING_SYNC);
-		} else {
-			gk20a_channel_event_id_post_event(c,
-				NVGPU_EVENT_ID_BLOCKING_SYNC);
 		}
 	}
 	/*
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index e6f73cf6..b43c5638 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -124,27 +124,6 @@ struct channel_gk20a_timeout {
 	u64 pb_get;
 };
 
-struct gk20a_event_id_data {
-	struct gk20a *g;
-
-	int id; /* ch or tsg */
-	bool is_tsg;
-	u32 event_id;
-
-	bool event_posted;
-
-	struct nvgpu_cond event_id_wq;
-	struct nvgpu_mutex lock;
-	struct nvgpu_list_node event_id_node;
-};
-
-static inline struct gk20a_event_id_data *
-gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
-{
-	return (struct gk20a_event_id_data *)
-		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
-};
-
 /*
  * Track refcount actions, saving their stack traces. This number specifies how
  * many most recent actions are stored in a buffer. Set to 0 to disable.
@@ -265,9 +244,6 @@ struct channel_gk20a {
 	struct nvgpu_mutex dbg_s_lock;
 	struct nvgpu_list_node dbg_s_list;
 
-	struct nvgpu_list_node event_id_list;
-	struct nvgpu_mutex event_id_list_lock;
-
 	bool has_timedout;
 	u32 timeout_ms_max;
 	bool timeout_debug_dump;
@@ -385,8 +361,6 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
 		int *__timeslice_timeout, int *__timeslice_scale);
 int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 		u32 level);
-void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-		u32 event_id);
 
 int channel_gk20a_alloc_job(struct channel_gk20a *c,
 		struct channel_gk20a_job **job_out);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c5c06df9..fc71e907 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -564,7 +564,6 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 		nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
 		nvgpu_mutex_destroy(&c->cs_client_mutex);
 #endif
-		nvgpu_mutex_destroy(&c->event_id_list_lock);
 		nvgpu_mutex_destroy(&c->dbg_s_lock);
 
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index ea4d1d24..f07a54b1 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5256,16 +5256,10 @@ static int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
 {
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_gk20a *ch = &f->channel[isr_data->chid];
+	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
-
-		gk20a_tsg_event_id_post_event(tsg,
-			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
-	} else {
-		gk20a_channel_event_id_post_event(ch,
-			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
-	}
+	gk20a_tsg_event_id_post_event(tsg,
+		NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
 
 	nvgpu_cond_broadcast(&ch->semaphore_wq);
 
@@ -5806,26 +5800,16 @@ static int gk20a_gr_post_bpt_events(struct gk20a *g, struct channel_gk20a *ch,
 		u32 global_esr)
 {
 	if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) {
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-			gk20a_tsg_event_id_post_event(tsg,
-				NVGPU_EVENT_ID_BPT_INT);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-				NVGPU_EVENT_ID_BPT_INT);
-		}
+		gk20a_tsg_event_id_post_event(tsg,
+			NVGPU_EVENT_ID_BPT_INT);
 	}
 	if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f()) {
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+		struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
 
-			gk20a_tsg_event_id_post_event(tsg,
-				NVGPU_EVENT_ID_BPT_PAUSE);
-		} else {
-			gk20a_channel_event_id_post_event(ch,
-				NVGPU_EVENT_ID_BPT_PAUSE);
-		}
+		gk20a_tsg_event_id_post_event(tsg,
+			NVGPU_EVENT_ID_BPT_PAUSE);
 	}
 
 	return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 5e2b9b82..08fe0365 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -87,5 +87,24 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg);
 int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 		u32 priority);
 
+struct gk20a_event_id_data {
+	struct gk20a *g;
+
+	int id; /* ch or tsg */
+	u32 event_id;
+
+	bool event_posted;
+
+	struct nvgpu_cond event_id_wq;
+	struct nvgpu_mutex lock;
+	struct nvgpu_list_node event_id_node;
+};
+
+static inline struct gk20a_event_id_data *
+gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node)
+{
+	return (struct gk20a_event_id_data *)
+		((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node));
+};
+
 
 #endif /* __TSG_GK20A_H_ */
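The gk20a_event_id_data_from_event_id_node() helper moved into tsg_gk20a.h above is the classic container-of idiom: subtract the embedded member's offset from the member's address to recover the enclosing struct. A standalone sketch of the arithmetic with hypothetical types (event_data and list_node are illustrative, not the nvgpu definitions):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct list_node {
		struct list_node *prev, *next;
	};

	struct event_data {
		int event_id;
		struct list_node node;	/* embedded, like event_id_node */
	};

	/* Recover the containing struct from a pointer to its embedded
	 * node: the same offsetof() arithmetic as the helper above. */
	static struct event_data *event_data_from_node(struct list_node *n)
	{
		return (struct event_data *)
			((uintptr_t)n - offsetof(struct event_data, node));
	}

	int main(void)
	{
		struct event_data d = { .event_id = 42 };

		printf("%d\n", event_data_from_node(&d.node)->event_id); /* 42 */
		return 0;
	}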