author		Terje Bergstrom <tbergstrom@nvidia.com>	2017-06-05 17:25:35 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-07 23:07:00 -0400
commit		942029a433390f3385ed9d6fc35476bbf9eafd98 (patch)
tree		5a3ad7164d71908c3b0da0d95fb0220cc247af36 /drivers/gpu/nvgpu/common/linux/intr.c
parent		fc724baa4becf051b3e6647858a6ded90f1cee86 (diff)
gpu: nvgpu: Split non-stall interrupt handling
Split handling of the non-stalling interrupt into a Linux-specific, chip-agnostic part and an OS-independent, chip-specific part. The Linux-specific, chip-independent part contains the ISR and passes control to a bottom-half worker. It uses the new MC HALs intr_nonstall (query interrupt status), intr_nonstall_pause (pause interrupts), intr_nonstall_resume (resume interrupts), and is_intr1_pending (query a per-engine interrupt bit). The MC HAL isr_nonstall is removed, because its work is now done in chip-independent code.

JIRA NVGPU-26

Change-Id: I3e4c9905ef6eef7f1cc9f71b0278518ae663f87e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1497048
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
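The call sites added below pin down the rough shape of the new MC HALs: intr_nonstall() returns the pending non-stall interrupt mask, intr_nonstall_pause()/intr_nonstall_resume() gate delivery around the bottom half, and is_intr1_pending() tests a unit's bit in that mask. A minimal sketch of the chip-specific side, which is not part of this file: all mc_gxxx_* names, the NONSTALL_INTR_ENABLE_MASK placeholder, the unit enum type, and the register accessors used here are illustrative assumptions, not taken from this patch.

/* Hypothetical chip-specific MC HAL wiring; signatures are inferred from
 * the call sites g->ops.mc.intr_nonstall(g) and
 * g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1) below. */

#include <nvgpu/unit.h>

#include "gk20a/gk20a.h"

/* Placeholder: the real enable mask and register names are chip specific. */
#define NONSTALL_INTR_ENABLE_MASK	0xffffffffU

static u32 mc_gxxx_intr_nonstall(struct gk20a *g)
{
	/* Latch the non-stall (mc_intr_1-style) interrupt status. */
	return gk20a_readl(g, mc_intr_1_r());
}

static void mc_gxxx_intr_nonstall_pause(struct gk20a *g)
{
	/* Mask non-stall interrupt delivery while the bottom half runs. */
	gk20a_writel(g, mc_intr_en_1_r(), 0);
}

static void mc_gxxx_intr_nonstall_resume(struct gk20a *g)
{
	/* Re-enable the sources configured at init time (illustrative). */
	gk20a_writel(g, mc_intr_en_1_r(), NONSTALL_INTR_ENABLE_MASK);
}

static bool mc_gxxx_is_intr1_pending(struct gk20a *g, enum nvgpu_unit unit,
				     u32 mc_intr_1)
{
	u32 mask = 0;

	switch (unit) {
	case NVGPU_UNIT_FIFO:
		mask = mc_intr_pfifo_pending_f();	/* illustrative */
		break;
	default:
		break;
	}

	return (mc_intr_1 & mask) != 0;
}

void gxxx_init_mc(struct gpu_ops *gops)
{
	gops->mc.intr_nonstall = mc_gxxx_intr_nonstall;
	gops->mc.intr_nonstall_pause = mc_gxxx_intr_nonstall_pause;
	gops->mc.intr_nonstall_resume = mc_gxxx_intr_nonstall_resume;
	gops->mc.is_intr1_pending = mc_gxxx_is_intr1_pending;
}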
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/intr.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/intr.c	84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c
index 77e44dd9..7d699dee 100644
--- a/drivers/gpu/nvgpu/common/linux/intr.c
+++ b/drivers/gpu/nvgpu/common/linux/intr.c
@@ -12,10 +12,12 @@
  */
 
 #include <trace/events/gk20a.h>
+#include <linux/irqreturn.h>
 
 #include "gk20a/gk20a.h"
 
 #include <nvgpu/atomic.h>
+#include <nvgpu/unit.h>
 
 irqreturn_t nvgpu_intr_stall(struct gk20a *g)
 {
@@ -56,3 +58,85 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 	return IRQ_HANDLED;
 }
 
+irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
+{
+	u32 mc_intr_1;
+	u32 hw_irq_count;
+	u32 engine_id_idx;
+	u32 active_engine_id = 0;
+	u32 engine_enum = ENGINE_INVAL_GK20A;
+	int ops_old, ops_new, ops = 0;
+	if (!g->power_on)
+		return IRQ_NONE;
+
+	/* not from gpu when sharing irq with others */
+	mc_intr_1 = g->ops.mc.intr_nonstall(g);
+	if (unlikely(!mc_intr_1))
+		return IRQ_NONE;
+
+	g->ops.mc.intr_nonstall_pause(g);
+
+	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
+		ops |= gk20a_fifo_nonstall_isr(g);
+
+	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
+			engine_id_idx++) {
+		struct fifo_engine_info_gk20a *engine_info;
+
+		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
+		engine_info = &g->fifo.engine_info[active_engine_id];
+
+		if (mc_intr_1 & engine_info->intr_mask) {
+			engine_enum = engine_info->engine_enum;
+			/* GR Engine */
+			if (engine_enum == ENGINE_GR_GK20A)
+				ops |= gk20a_gr_nonstall_isr(g);
+
+			/* CE Engine */
+			if (((engine_enum == ENGINE_GRCE_GK20A) ||
+				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
+				g->ops.ce2.isr_nonstall)
+				ops |= g->ops.ce2.isr_nonstall(g,
+					engine_info->inst_id,
+					engine_info->pri_base);
+		}
+	}
+	if (ops) {
+		do {
+			ops_old = atomic_read(&g->nonstall_ops);
+			ops_new = ops_old | ops;
+		} while (ops_old != atomic_cmpxchg(&g->nonstall_ops,
+					ops_old, ops_new));
+
+		queue_work(g->nonstall_work_queue, &g->nonstall_fn_work);
+	}
+
+	hw_irq_count = atomic_inc_return(&g->hw_irq_nonstall_count);
+
+	/* sync handled irq counter before re-enabling interrupts */
+	atomic_set(&g->sw_irq_nonstall_last_handled, hw_irq_count);
+
+	g->ops.mc.intr_nonstall_resume(g);
+
+	wake_up_all(&g->sw_irq_nonstall_last_handled_wq);
+
+	return IRQ_HANDLED;
+}
+
+void nvgpu_intr_nonstall_cb(struct work_struct *work)
+{
+	struct gk20a *g = container_of(work, struct gk20a, nonstall_fn_work);
+	u32 ops;
+	bool semaphore_wakeup, post_events;
+
+	do {
+		ops = atomic_xchg(&g->nonstall_ops, 0);
+
+		semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
+		post_events = ops & gk20a_nonstall_ops_post_events;
+
+		if (semaphore_wakeup)
+			gk20a_channel_semaphore_wakeup(g, post_events);
+
+	} while (atomic_read(&g->nonstall_ops) != 0);
+}
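For context, the glue that drives these two entry points lives elsewhere in the Linux layer: a wrapper with the kernel's irq_handler_t signature forwards the hard IRQ to nvgpu_intr_nonstall(), and nvgpu_intr_nonstall_cb() is bound to g->nonstall_fn_work on the workqueue that nvgpu_intr_nonstall() kicks via queue_work(). A minimal sketch of that wiring, assuming a device-probe path; the helper names and the WQ_HIGHPRI/"nvgpu_nonstall" choices are assumptions, not taken from this patch.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

#include "gk20a/gk20a.h"
/* Prototypes for nvgpu_intr_nonstall()/nvgpu_intr_nonstall_cb() come from
 * the driver's Linux intr header (not shown here). */

/* Hard-IRQ wrapper: unwrap the per-device gk20a pointer handed to
 * request_irq() and call the chip-agnostic handler added above. */
static irqreturn_t nvgpu_isr_nonstall(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_nonstall(g);
}

/* Hypothetical probe-time helper: create the bottom-half workqueue,
 * bind nvgpu_intr_nonstall_cb() to g->nonstall_fn_work, and request
 * the non-stall interrupt line. */
static int nvgpu_nonstall_intr_init(struct gk20a *g, struct device *dev,
				    unsigned int irq_nonstall)
{
	g->nonstall_work_queue = alloc_workqueue("%s", WQ_HIGHPRI, 1,
						 "nvgpu_nonstall");
	if (!g->nonstall_work_queue)
		return -ENOMEM;

	INIT_WORK(&g->nonstall_fn_work, nvgpu_intr_nonstall_cb);

	return devm_request_irq(dev, irq_nonstall, nvgpu_isr_nonstall, 0,
				"nvgpu_nonstall", g);
}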