Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/intr.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/intr.c  154
1 file changed, 154 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c
new file mode 100644
index 00000000..d1b6ef36
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/intr.c
@@ -0,0 +1,154 @@
/*
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <trace/events/gk20a.h>
#include <linux/irqreturn.h>

#include "gk20a/gk20a.h"

#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>
#include "os_linux.h"

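/*
 * Stalling interrupt top half, run in hard-irq context. Returns
 * IRQ_NONE when the GPU is powered off or did not raise the interrupt
 * (the line may be shared); otherwise masks further stalling
 * interrupts and returns IRQ_WAKE_THREAD so the core runs
 * nvgpu_intr_thread_stall() from the IRQ thread.
 */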
irqreturn_t nvgpu_intr_stall(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	u32 mc_intr_0;

	trace_mc_gk20a_intr_stall(g->name);

	if (!g->power_on)
		return IRQ_NONE;

	/* not from the GPU; the IRQ line may be shared with other devices */
	mc_intr_0 = g->ops.mc.intr_stall(g);
	if (unlikely(!mc_intr_0))
		return IRQ_NONE;

	g->ops.mc.intr_stall_pause(g);

	atomic_inc(&l->hw_irq_stall_count);

	trace_mc_gk20a_intr_stall_done(g->name);

	return IRQ_WAKE_THREAD;
}

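/*
 * Stalling interrupt bottom half, run from the IRQ thread. Services
 * the pending stalling interrupts, re-enables them, and publishes the
 * handled count so waiters on sw_irq_stall_last_handled_wq can tell
 * when an interrupt has been fully processed.
 */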
irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	int hw_irq_count;

	gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");

	trace_mc_gk20a_intr_thread_stall(g->name);

	hw_irq_count = atomic_read(&l->hw_irq_stall_count);
	g->ops.mc.isr_stall(g);
	g->ops.mc.intr_stall_resume(g);
	/* sync the handled irq counter now that the interrupt is serviced */
	atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count);

	nvgpu_cond_broadcast(&l->sw_irq_stall_last_handled_wq);

	trace_mc_gk20a_intr_thread_stall_done(g->name);

	return IRQ_HANDLED;
}

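/*
 * Non-stalling interrupt handler, serviced entirely in hard-irq
 * context. Polls the FIFO and each active engine for pending
 * non-stalling interrupts, collects the requested follow-up
 * operations into a bitmask, and defers the actual work (semaphore
 * wakeups, event posting) to the nonstall workqueue.
 */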
irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
{
	u32 mc_intr_1;
	u32 hw_irq_count;
	u32 engine_id_idx;
	u32 active_engine_id = 0;
	u32 engine_enum = ENGINE_INVAL_GK20A;
	int ops_old, ops_new, ops = 0;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	if (!g->power_on)
		return IRQ_NONE;

	/* not from the GPU; the IRQ line may be shared with other devices */
	mc_intr_1 = g->ops.mc.intr_nonstall(g);
	if (unlikely(!mc_intr_1))
		return IRQ_NONE;

	g->ops.mc.intr_nonstall_pause(g);

	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
		ops |= gk20a_fifo_nonstall_isr(g);

	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
						engine_id_idx++) {
		struct fifo_engine_info_gk20a *engine_info;

		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
		engine_info = &g->fifo.engine_info[active_engine_id];

		if (mc_intr_1 & engine_info->intr_mask) {
			engine_enum = engine_info->engine_enum;
			/* GR engine */
			if (engine_enum == ENGINE_GR_GK20A)
				ops |= gk20a_gr_nonstall_isr(g);

			/* CE engine */
			if (((engine_enum == ENGINE_GRCE_GK20A) ||
			     (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
			    g->ops.ce2.isr_nonstall)
				ops |= g->ops.ce2.isr_nonstall(g,
						engine_info->inst_id,
						engine_info->pri_base);
		}
	}
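	/*
	 * Merge the collected ops into l->nonstall_ops with a lock-free
	 * OR: retry the cmpxchg until our bits land on top of whatever
	 * the work callback may have consumed concurrently.
	 */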
	if (ops) {
		do {
			ops_old = atomic_read(&l->nonstall_ops);
			ops_new = ops_old | ops;
		} while (ops_old != atomic_cmpxchg(&l->nonstall_ops,
						ops_old, ops_new));

		queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
	}

	hw_irq_count = atomic_inc_return(&l->hw_irq_nonstall_count);

	/* sync handled irq counter before re-enabling interrupts */
	atomic_set(&l->sw_irq_nonstall_last_handled, hw_irq_count);

	g->ops.mc.intr_nonstall_resume(g);

	nvgpu_cond_broadcast(&l->sw_irq_nonstall_last_handled_wq);

	return IRQ_HANDLED;
}

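/*
 * Bottom half for the non-stalling path, run from the nonstall
 * workqueue. Consumes the accumulated ops bits and performs the
 * requested wakeups, looping in case the IRQ handler posted new bits
 * while this batch was being processed.
 */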
void nvgpu_intr_nonstall_cb(struct work_struct *work)
{
	struct nvgpu_os_linux *l =
		container_of(work, struct nvgpu_os_linux, nonstall_fn_work);
	struct gk20a *g = &l->g;
	u32 ops;
	bool semaphore_wakeup, post_events;

	do {
		ops = atomic_xchg(&l->nonstall_ops, 0);

		semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
		post_events = ops & gk20a_nonstall_ops_post_events;

		if (semaphore_wakeup)
			gk20a_channel_semaphore_wakeup(g, post_events);

	} while (atomic_read(&l->nonstall_ops) != 0);
}
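
nvgpu_intr_stall() returning IRQ_WAKE_THREAD only takes effect when it is registered as the hard half of a threaded interrupt, and the IRQ_NONE returns rely on the lines being requested as shared. A minimal sketch of how a probe path could wire the three handlers up follows; the wrapper and helper names (stall_isr, stall_thread, nonstall_isr, nvgpu_request_irqs) and the IRQ numbers are hypothetical, since the actual registration lives elsewhere in the driver:

#include <linux/interrupt.h>

/* hypothetical wrappers adapting irq_handler_t to the nvgpu entry points */
static irqreturn_t stall_isr(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_stall(g);
}

static irqreturn_t stall_thread(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_thread_stall(g);
}

static irqreturn_t nonstall_isr(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_nonstall(g);
}

/* hypothetical helper: request both GPU interrupt lines at probe time */
static int nvgpu_request_irqs(struct gk20a *g, int stall_irq, int nonstall_irq)
{
	int err;

	/* stalling path: hard half wakes the threaded bottom half */
	err = request_threaded_irq(stall_irq, stall_isr, stall_thread,
				   IRQF_SHARED, "nvgpu_stall", g);
	if (err)
		return err;

	/* non-stalling path: serviced entirely in the hard handler */
	err = request_irq(nonstall_irq, nonstall_isr, IRQF_SHARED,
			  "nvgpu_nonstall", g);
	if (err)
		free_irq(stall_irq, g);

	return err;
}

With IRQF_SHARED the handlers must tolerate being called for interrupts they did not raise, which is exactly what the "not from the GPU" checks and IRQ_NONE returns above provide.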