Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/intr.c')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/intr.c | 122
1 file changed, 122 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/intr.c b/drivers/gpu/nvgpu/os/linux/intr.c
new file mode 100644
index 00000000..7ffc7e87
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/intr.c
@@ -0,0 +1,122 @@
/*
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <trace/events/gk20a.h>
#include <linux/irqreturn.h>

#include "gk20a/gk20a.h"
#include "gk20a/mc_gk20a.h"

#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>
#include "os_linux.h"

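/*
 * Hard-IRQ (top half) handler for the stalling interrupt line: return
 * IRQ_NONE if the GPU is powered off or the pending interrupt is not ours
 * (shared line), otherwise pause the stalling interrupt tree, bump the
 * hardware interrupt counter and wake the threaded handler below.
 */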
irqreturn_t nvgpu_intr_stall(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	u32 mc_intr_0;

	trace_mc_gk20a_intr_stall(g->name);

	if (!g->power_on)
		return IRQ_NONE;

	/* not from gpu when sharing irq with others */
	mc_intr_0 = g->ops.mc.intr_stall(g);
	if (unlikely(!mc_intr_0))
		return IRQ_NONE;

	g->ops.mc.intr_stall_pause(g);

	atomic_inc(&l->hw_irq_stall_count);

	trace_mc_gk20a_intr_stall_done(g->name);

	return IRQ_WAKE_THREAD;
}

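/*
 * Threaded (bottom half) handler woken by nvgpu_intr_stall(): service the
 * pending stalling interrupts, re-enable them, then publish the handled
 * interrupt count and broadcast to anyone waiting on it.
 */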
irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	int hw_irq_count;

	nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");

	trace_mc_gk20a_intr_thread_stall(g->name);

	hw_irq_count = atomic_read(&l->hw_irq_stall_count);
	g->ops.mc.isr_stall(g);
	g->ops.mc.intr_stall_resume(g);
	/* sync handled irq counter before re-enabling interrupts */
	atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count);

	nvgpu_cond_broadcast(&l->sw_irq_stall_last_handled_wq);

	trace_mc_gk20a_intr_thread_stall_done(g->name);

	return IRQ_HANDLED;
}

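/*
 * Hard-IRQ handler for the non-stalling interrupt line. Everything runs in
 * interrupt context: the MC-level ISR returns a bitmask of follow-up
 * operations, which is accumulated in l->nonstall_ops and deferred to the
 * nonstall workqueue.
 */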
irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
{
	u32 non_stall_intr_val;
	u32 hw_irq_count;
	int ops_old, ops_new, ops = 0;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	if (!g->power_on)
		return IRQ_NONE;

	/* not from gpu when sharing irq with others */
	non_stall_intr_val = g->ops.mc.intr_nonstall(g);
	if (unlikely(!non_stall_intr_val))
		return IRQ_NONE;

	g->ops.mc.intr_nonstall_pause(g);

	ops = g->ops.mc.isr_nonstall(g);
	if (ops) {
		/*
		 * Accumulate the new ops bits into nonstall_ops without
		 * losing updates posted concurrently (the work callback
		 * clears the word with atomic_xchg()).
		 */
		do {
			ops_old = atomic_read(&l->nonstall_ops);
			ops_new = ops_old | ops;
		} while (ops_old != atomic_cmpxchg(&l->nonstall_ops,
						ops_old, ops_new));

		queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
	}

	hw_irq_count = atomic_inc_return(&l->hw_irq_nonstall_count);

	/* sync handled irq counter before re-enabling interrupts */
	atomic_set(&l->sw_irq_nonstall_last_handled, hw_irq_count);

	g->ops.mc.intr_nonstall_resume(g);

	nvgpu_cond_broadcast(&l->sw_irq_nonstall_last_handled_wq);

	return IRQ_HANDLED;
}

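/*
 * Workqueue callback for the deferred non-stalling work: atomically consume
 * the accumulated ops bitmask and hand it to the MC layer, repeating until
 * no new ops have been posted in the meantime.
 */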
void nvgpu_intr_nonstall_cb(struct work_struct *work)
{
	struct nvgpu_os_linux *l =
		container_of(work, struct nvgpu_os_linux, nonstall_fn_work);
	struct gk20a *g = &l->g;

	do {
		u32 ops;

		ops = atomic_xchg(&l->nonstall_ops, 0);
		mc_gk20a_handle_intr_nonstall(g, ops);
	} while (atomic_read(&l->nonstall_ops) != 0);
}
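
These entry points are not attached to an IRQ line in this file; that happens in the driver's platform/module code, which is outside this diff. As a rough illustration only, a stalling interrupt line would typically be hooked up with request_threaded_irq(), using nvgpu_intr_stall() as the hard-IRQ handler and nvgpu_intr_thread_stall() as the threaded handler. The wrapper and function names, the "nvgpu_stall" name string, and the stall_irq parameter below are assumptions for illustration, not part of nvgpu:

/* Illustrative glue only (not part of this change). Needs <linux/interrupt.h>. */
static irqreturn_t nvgpu_stall_isr(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	/* returns IRQ_WAKE_THREAD when there is stalling work to do */
	return nvgpu_intr_stall(g);
}

static irqreturn_t nvgpu_stall_isr_thread(int irq, void *dev_id)
{
	struct gk20a *g = dev_id;

	return nvgpu_intr_thread_stall(g);
}

static int nvgpu_request_stall_irq(struct gk20a *g, unsigned int stall_irq)
{
	/* flags are platform specific; 0 here for simplicity */
	return request_threaded_irq(stall_irq, nvgpu_stall_isr,
				    nvgpu_stall_isr_thread, 0,
				    "nvgpu_stall", g);
}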