author    Terje Bergstrom <tbergstrom@nvidia.com>             2017-07-27 15:15:19 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com> 2017-08-03 11:44:58 -0400
commit    e1df72771ba5e5331888f5bfc171f71bd8f4aed7 (patch)
tree      26e367639be69587c2ba577a2b0a4ea8cb91efce /drivers/gpu/nvgpu/common
parent    11e29991acd25baef5b786605e136b5e71737b8e (diff)
gpu: nvgpu: Move isr related fields from gk20a

Move the fields in struct gk20a related to interrupt handling into the
Linux-specific struct nvgpu_os_linux. At the same time, move the
interrupt counter logic from the HAL functions into Linux-specific code,
and move two Linux-specific power management functions from the generic
gk20a.c to the Linux-specific module.c.

JIRA NVGPU-123

Change-Id: I0a08fd2e81297c8dff7a85c263ded928496c4de0
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1528177
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sourab Gupta <sourabg@nvidia.com>
GVS: Gerrit_Virtual_Submit
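The refactor depends on being able to get back from the generic struct gk20a to its Linux wrapper. A minimal sketch of that relationship, assuming the conventional container_of() form (the actual nvgpu_os_linux_from_gk20a() body sits outside the diff context below, and the struct fields here are trimmed placeholders):

#include <linux/kernel.h>	/* container_of() */

struct gk20a {
	bool power_on;
	/* ... OS-agnostic driver state ... */
};

struct nvgpu_os_linux {
	struct gk20a g;		/* generic core, embedded (see &l->g in intr.c below) */
	/* ... Linux-only wait queues, irq counters and work items ... */
};

/* Conventional container_of() form; the real inline lives in os_linux.h. */
static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g)
{
	return container_of(g, struct nvgpu_os_linux, g);
}

With this layout, Linux-only state such as the irq counters and work items can be reached from any path that holds a struct gk20a pointer, which is exactly what the hunks below do.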
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--   drivers/gpu/nvgpu/common/linux/driver_common.c  | 17
-rw-r--r--   drivers/gpu/nvgpu/common/linux/intr.c           | 34
-rw-r--r--   drivers/gpu/nvgpu/common/linux/module.c         | 35
-rw-r--r--   drivers/gpu/nvgpu/common/linux/module.h         |  1
-rw-r--r--   drivers/gpu/nvgpu/common/linux/os_linux.h       | 14
5 files changed, 79 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index 4e2cb2b4..bd1b7611 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -38,8 +38,8 @@ static void nvgpu_init_vars(struct gk20a *g)
 	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
 
-	init_waitqueue_head(&g->sw_irq_stall_last_handled_wq);
-	init_waitqueue_head(&g->sw_irq_nonstall_last_handled_wq);
+	init_waitqueue_head(&l->sw_irq_stall_last_handled_wq);
+	init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq);
 	gk20a_init_gr(g);
 
 	init_rwsem(&g->busy_lock);
@@ -236,18 +236,19 @@ static int cyclic_delta(int a, int b)
  */
 void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 {
-	int stall_irq_threshold = atomic_read(&g->hw_irq_stall_count);
-	int nonstall_irq_threshold = atomic_read(&g->hw_irq_nonstall_count);
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+	int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count);
+	int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
 
 	/* wait until all stalling irqs are handled */
-	wait_event(g->sw_irq_stall_last_handled_wq,
+	wait_event(l->sw_irq_stall_last_handled_wq,
 		   cyclic_delta(stall_irq_threshold,
-				atomic_read(&g->sw_irq_stall_last_handled))
+				atomic_read(&l->sw_irq_stall_last_handled))
 		   <= 0);
 
 	/* wait until all non-stalling irqs are handled */
-	wait_event(g->sw_irq_nonstall_last_handled_wq,
+	wait_event(l->sw_irq_nonstall_last_handled_wq,
 		   cyclic_delta(nonstall_irq_threshold,
-				atomic_read(&g->sw_irq_nonstall_last_handled))
+				atomic_read(&l->sw_irq_nonstall_last_handled))
 		   <= 0);
 }
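For context, the wait conditions above become true once the software "last handled" counter has caught up with the hardware counter snapshot taken on entry. cyclic_delta() itself is outside this hunk, so the body below is an assumption about its usual shape; the point is that the comparison stays correct even after the free-running counters wrap:

/*
 * Assumed helper shape: a wrap-safe comparison of two free-running int
 * counters is typically a signed subtraction (the kernel builds with
 * -fno-strict-overflow, so the wrap is well defined).  A positive result
 * means 'a' is still ahead of 'b'; zero or negative means the handled
 * counter has caught up, which is what the wait_event() calls sleep on:
 *
 *   cyclic_delta(INT_MIN + 2, INT_MAX) ==  3   (3 irqs still pending)
 *   cyclic_delta(INT_MAX, INT_MAX)     ==  0   (everything handled)
 */
static int cyclic_delta(int a, int b)
{
	return a - b;
}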
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c
index 7d699dee..da177b55 100644
--- a/drivers/gpu/nvgpu/common/linux/intr.c
+++ b/drivers/gpu/nvgpu/common/linux/intr.c
@@ -18,9 +18,11 @@
 
 #include <nvgpu/atomic.h>
 #include <nvgpu/unit.h>
+#include "os_linux.h"
 
 irqreturn_t nvgpu_intr_stall(struct gk20a *g)
 {
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	u32 mc_intr_0;
 
 	trace_mc_gk20a_intr_stall(g->name);
@@ -35,7 +37,7 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
 
 	g->ops.mc.intr_stall_pause(g);
 
-	atomic_inc(&g->hw_irq_stall_count);
+	atomic_inc(&l->hw_irq_stall_count);
 
 	trace_mc_gk20a_intr_stall_done(g->name);
 
@@ -44,14 +46,20 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
 
 irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 {
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+	int hw_irq_count;
+
 	gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");
 
 	trace_mc_gk20a_intr_thread_stall(g->name);
 
+	hw_irq_count = atomic_read(&l->hw_irq_stall_count);
 	g->ops.mc.isr_stall(g);
 	g->ops.mc.intr_stall_resume(g);
+	/* sync handled irq counter before re-enabling interrupts */
+	atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count);
 
-	wake_up_all(&g->sw_irq_stall_last_handled_wq);
+	wake_up_all(&l->sw_irq_stall_last_handled_wq);
 
 	trace_mc_gk20a_intr_thread_stall_done(g->name);
 
@@ -66,6 +74,8 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
 	u32 active_engine_id = 0;
 	u32 engine_enum = ENGINE_INVAL_GK20A;
 	int ops_old, ops_new, ops = 0;
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+
 	if (!g->power_on)
 		return IRQ_NONE;
 
@@ -103,34 +113,36 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
 	}
 	if (ops) {
 		do {
-			ops_old = atomic_read(&g->nonstall_ops);
+			ops_old = atomic_read(&l->nonstall_ops);
 			ops_new = ops_old | ops;
-		} while (ops_old != atomic_cmpxchg(&g->nonstall_ops,
+		} while (ops_old != atomic_cmpxchg(&l->nonstall_ops,
 						ops_old, ops_new));
 
-		queue_work(g->nonstall_work_queue, &g->nonstall_fn_work);
+		queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
 	}
 
-	hw_irq_count = atomic_inc_return(&g->hw_irq_nonstall_count);
+	hw_irq_count = atomic_inc_return(&l->hw_irq_nonstall_count);
 
 	/* sync handled irq counter before re-enabling interrupts */
-	atomic_set(&g->sw_irq_nonstall_last_handled, hw_irq_count);
+	atomic_set(&l->sw_irq_nonstall_last_handled, hw_irq_count);
 
 	g->ops.mc.intr_nonstall_resume(g);
 
-	wake_up_all(&g->sw_irq_nonstall_last_handled_wq);
+	wake_up_all(&l->sw_irq_nonstall_last_handled_wq);
 
 	return IRQ_HANDLED;
 }
 
 void nvgpu_intr_nonstall_cb(struct work_struct *work)
 {
-	struct gk20a *g = container_of(work, struct gk20a, nonstall_fn_work);
+	struct nvgpu_os_linux *l =
+		container_of(work, struct nvgpu_os_linux, nonstall_fn_work);
+	struct gk20a *g = &l->g;
 	u32 ops;
 	bool semaphore_wakeup, post_events;
 
 	do {
-		ops = atomic_xchg(&g->nonstall_ops, 0);
+		ops = atomic_xchg(&l->nonstall_ops, 0);
 
 		semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
 		post_events = ops & gk20a_nonstall_ops_post_events;
@@ -138,5 +150,5 @@ void nvgpu_intr_nonstall_cb(struct work_struct *work)
 		if (semaphore_wakeup)
 			gk20a_channel_semaphore_wakeup(g, post_events);
 
-	} while (atomic_read(&g->nonstall_ops) != 0);
+	} while (atomic_read(&l->nonstall_ops) != 0);
 }
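The nonstall path hands work to process context without a lock: the hard irq handler ORs new bits into nonstall_ops with a cmpxchg loop and queues the work item, while nvgpu_intr_nonstall_cb() drains the mask with an exchange and loops until no bits remain. A condensed sketch of just that hand-off (nonstall_ops_add() and nonstall_ops_drain() are hypothetical names, not nvgpu functions):

#include <linux/atomic.h>

/* Hard-irq side: OR new work bits into the pending mask without a lock. */
static void nonstall_ops_add(atomic_t *nonstall_ops, int ops)
{
	int ops_old, ops_new;

	do {
		ops_old = atomic_read(nonstall_ops);
		ops_new = ops_old | ops;
	} while (ops_old != atomic_cmpxchg(nonstall_ops, ops_old, ops_new));
}

/* Workqueue side: atomically take and clear every pending bit. */
static int nonstall_ops_drain(atomic_t *nonstall_ops)
{
	return atomic_xchg(nonstall_ops, 0);
}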
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index d19a7a45..cbfe6ad7 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -136,6 +136,7 @@ void gk20a_idle(struct gk20a *g)
 int gk20a_pm_finalize_poweron(struct device *dev)
 {
 	struct gk20a *g = get_gk20a(dev);
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	struct gk20a_platform *platform = gk20a_get_platform(dev);
 	int err, nice_value;
 
@@ -163,10 +164,10 @@ int gk20a_pm_finalize_poweron(struct device *dev)
 	set_user_nice(current, -20);
 
 	/* Enable interrupt workqueue */
-	if (!g->nonstall_work_queue) {
-		g->nonstall_work_queue = alloc_workqueue("%s",
+	if (!l->nonstall_work_queue) {
+		l->nonstall_work_queue = alloc_workqueue("%s",
 				WQ_HIGHPRI, 1, "mc_nonstall");
-		INIT_WORK(&g->nonstall_fn_work, nvgpu_intr_nonstall_cb);
+		INIT_WORK(&l->nonstall_fn_work, nvgpu_intr_nonstall_cb);
 	}
 
 	err = gk20a_finalize_poweron(g);
@@ -827,6 +828,34 @@ static int gk20a_pm_init(struct device *dev)
 	return err;
 }
 
+/*
+ * Start the process for unloading the driver. Set NVGPU_DRIVER_IS_DYING.
+ */
+void gk20a_driver_start_unload(struct gk20a *g)
+{
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+
+	gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n");
+
+	down_write(&g->busy_lock);
+	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
+	up_write(&g->busy_lock);
+
+	if (g->is_virtual)
+		return;
+
+	gk20a_wait_for_idle(dev_from_gk20a(g));
+
+	nvgpu_wait_for_deferred_interrupts(g);
+	gk20a_channel_cancel_pending_sema_waits(g);
+
+	if (l->nonstall_work_queue) {
+		cancel_work_sync(&l->nonstall_fn_work);
+		destroy_workqueue(l->nonstall_work_queue);
+		l->nonstall_work_queue = NULL;
+	}
+}
+
 static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a)
 {
 	gk20a_get_platform(&pdev->dev)->g = gk20a;
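The unload path added above is the mirror image of the workqueue setup in gk20a_pm_finalize_poweron(): cancel_work_sync() guarantees nvgpu_intr_nonstall_cb() is no longer running before destroy_workqueue() frees the queue. A hypothetical helper (nonstall_workqueue_teardown() is not an nvgpu function) capturing just that ordering:

#include <linux/workqueue.h>

/*
 * Sketch of the teardown half that gk20a_driver_start_unload() performs
 * inline: flush any pending callback, then free the queue created at
 * poweron.  Safe to call even if poweron never created the queue.
 */
static void nonstall_workqueue_teardown(struct workqueue_struct **wq,
					struct work_struct *work)
{
	if (!*wq)
		return;

	cancel_work_sync(work);
	destroy_workqueue(*wq);
	*wq = NULL;
}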
diff --git a/drivers/gpu/nvgpu/common/linux/module.h b/drivers/gpu/nvgpu/common/linux/module.h
index 0fde0b41..cfbbc0c7 100644
--- a/drivers/gpu/nvgpu/common/linux/module.h
+++ b/drivers/gpu/nvgpu/common/linux/module.h
@@ -18,6 +18,7 @@ struct device;
 
 int gk20a_pm_finalize_poweron(struct device *dev);
 void gk20a_remove_support(struct gk20a *g);
+void gk20a_driver_start_unload(struct gk20a *g);
 
 extern struct class nvgpu_class;
 
diff --git a/drivers/gpu/nvgpu/common/linux/os_linux.h b/drivers/gpu/nvgpu/common/linux/os_linux.h
index cf012acc..8f304fe3 100644
--- a/drivers/gpu/nvgpu/common/linux/os_linux.h
+++ b/drivers/gpu/nvgpu/common/linux/os_linux.h
@@ -69,6 +69,20 @@ struct nvgpu_os_linux {
 	struct devfreq *devfreq;
 
 	struct device_dma_parameters dma_parms;
+
+	atomic_t hw_irq_stall_count;
+	atomic_t hw_irq_nonstall_count;
+
+	wait_queue_head_t sw_irq_stall_last_handled_wq;
+	atomic_t sw_irq_stall_last_handled;
+
+	atomic_t nonstall_ops;
+
+	wait_queue_head_t sw_irq_nonstall_last_handled_wq;
+	atomic_t sw_irq_nonstall_last_handled;
+
+	struct work_struct nonstall_fn_work;
+	struct workqueue_struct *nonstall_work_queue;
 };
 
 static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g)