diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2017-07-27 15:15:19 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-08-03 11:44:58 -0400 |
commit | e1df72771ba5e5331888f5bfc171f71bd8f4aed7 (patch) | |
tree | 26e367639be69587c2ba577a2b0a4ea8cb91efce | |
parent | 11e29991acd25baef5b786605e136b5e71737b8e (diff) |
gpu: nvgpu: Move isr related fields from gk20a
Move fields in struct gk20a related to interrupt handling into
the Linux-specific struct nvgpu_os_linux. At the same time, move the
counter logic from functions in the HAL into Linux-specific code, and
move two Linux-specific power management functions from the generic
gk20a.c to the Linux-specific module.c.
JIRA NVGPU-123
Change-Id: I0a08fd2e81297c8dff7a85c263ded928496c4de0
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1528177
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sourab Gupta <sourabg@nvidia.com>
GVS: Gerrit_Virtual_Submit
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/driver_common.c | 17 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/intr.c | 34 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/module.c | 35 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/module.h | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/os_linux.h | 14 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.c | 26 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 14 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mc_gk20a.c | 5 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mc_gp10b.c | 5 |
9 files changed, 79 insertions, 72 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c index 4e2cb2b4..bd1b7611 100644 --- a/drivers/gpu/nvgpu/common/linux/driver_common.c +++ b/drivers/gpu/nvgpu/common/linux/driver_common.c | |||
@@ -38,8 +38,8 @@ static void nvgpu_init_vars(struct gk20a *g) | |||
38 | struct device *dev = dev_from_gk20a(g); | 38 | struct device *dev = dev_from_gk20a(g); |
39 | struct gk20a_platform *platform = dev_get_drvdata(dev); | 39 | struct gk20a_platform *platform = dev_get_drvdata(dev); |
40 | 40 | ||
41 | init_waitqueue_head(&g->sw_irq_stall_last_handled_wq); | 41 | init_waitqueue_head(&l->sw_irq_stall_last_handled_wq); |
42 | init_waitqueue_head(&g->sw_irq_nonstall_last_handled_wq); | 42 | init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq); |
43 | gk20a_init_gr(g); | 43 | gk20a_init_gr(g); |
44 | 44 | ||
45 | init_rwsem(&g->busy_lock); | 45 | init_rwsem(&g->busy_lock); |
@@ -236,18 +236,19 @@ static int cyclic_delta(int a, int b) | |||
236 | */ | 236 | */ |
237 | void nvgpu_wait_for_deferred_interrupts(struct gk20a *g) | 237 | void nvgpu_wait_for_deferred_interrupts(struct gk20a *g) |
238 | { | 238 | { |
239 | int stall_irq_threshold = atomic_read(&g->hw_irq_stall_count); | 239 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); |
240 | int nonstall_irq_threshold = atomic_read(&g->hw_irq_nonstall_count); | 240 | int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count); |
241 | int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count); | ||
241 | 242 | ||
242 | /* wait until all stalling irqs are handled */ | 243 | /* wait until all stalling irqs are handled */ |
243 | wait_event(g->sw_irq_stall_last_handled_wq, | 244 | wait_event(l->sw_irq_stall_last_handled_wq, |
244 | cyclic_delta(stall_irq_threshold, | 245 | cyclic_delta(stall_irq_threshold, |
245 | atomic_read(&g->sw_irq_stall_last_handled)) | 246 | atomic_read(&l->sw_irq_stall_last_handled)) |
246 | <= 0); | 247 | <= 0); |
247 | 248 | ||
248 | /* wait until all non-stalling irqs are handled */ | 249 | /* wait until all non-stalling irqs are handled */ |
249 | wait_event(g->sw_irq_nonstall_last_handled_wq, | 250 | wait_event(l->sw_irq_nonstall_last_handled_wq, |
250 | cyclic_delta(nonstall_irq_threshold, | 251 | cyclic_delta(nonstall_irq_threshold, |
251 | atomic_read(&g->sw_irq_nonstall_last_handled)) | 252 | atomic_read(&l->sw_irq_nonstall_last_handled)) |
252 | <= 0); | 253 | <= 0); |
253 | } | 254 | } |
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c index 7d699dee..da177b55 100644 --- a/drivers/gpu/nvgpu/common/linux/intr.c +++ b/drivers/gpu/nvgpu/common/linux/intr.c | |||
@@ -18,9 +18,11 @@ | |||
18 | 18 | ||
19 | #include <nvgpu/atomic.h> | 19 | #include <nvgpu/atomic.h> |
20 | #include <nvgpu/unit.h> | 20 | #include <nvgpu/unit.h> |
21 | #include "os_linux.h" | ||
21 | 22 | ||
22 | irqreturn_t nvgpu_intr_stall(struct gk20a *g) | 23 | irqreturn_t nvgpu_intr_stall(struct gk20a *g) |
23 | { | 24 | { |
25 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
24 | u32 mc_intr_0; | 26 | u32 mc_intr_0; |
25 | 27 | ||
26 | trace_mc_gk20a_intr_stall(g->name); | 28 | trace_mc_gk20a_intr_stall(g->name); |
@@ -35,7 +37,7 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g) | |||
35 | 37 | ||
36 | g->ops.mc.intr_stall_pause(g); | 38 | g->ops.mc.intr_stall_pause(g); |
37 | 39 | ||
38 | atomic_inc(&g->hw_irq_stall_count); | 40 | atomic_inc(&l->hw_irq_stall_count); |
39 | 41 | ||
40 | trace_mc_gk20a_intr_stall_done(g->name); | 42 | trace_mc_gk20a_intr_stall_done(g->name); |
41 | 43 | ||
@@ -44,14 +46,20 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g) | |||
44 | 46 | ||
45 | irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g) | 47 | irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g) |
46 | { | 48 | { |
49 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
50 | int hw_irq_count; | ||
51 | |||
47 | gk20a_dbg(gpu_dbg_intr, "interrupt thread launched"); | 52 | gk20a_dbg(gpu_dbg_intr, "interrupt thread launched"); |
48 | 53 | ||
49 | trace_mc_gk20a_intr_thread_stall(g->name); | 54 | trace_mc_gk20a_intr_thread_stall(g->name); |
50 | 55 | ||
56 | hw_irq_count = atomic_read(&l->hw_irq_stall_count); | ||
51 | g->ops.mc.isr_stall(g); | 57 | g->ops.mc.isr_stall(g); |
52 | g->ops.mc.intr_stall_resume(g); | 58 | g->ops.mc.intr_stall_resume(g); |
59 | /* sync handled irq counter before re-enabling interrupts */ | ||
60 | atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count); | ||
53 | 61 | ||
54 | wake_up_all(&g->sw_irq_stall_last_handled_wq); | 62 | wake_up_all(&l->sw_irq_stall_last_handled_wq); |
55 | 63 | ||
56 | trace_mc_gk20a_intr_thread_stall_done(g->name); | 64 | trace_mc_gk20a_intr_thread_stall_done(g->name); |
57 | 65 | ||
@@ -66,6 +74,8 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g) | |||
66 | u32 active_engine_id = 0; | 74 | u32 active_engine_id = 0; |
67 | u32 engine_enum = ENGINE_INVAL_GK20A; | 75 | u32 engine_enum = ENGINE_INVAL_GK20A; |
68 | int ops_old, ops_new, ops = 0; | 76 | int ops_old, ops_new, ops = 0; |
77 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
78 | |||
69 | if (!g->power_on) | 79 | if (!g->power_on) |
70 | return IRQ_NONE; | 80 | return IRQ_NONE; |
71 | 81 | ||
@@ -103,34 +113,36 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g) | |||
103 | } | 113 | } |
104 | if (ops) { | 114 | if (ops) { |
105 | do { | 115 | do { |
106 | ops_old = atomic_read(&g->nonstall_ops); | 116 | ops_old = atomic_read(&l->nonstall_ops); |
107 | ops_new = ops_old | ops; | 117 | ops_new = ops_old | ops; |
108 | } while (ops_old != atomic_cmpxchg(&g->nonstall_ops, | 118 | } while (ops_old != atomic_cmpxchg(&l->nonstall_ops, |
109 | ops_old, ops_new)); | 119 | ops_old, ops_new)); |
110 | 120 | ||
111 | queue_work(g->nonstall_work_queue, &g->nonstall_fn_work); | 121 | queue_work(l->nonstall_work_queue, &l->nonstall_fn_work); |
112 | } | 122 | } |
113 | 123 | ||
114 | hw_irq_count = atomic_inc_return(&g->hw_irq_nonstall_count); | 124 | hw_irq_count = atomic_inc_return(&l->hw_irq_nonstall_count); |
115 | 125 | ||
116 | /* sync handled irq counter before re-enabling interrupts */ | 126 | /* sync handled irq counter before re-enabling interrupts */ |
117 | atomic_set(&g->sw_irq_nonstall_last_handled, hw_irq_count); | 127 | atomic_set(&l->sw_irq_nonstall_last_handled, hw_irq_count); |
118 | 128 | ||
119 | g->ops.mc.intr_nonstall_resume(g); | 129 | g->ops.mc.intr_nonstall_resume(g); |
120 | 130 | ||
121 | wake_up_all(&g->sw_irq_nonstall_last_handled_wq); | 131 | wake_up_all(&l->sw_irq_nonstall_last_handled_wq); |
122 | 132 | ||
123 | return IRQ_HANDLED; | 133 | return IRQ_HANDLED; |
124 | } | 134 | } |
125 | 135 | ||
126 | void nvgpu_intr_nonstall_cb(struct work_struct *work) | 136 | void nvgpu_intr_nonstall_cb(struct work_struct *work) |
127 | { | 137 | { |
128 | struct gk20a *g = container_of(work, struct gk20a, nonstall_fn_work); | 138 | struct nvgpu_os_linux *l = |
139 | container_of(work, struct nvgpu_os_linux, nonstall_fn_work); | ||
140 | struct gk20a *g = &l->g; | ||
129 | u32 ops; | 141 | u32 ops; |
130 | bool semaphore_wakeup, post_events; | 142 | bool semaphore_wakeup, post_events; |
131 | 143 | ||
132 | do { | 144 | do { |
133 | ops = atomic_xchg(&g->nonstall_ops, 0); | 145 | ops = atomic_xchg(&l->nonstall_ops, 0); |
134 | 146 | ||
135 | semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore; | 147 | semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore; |
136 | post_events = ops & gk20a_nonstall_ops_post_events; | 148 | post_events = ops & gk20a_nonstall_ops_post_events; |
@@ -138,5 +150,5 @@ void nvgpu_intr_nonstall_cb(struct work_struct *work) | |||
138 | if (semaphore_wakeup) | 150 | if (semaphore_wakeup) |
139 | gk20a_channel_semaphore_wakeup(g, post_events); | 151 | gk20a_channel_semaphore_wakeup(g, post_events); |
140 | 152 | ||
141 | } while (atomic_read(&g->nonstall_ops) != 0); | 153 | } while (atomic_read(&l->nonstall_ops) != 0); |
142 | } | 154 | } |
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c index d19a7a45..cbfe6ad7 100644 --- a/drivers/gpu/nvgpu/common/linux/module.c +++ b/drivers/gpu/nvgpu/common/linux/module.c | |||
@@ -136,6 +136,7 @@ void gk20a_idle(struct gk20a *g) | |||
136 | int gk20a_pm_finalize_poweron(struct device *dev) | 136 | int gk20a_pm_finalize_poweron(struct device *dev) |
137 | { | 137 | { |
138 | struct gk20a *g = get_gk20a(dev); | 138 | struct gk20a *g = get_gk20a(dev); |
139 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
139 | struct gk20a_platform *platform = gk20a_get_platform(dev); | 140 | struct gk20a_platform *platform = gk20a_get_platform(dev); |
140 | int err, nice_value; | 141 | int err, nice_value; |
141 | 142 | ||
@@ -163,10 +164,10 @@ int gk20a_pm_finalize_poweron(struct device *dev) | |||
163 | set_user_nice(current, -20); | 164 | set_user_nice(current, -20); |
164 | 165 | ||
165 | /* Enable interrupt workqueue */ | 166 | /* Enable interrupt workqueue */ |
166 | if (!g->nonstall_work_queue) { | 167 | if (!l->nonstall_work_queue) { |
167 | g->nonstall_work_queue = alloc_workqueue("%s", | 168 | l->nonstall_work_queue = alloc_workqueue("%s", |
168 | WQ_HIGHPRI, 1, "mc_nonstall"); | 169 | WQ_HIGHPRI, 1, "mc_nonstall"); |
169 | INIT_WORK(&g->nonstall_fn_work, nvgpu_intr_nonstall_cb); | 170 | INIT_WORK(&l->nonstall_fn_work, nvgpu_intr_nonstall_cb); |
170 | } | 171 | } |
171 | 172 | ||
172 | err = gk20a_finalize_poweron(g); | 173 | err = gk20a_finalize_poweron(g); |
@@ -827,6 +828,34 @@ static int gk20a_pm_init(struct device *dev) | |||
827 | return err; | 828 | return err; |
828 | } | 829 | } |
829 | 830 | ||
831 | /* | ||
832 | * Start the process for unloading the driver. Set NVGPU_DRIVER_IS_DYING. | ||
833 | */ | ||
834 | void gk20a_driver_start_unload(struct gk20a *g) | ||
835 | { | ||
836 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
837 | |||
838 | gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); | ||
839 | |||
840 | down_write(&g->busy_lock); | ||
841 | __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); | ||
842 | up_write(&g->busy_lock); | ||
843 | |||
844 | if (g->is_virtual) | ||
845 | return; | ||
846 | |||
847 | gk20a_wait_for_idle(dev_from_gk20a(g)); | ||
848 | |||
849 | nvgpu_wait_for_deferred_interrupts(g); | ||
850 | gk20a_channel_cancel_pending_sema_waits(g); | ||
851 | |||
852 | if (l->nonstall_work_queue) { | ||
853 | cancel_work_sync(&l->nonstall_fn_work); | ||
854 | destroy_workqueue(l->nonstall_work_queue); | ||
855 | l->nonstall_work_queue = NULL; | ||
856 | } | ||
857 | } | ||
858 | |||
830 | static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a) | 859 | static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a) |
831 | { | 860 | { |
832 | gk20a_get_platform(&pdev->dev)->g = gk20a; | 861 | gk20a_get_platform(&pdev->dev)->g = gk20a; |
diff --git a/drivers/gpu/nvgpu/common/linux/module.h b/drivers/gpu/nvgpu/common/linux/module.h index 0fde0b41..cfbbc0c7 100644 --- a/drivers/gpu/nvgpu/common/linux/module.h +++ b/drivers/gpu/nvgpu/common/linux/module.h | |||
@@ -18,6 +18,7 @@ struct device; | |||
18 | 18 | ||
19 | int gk20a_pm_finalize_poweron(struct device *dev); | 19 | int gk20a_pm_finalize_poweron(struct device *dev); |
20 | void gk20a_remove_support(struct gk20a *g); | 20 | void gk20a_remove_support(struct gk20a *g); |
21 | void gk20a_driver_start_unload(struct gk20a *g); | ||
21 | 22 | ||
22 | extern struct class nvgpu_class; | 23 | extern struct class nvgpu_class; |
23 | 24 | ||
diff --git a/drivers/gpu/nvgpu/common/linux/os_linux.h b/drivers/gpu/nvgpu/common/linux/os_linux.h index cf012acc..8f304fe3 100644 --- a/drivers/gpu/nvgpu/common/linux/os_linux.h +++ b/drivers/gpu/nvgpu/common/linux/os_linux.h | |||
@@ -69,6 +69,20 @@ struct nvgpu_os_linux { | |||
69 | struct devfreq *devfreq; | 69 | struct devfreq *devfreq; |
70 | 70 | ||
71 | struct device_dma_parameters dma_parms; | 71 | struct device_dma_parameters dma_parms; |
72 | |||
73 | atomic_t hw_irq_stall_count; | ||
74 | atomic_t hw_irq_nonstall_count; | ||
75 | |||
76 | wait_queue_head_t sw_irq_stall_last_handled_wq; | ||
77 | atomic_t sw_irq_stall_last_handled; | ||
78 | |||
79 | atomic_t nonstall_ops; | ||
80 | |||
81 | wait_queue_head_t sw_irq_nonstall_last_handled_wq; | ||
82 | atomic_t sw_irq_nonstall_last_handled; | ||
83 | |||
84 | struct work_struct nonstall_fn_work; | ||
85 | struct workqueue_struct *nonstall_work_queue; | ||
72 | }; | 86 | }; |
73 | 87 | ||
74 | static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g) | 88 | static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g) |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index 21e861fe..6350bcf5 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c | |||
@@ -358,32 +358,6 @@ int gk20a_can_busy(struct gk20a *g) | |||
358 | return 1; | 358 | return 1; |
359 | } | 359 | } |
360 | 360 | ||
361 | /* | ||
362 | * Start the process for unloading the driver. Set NVGPU_DRIVER_IS_DYING. | ||
363 | */ | ||
364 | void gk20a_driver_start_unload(struct gk20a *g) | ||
365 | { | ||
366 | gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); | ||
367 | |||
368 | down_write(&g->busy_lock); | ||
369 | __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); | ||
370 | up_write(&g->busy_lock); | ||
371 | |||
372 | if (g->is_virtual) | ||
373 | return; | ||
374 | |||
375 | gk20a_wait_for_idle(dev_from_gk20a(g)); | ||
376 | |||
377 | nvgpu_wait_for_deferred_interrupts(g); | ||
378 | gk20a_channel_cancel_pending_sema_waits(g); | ||
379 | |||
380 | if (g->nonstall_work_queue) { | ||
381 | cancel_work_sync(&g->nonstall_fn_work); | ||
382 | destroy_workqueue(g->nonstall_work_queue); | ||
383 | g->nonstall_work_queue = NULL; | ||
384 | } | ||
385 | } | ||
386 | |||
387 | int gk20a_wait_for_idle(struct device *dev) | 361 | int gk20a_wait_for_idle(struct device *dev) |
388 | { | 362 | { |
389 | struct gk20a *g = get_gk20a(dev); | 363 | struct gk20a *g = get_gk20a(dev); |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 90c35a7b..8d9318b2 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -1042,10 +1042,6 @@ struct gk20a { | |||
1042 | 1042 | ||
1043 | atomic_t usage_count; | 1043 | atomic_t usage_count; |
1044 | 1044 | ||
1045 | atomic_t nonstall_ops; | ||
1046 | struct work_struct nonstall_fn_work; | ||
1047 | struct workqueue_struct *nonstall_work_queue; | ||
1048 | |||
1049 | struct kref refcount; | 1045 | struct kref refcount; |
1050 | 1046 | ||
1051 | struct resource *reg_mem; | 1047 | struct resource *reg_mem; |
@@ -1224,15 +1220,6 @@ struct gk20a { | |||
1224 | u32 max_ltc_count; | 1220 | u32 max_ltc_count; |
1225 | u32 ltc_count; | 1221 | u32 ltc_count; |
1226 | 1222 | ||
1227 | atomic_t hw_irq_stall_count; | ||
1228 | atomic_t hw_irq_nonstall_count; | ||
1229 | |||
1230 | atomic_t sw_irq_stall_last_handled; | ||
1231 | wait_queue_head_t sw_irq_stall_last_handled_wq; | ||
1232 | |||
1233 | atomic_t sw_irq_nonstall_last_handled; | ||
1234 | wait_queue_head_t sw_irq_nonstall_last_handled_wq; | ||
1235 | |||
1236 | struct gk20a_channel_worker { | 1223 | struct gk20a_channel_worker { |
1237 | struct nvgpu_thread poll_task; | 1224 | struct nvgpu_thread poll_task; |
1238 | atomic_t put; | 1225 | atomic_t put; |
@@ -1485,7 +1472,6 @@ int __gk20a_do_idle(struct gk20a *g, bool force_reset); | |||
1485 | int __gk20a_do_unidle(struct gk20a *g); | 1472 | int __gk20a_do_unidle(struct gk20a *g); |
1486 | 1473 | ||
1487 | int gk20a_can_busy(struct gk20a *g); | 1474 | int gk20a_can_busy(struct gk20a *g); |
1488 | void gk20a_driver_start_unload(struct gk20a *g); | ||
1489 | int gk20a_wait_for_idle(struct device *dev); | 1475 | int gk20a_wait_for_idle(struct device *dev); |
1490 | 1476 | ||
1491 | #define NVGPU_GPU_ARCHITECTURE_SHIFT 4 | 1477 | #define NVGPU_GPU_ARCHITECTURE_SHIFT 4 |
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c index accda972..e25fcfc3 100644 --- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c | |||
@@ -27,13 +27,11 @@ | |||
27 | void mc_gk20a_isr_stall(struct gk20a *g) | 27 | void mc_gk20a_isr_stall(struct gk20a *g) |
28 | { | 28 | { |
29 | u32 mc_intr_0; | 29 | u32 mc_intr_0; |
30 | int hw_irq_count; | ||
31 | u32 engine_id_idx; | 30 | u32 engine_id_idx; |
32 | u32 active_engine_id = 0; | 31 | u32 active_engine_id = 0; |
33 | u32 engine_enum = ENGINE_INVAL_GK20A; | 32 | u32 engine_enum = ENGINE_INVAL_GK20A; |
34 | 33 | ||
35 | mc_intr_0 = g->ops.mc.intr_stall(g); | 34 | mc_intr_0 = g->ops.mc.intr_stall(g); |
36 | hw_irq_count = atomic_read(&g->hw_irq_stall_count); | ||
37 | 35 | ||
38 | gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); | 36 | gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0); |
39 | 37 | ||
@@ -67,9 +65,6 @@ void mc_gk20a_isr_stall(struct gk20a *g) | |||
67 | g->ops.ltc.isr(g); | 65 | g->ops.ltc.isr(g); |
68 | if (mc_intr_0 & mc_intr_0_pbus_pending_f()) | 66 | if (mc_intr_0 & mc_intr_0_pbus_pending_f()) |
69 | g->ops.bus.isr(g); | 67 | g->ops.bus.isr(g); |
70 | |||
71 | /* sync handled irq counter before re-enabling interrupts */ | ||
72 | atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count); | ||
73 | } | 68 | } |
74 | 69 | ||
75 | void mc_gk20a_intr_enable(struct gk20a *g) | 70 | void mc_gk20a_intr_enable(struct gk20a *g) |
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c index 718869f6..b7a52349 100644 --- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c | |||
@@ -71,14 +71,12 @@ void mc_gp10b_intr_unit_config(struct gk20a *g, bool enable, | |||
71 | void mc_gp10b_isr_stall(struct gk20a *g) | 71 | void mc_gp10b_isr_stall(struct gk20a *g) |
72 | { | 72 | { |
73 | u32 mc_intr_0; | 73 | u32 mc_intr_0; |
74 | int hw_irq_count; | ||
75 | 74 | ||
76 | u32 engine_id_idx; | 75 | u32 engine_id_idx; |
77 | u32 active_engine_id = 0; | 76 | u32 active_engine_id = 0; |
78 | u32 engine_enum = ENGINE_INVAL_GK20A; | 77 | u32 engine_enum = ENGINE_INVAL_GK20A; |
79 | 78 | ||
80 | mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); | 79 | mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); |
81 | hw_irq_count = atomic_read(&g->hw_irq_stall_count); | ||
82 | 80 | ||
83 | gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); | 81 | gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); |
84 | 82 | ||
@@ -116,9 +114,6 @@ void mc_gp10b_isr_stall(struct gk20a *g) | |||
116 | if (mc_intr_0 & mc_intr_pbus_pending_f()) | 114 | if (mc_intr_0 & mc_intr_pbus_pending_f()) |
117 | g->ops.bus.isr(g); | 115 | g->ops.bus.isr(g); |
118 | 116 | ||
119 | /* sync handled irq counter before re-enabling interrupts */ | ||
120 | atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count); | ||
121 | |||
122 | gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); | 117 | gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); |
123 | 118 | ||
124 | } | 119 | } |