From 942029a433390f3385ed9d6fc35476bbf9eafd98 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Mon, 5 Jun 2017 14:25:35 -0700
Subject: gpu: nvgpu: Split non-stall interrupt handling

Split handling of the non-stalling interrupt into a Linux-specific,
chip-agnostic part and an OS-independent, chip-specific part.

The Linux-specific, chip-independent part contains the ISR handler and
passes control to a bottom half worker. It uses the new MC HALs
intr_nonstall (query interrupt status), intr_nonstall_pause (pause
interrupts), intr_nonstall_resume (resume interrupts), and
is_intr1_pending (query per-engine interrupt bit).

MC HAL isr_nonstall is removed because its work is now handled in
chip-independent code.

JIRA NVGPU-26

Change-Id: I3e4c9905ef6eef7f1cc9f71b0278518ae663f87e
Signed-off-by: Terje Bergstrom
Reviewed-on: http://git-master/r/1497048
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/mc_gk20a.c | 128 ++++++++-----------------------------
 1 file changed, 26 insertions(+), 102 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
index 2cdcaaeb..111872a2 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -24,62 +24,6 @@
 
 #include 
 
-void mc_gk20a_nonstall_cb(struct work_struct *work)
-{
-	struct gk20a *g = container_of(work, struct gk20a, nonstall_fn_work);
-	u32 ops;
-	bool semaphore_wakeup, post_events;
-
-	do {
-		ops = atomic_xchg(&g->nonstall_ops, 0);
-
-		semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
-		post_events = ops & gk20a_nonstall_ops_post_events;
-
-		if (semaphore_wakeup)
-			gk20a_channel_semaphore_wakeup(g, post_events);
-
-	} while (atomic_read(&g->nonstall_ops) != 0);
-}
-
-irqreturn_t mc_gk20a_isr_nonstall(struct gk20a *g)
-{
-	u32 mc_intr_1;
-	u32 hw_irq_count;
-
-	if (!g->power_on)
-		return IRQ_NONE;
-
-	/* not from gpu when sharing irq with others */
-	mc_intr_1 = gk20a_readl(g, mc_intr_1_r());
-	if (unlikely(!mc_intr_1))
-		return IRQ_NONE;
-
-	gk20a_writel(g, mc_intr_en_1_r(),
-		mc_intr_en_1_inta_disabled_f());
-
-	/* flush previous write */
-	gk20a_readl(g, mc_intr_en_1_r());
-
-	if (g->ops.mc.isr_thread_nonstall)
-		g->ops.mc.isr_thread_nonstall(g, mc_intr_1);
-
-	hw_irq_count = atomic_inc_return(&g->hw_irq_nonstall_count);
-
-	/* sync handled irq counter before re-enabling interrupts */
-	atomic_set(&g->sw_irq_nonstall_last_handled, hw_irq_count);
-
-	gk20a_writel(g, mc_intr_en_1_r(),
-		mc_intr_en_1_inta_hardware_f());
-
-	/* flush previous write */
-	gk20a_readl(g, mc_intr_en_1_r());
-
-	wake_up_all(&g->sw_irq_nonstall_last_handled_wq);
-
-	return IRQ_HANDLED;
-}
-
 void mc_gk20a_isr_stall(struct gk20a *g)
 {
 	u32 mc_intr_0;
@@ -128,49 +72,6 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 	atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count);
 }
 
-void mc_gk20a_intr_thread_nonstall(struct gk20a *g, u32 mc_intr_1)
-{
-	u32 engine_id_idx;
-	u32 active_engine_id = 0;
-	u32 engine_enum = ENGINE_INVAL_GK20A;
-	int ops_old, ops_new, ops = 0;
-
-	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
-		ops |= gk20a_fifo_nonstall_isr(g);
-
-	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
-			engine_id_idx++) {
-		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
-
-		if (mc_intr_1 &
-			g->fifo.engine_info[active_engine_id].intr_mask) {
-			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
-			/* GR Engine */
-			if (engine_enum == ENGINE_GR_GK20A)
-				ops |= gk20a_gr_nonstall_isr(g);
-
-			/* CE Engine */
-			if (((engine_enum == ENGINE_GRCE_GK20A) ||
-				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_nonstall)
-				ops |= g->ops.ce2.isr_nonstall(g,
-					g->fifo.engine_info[active_engine_id].
-					inst_id,
-					g->fifo.engine_info[active_engine_id].
-					pri_base);
-		}
-	}
-	if (ops) {
-		do {
-			ops_old = atomic_read(&g->nonstall_ops);
-			ops_new = ops_old | ops;
-		} while (ops_old != atomic_cmpxchg(&g->nonstall_ops,
-				ops_old, ops_new));
-
-		queue_work(g->nonstall_work_queue, &g->nonstall_fn_work);
-	}
-}
-
 void mc_gk20a_intr_enable(struct gk20a *g)
 {
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
@@ -226,11 +127,34 @@ void mc_gk20a_intr_stall_resume(struct gk20a *g)
 	gk20a_readl(g, mc_intr_en_0_r());
 }
 
+void mc_gk20a_intr_nonstall_pause(struct gk20a *g)
+{
+	gk20a_writel(g, mc_intr_en_1_r(),
+		mc_intr_en_0_inta_disabled_f());
+
+	/* flush previous write */
+	gk20a_readl(g, mc_intr_en_1_r());
+}
+
+void mc_gk20a_intr_nonstall_resume(struct gk20a *g)
+{
+	gk20a_writel(g, mc_intr_en_1_r(),
+		mc_intr_en_0_inta_hardware_f());
+
+	/* flush previous write */
+	gk20a_readl(g, mc_intr_en_1_r());
+}
+
 u32 mc_gk20a_intr_stall(struct gk20a *g)
 {
 	return gk20a_readl(g, mc_intr_0_r());
 }
 
+u32 mc_gk20a_intr_nonstall(struct gk20a *g)
+{
+	return gk20a_readl(g, mc_intr_1_r());
+}
+
 void gk20a_mc_disable(struct gk20a *g, u32 units)
 {
 	u32 pmc;
@@ -320,9 +244,9 @@ void gk20a_init_mc(struct gpu_ops *gops)
 	gops->mc.intr_stall = mc_gk20a_intr_stall;
 	gops->mc.intr_stall_pause = mc_gk20a_intr_stall_pause;
 	gops->mc.intr_stall_resume = mc_gk20a_intr_stall_resume;
-	gops->mc.isr_nonstall = mc_gk20a_isr_nonstall;
-	gops->mc.isr_thread_nonstall = mc_gk20a_intr_thread_nonstall;
-	gops->mc.isr_nonstall_cb = mc_gk20a_nonstall_cb;
+	gops->mc.intr_nonstall = mc_gk20a_intr_nonstall;
+	gops->mc.intr_nonstall_pause = mc_gk20a_intr_nonstall_pause;
+	gops->mc.intr_nonstall_resume = mc_gk20a_intr_nonstall_resume;
 	gops->mc.enable = gk20a_mc_enable;
 	gops->mc.disable = gk20a_mc_disable;
 	gops->mc.reset = gk20a_mc_reset;
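
For context, below is a minimal sketch of how a Linux-specific, chip-independent
non-stall top half can be built on the new MC HALs named above. It is an
illustration only, not the common code actually introduced by this series: the
function name, the include paths, and the exact split of work between top and
bottom half are assumptions, while the gk20a fields and the workqueue hand-off
are taken from the code removed in this patch.

#include <linux/irqreturn.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

#include "gk20a/gk20a.h"	/* struct gk20a and gpu_ops; path assumed */

/* Sketch of a chip-independent non-stall top half built on the new HALs. */
static irqreturn_t nvgpu_intr_nonstall_sketch(struct gk20a *g)
{
	u32 mc_intr_1;

	if (!g->power_on)
		return IRQ_NONE;

	/* Query pending non-stall interrupts through the chip HAL. */
	mc_intr_1 = g->ops.mc.intr_nonstall(g);

	/* not from gpu when sharing irq with others */
	if (unlikely(!mc_intr_1))
		return IRQ_NONE;

	/* Mask non-stall interrupts until the bottom half has run. */
	g->ops.mc.intr_nonstall_pause(g);

	atomic_inc(&g->hw_irq_nonstall_count);

	/*
	 * Hand engine-specific processing to the bottom half worker.  The
	 * worker is expected to use g->ops.mc.is_intr1_pending() to route
	 * per-unit work and to call g->ops.mc.intr_nonstall_resume() when
	 * it has finished.
	 */
	queue_work(g->nonstall_work_queue, &g->nonstall_fn_work);

	return IRQ_HANDLED;
}

Pausing in the top half and resuming only after the bottom half has run keeps
the non-stall line masked while semaphore wakeups are processed; the removed
mc_gk20a_isr_nonstall did the equivalent masking and unmasking inline around
its call to isr_thread_nonstall.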