summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/os/linux/driver_common.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/driver_common.c')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/driver_common.c | 60
1 file changed, 50 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/driver_common.c b/drivers/gpu/nvgpu/os/linux/driver_common.c
index c76dabeb..602d325d 100644
--- a/drivers/gpu/nvgpu/os/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/os/linux/driver_common.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -312,30 +312,70 @@ static int cyclic_delta(int a, int b)
312} 312}
313 313
314/** 314/**
315 * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete 315 * nvgpu_wait_for_stall_interrupts - Wait for the stalling interrupts to
316 * complete.
316 * 317 *
317 * @g - The GPU to wait on. 318 * @g - The GPU to wait on.
319 * @timeout - maximum time period to wait for.
318 * 320 *
319 * Waits until all interrupt handlers that have been scheduled to run have 321 * Waits until all stalling interrupt handlers that have been scheduled to run
320 * completed. 322 * have completed.
321 */ 323 */
322void nvgpu_wait_for_deferred_interrupts(struct gk20a *g) 324int nvgpu_wait_for_stall_interrupts(struct gk20a *g, u32 timeout)
323{ 325{
324 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); 326 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
325 int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count); 327 int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count);
326 int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
327 328
328 /* wait until all stalling irqs are handled */ 329 /* wait until all stalling irqs are handled */
329 NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq, 330 return NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
330 cyclic_delta(stall_irq_threshold, 331 cyclic_delta(stall_irq_threshold,
331 atomic_read(&l->sw_irq_stall_last_handled)) 332 atomic_read(&l->sw_irq_stall_last_handled))
332 <= 0, 0); 333 <= 0, timeout);
334}
335
336/**
337 * nvgpu_wait_for_nonstall_interrupts - Wait for the nonstalling interrupts to
338 * complete.
339 *
340 * @g - The GPU to wait on.
341 * @timeout - maximum time period to wait for.
342 *
343 * Waits until all non-stalling interrupt handlers that have been scheduled to
344 * run have completed.
345 */
346int nvgpu_wait_for_nonstall_interrupts(struct gk20a *g, u32 timeout)
347{
348 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
349 int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
333 350
334 /* wait until all non-stalling irqs are handled */ 351 /* wait until all non-stalling irqs are handled */
335 NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq, 352 return NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
336 cyclic_delta(nonstall_irq_threshold, 353 cyclic_delta(nonstall_irq_threshold,
337 atomic_read(&l->sw_irq_nonstall_last_handled)) 354 atomic_read(&l->sw_irq_nonstall_last_handled))
338 <= 0, 0); 355 <= 0, timeout);
356}
357
358/**
359 * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
360 *
361 * @g - The GPU to wait on.
362 *
363 * Waits until all interrupt handlers that have been scheduled to run have
364 * completed.
365 */
366void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
367{
368 int ret;
369
370 ret = nvgpu_wait_for_stall_interrupts(g, 0U);
371 if (ret != 0) {
372 nvgpu_err(g, "wait for stall interrupts failed %d", ret);
373 }
374
375 ret = nvgpu_wait_for_nonstall_interrupts(g, 0U);
376 if (ret != 0) {
377 nvgpu_err(g, "wait for nonstall interrupts failed %d", ret);
378 }
339} 379}
340 380
341static void nvgpu_free_gk20a(struct gk20a *g) 381static void nvgpu_free_gk20a(struct gk20a *g)