Diffstat (limited to 'include/os/linux/driver_common.c')
-rw-r--r--  include/os/linux/driver_common.c | 69
1 file changed, 59 insertions(+), 10 deletions(-)
diff --git a/include/os/linux/driver_common.c b/include/os/linux/driver_common.c
index c76dabe..8f5872d 100644
--- a/include/os/linux/driver_common.c
+++ b/include/os/linux/driver_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -18,6 +18,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/of_platform.h>
 #include <uapi/linux/nvgpu.h>
 
 #include <nvgpu/defaults.h>
@@ -241,6 +242,8 @@ int nvgpu_probe(struct gk20a *g,
 	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
 	int err = 0;
+	struct device_node *np = dev->of_node;
+	bool disable_l3_alloc = false;
 
 	nvgpu_init_vars(g);
 	nvgpu_init_gr_vars(g);
@@ -265,6 +268,12 @@ int nvgpu_probe(struct gk20a *g,
 		return err;
 	}
 
+	disable_l3_alloc = of_property_read_bool(np, "disable_l3_alloc");
+	if (disable_l3_alloc) {
+		nvgpu_log_info(g, "L3 alloc is disabled\n");
+		__nvgpu_set_enabled(g, NVGPU_DISABLE_L3_SUPPORT, true);
+	}
+
 	nvgpu_init_mm_vars(g);
 
 	/* platform probe can defer do user init only if probe succeeds */
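
A note on the probe change above: of_property_read_bool() keys off the mere
presence of the property, so an empty "disable_l3_alloc;" entry in the GPU's
device-tree node is enough to set NVGPU_DISABLE_L3_SUPPORT. A minimal sketch
of the same pattern in isolation (the DT node layout in the comment is
illustrative, not taken from a real device tree):

#include <linux/of.h>

/*
 * Sketch of the probe-time check added above. An empty property in the
 * GPU's DT node, e.g.
 *
 *	gpu {
 *		disable_l3_alloc;
 *	};
 *
 * makes of_property_read_bool() return true; a missing property returns
 * false. (The node layout here is hypothetical.)
 */
static bool l3_alloc_disabled(const struct device_node *np)
{
	return of_property_read_bool(np, "disable_l3_alloc");
}
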
@@ -312,30 +321,70 @@ static int cyclic_delta(int a, int b)
 }
 
 /**
- * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
+ * nvgpu_wait_for_stall_interrupts - Wait for the stalling interrupts to
+ * complete.
  *
  * @g - The GPU to wait on.
+ * @timeout - maximum time period to wait for.
  *
- * Waits until all interrupt handlers that have been scheduled to run have
- * completed.
+ * Waits until all stalling interrupt handlers that have been scheduled to run
+ * have completed.
  */
-void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
+int nvgpu_wait_for_stall_interrupts(struct gk20a *g, u32 timeout)
 {
 	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count);
-	int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
 
 	/* wait until all stalling irqs are handled */
-	NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
+	return NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
 		cyclic_delta(stall_irq_threshold,
 			atomic_read(&l->sw_irq_stall_last_handled))
-		<= 0, 0);
+		<= 0, timeout);
+}
+
+/**
+ * nvgpu_wait_for_nonstall_interrupts - Wait for the nonstalling interrupts to
+ * complete.
+ *
+ * @g - The GPU to wait on.
+ * @timeout - maximum time period to wait for.
+ *
+ * Waits until all non-stalling interrupt handlers that have been scheduled to
+ * run have completed.
+ */
+int nvgpu_wait_for_nonstall_interrupts(struct gk20a *g, u32 timeout)
+{
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+	int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
 
 	/* wait until all non-stalling irqs are handled */
-	NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
+	return NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
 		cyclic_delta(nonstall_irq_threshold,
 			atomic_read(&l->sw_irq_nonstall_last_handled))
-		<= 0, 0);
+		<= 0, timeout);
+}
+
+/**
+ * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
+ *
+ * @g - The GPU to wait on.
+ *
+ * Waits until all interrupt handlers that have been scheduled to run have
+ * completed.
+ */
+void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
+{
+	int ret;
+
+	ret = nvgpu_wait_for_stall_interrupts(g, 0U);
+	if (ret != 0) {
+		nvgpu_err(g, "wait for stall interrupts failed %d", ret);
+	}
+
+	ret = nvgpu_wait_for_nonstall_interrupts(g, 0U);
+	if (ret != 0) {
+		nvgpu_err(g, "wait for nonstall interrupts failed %d", ret);
+	}
 }
 
 static void nvgpu_free_gk20a(struct gk20a *g)
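
The refactor above splits the old unconditional wait into two timeout-aware
helpers and keeps nvgpu_wait_for_deferred_interrupts() as a wrapper that
passes a timeout of 0, preserving the old wait-forever behavior. Callers that
must not block indefinitely can now pass a bound and handle the error. Below
is a hedged sketch of such a caller: the function name and the 3000 ms budget
are hypothetical, and it assumes NVGPU_COND_WAIT's timeout is in milliseconds
with -ETIMEDOUT returned on expiry, as nvgpu's condition-variable API
documents; only the two wait functions come from this diff.

/*
 * Hypothetical caller that bounds each interrupt quiesce to ~3 s
 * instead of waiting forever. Only nvgpu_wait_for_stall_interrupts()
 * and nvgpu_wait_for_nonstall_interrupts() come from the diff; the
 * function name and timeout budget are illustrative.
 */
static int example_quiesce_interrupts(struct gk20a *g)
{
	int err;

	err = nvgpu_wait_for_stall_interrupts(g, 3000U);
	if (err != 0) {
		return err;	/* assumed to be -ETIMEDOUT on expiry */
	}

	return nvgpu_wait_for_nonstall_interrupts(g, 3000U);
}

Keeping the zero-timeout wrapper means existing shutdown paths behave exactly
as before, while new callers can fail gracefully instead of hanging.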